/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_ALL_RAR_ENTRIES 16
enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};
#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
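
/* Each IXGBE_STAT()/IXGBE_NETDEV_STAT() use expands to a {type, size,
 * offset} triple for one ixgbe_stats entry: the type selects whether the
 * counter lives in struct ixgbe_adapter or in the rtnl_link_stats64
 * returned by dev_get_stats(), the offset locates it, and the size tells
 * ixgbe_get_ethtool_stats() whether to copy it out as a u64 or a u32.
 */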
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};
/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
			/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
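
/* IXGBE_STATS_LEN is the total number of u64 counters exposed through
 * ETHTOOL_GSTATS: the global table above, one pxon/pxoff counter per
 * packet buffer in each direction, and the per-queue queue_stats
 * counters (packets/bytes, plus busy-poll counters when
 * BP_EXTENDED_STATS is enabled).  ixgbe_get_sset_count(),
 * ixgbe_get_strings() and ixgbe_get_ethtool_stats() must all stay in
 * sync with this value.
 */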
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};

#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
			 SUPPORTED_10000baseKX4_Full | \
			 SUPPORTED_10000baseKR_Full)
#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)

static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
{
	if (!ixgbe_isbackplane(hw->phy.media_type))
		return SUPPORTED_10000baseT_Full;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		return SUPPORTED_10000baseKX4_Full;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
		return SUPPORTED_10000baseKR_Full;
	default:
		return SUPPORTED_10000baseKX4_Full |
		       SUPPORTED_10000baseKR_Full;
	}
}
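
/* Only backplane (KX4/KR) designs get a mode-specific 10G type here;
 * non-backplane media (e.g. SFP+ or 10GBASE-T) are all reported as
 * 10000baseT_Full, and an unrecognised backplane device ID falls back
 * to advertising both KX4 and KR.
 */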
static int ixgbe_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		ecmd->supported |= ixgbe_get_supported_10gtypes(hw);
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		ecmd->supported |= SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ?
				   SUPPORTED_1000baseKX_Full :
				   SUPPORTED_1000baseT_Full;

	/* default advertised speed if phy.autoneg_advertised isn't set */
	ecmd->advertising = ecmd->supported;
	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		ecmd->advertising = 0;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (ecmd->supported & SUPPORTED_1000baseKX_Full)
				ecmd->advertising |= ADVERTISED_1000baseKX_Full;
			else
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	} else {
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				ecmd->advertising = ADVERTISED_10000baseT_Full;
		}
	}

	if (autoneg) {
		ecmd->supported |= SUPPORTED_Autoneg;
		ecmd->advertising |= ADVERTISED_Autoneg;
		ecmd->autoneg = AUTONEG_ENABLE;
	} else
		ecmd->autoneg = AUTONEG_DISABLE;

	ecmd->transceiver = XCVR_EXTERNAL;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_cu_unknown:
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			ecmd->supported |= SUPPORTED_TP;
			ecmd->advertising |= ADVERTISED_TP;
			ecmd->port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_OTHER;
		break;
	}

	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_10000);
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_2500);
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_100);
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}
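
/* Hypothetical example of the resulting "ethtool <iface>" output for an
 * SFP+ direct-attach cable with link up: Supported ports [ FIBRE ],
 * Speed: 10000Mb/s, Duplex: Full, Port: Direct Attach Copper,
 * Auto-negotiation: off.  The exact fields depend on the PHY/SFP type
 * decoded above.
 */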
static int ixgbe_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (ecmd->advertising & ~ecmd->supported)
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
			if (ecmd->advertising ==
			    (ADVERTISED_10000baseT_Full |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (ecmd->advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = ethtool_cmd_speed(ecmd);
		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
		    (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
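
/* The sum test above is a compact way of requiring exactly SPEED_10000
 * together with DUPLEX_FULL: for the media types handled in this branch
 * the only accepted request is autoneg off, 10000 Mb/s, full duplex;
 * everything else returns -EINVAL.
 */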
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1139
	return IXGBE_REGS_LEN * sizeof(u32);
}
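
/* 1139 32-bit registers means the ETHTOOL_GREGS buffer handed to
 * ixgbe_get_regs() below is 1139 * 4 = 4556 bytes. */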
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;

	/* General Registers */
497 regs_buff
[0] = IXGBE_READ_REG(hw
, IXGBE_CTRL
);
498 regs_buff
[1] = IXGBE_READ_REG(hw
, IXGBE_STATUS
);
499 regs_buff
[2] = IXGBE_READ_REG(hw
, IXGBE_CTRL_EXT
);
500 regs_buff
[3] = IXGBE_READ_REG(hw
, IXGBE_ESDP
);
501 regs_buff
[4] = IXGBE_READ_REG(hw
, IXGBE_EODSDP
);
502 regs_buff
[5] = IXGBE_READ_REG(hw
, IXGBE_LEDCTL
);
503 regs_buff
[6] = IXGBE_READ_REG(hw
, IXGBE_FRTIMER
);
504 regs_buff
[7] = IXGBE_READ_REG(hw
, IXGBE_TCPTIMER
);
507 regs_buff
[8] = IXGBE_READ_REG(hw
, IXGBE_EEC(hw
));
508 regs_buff
[9] = IXGBE_READ_REG(hw
, IXGBE_EERD
);
509 regs_buff
[10] = IXGBE_READ_REG(hw
, IXGBE_FLA(hw
));
510 regs_buff
[11] = IXGBE_READ_REG(hw
, IXGBE_EEMNGCTL
);
511 regs_buff
[12] = IXGBE_READ_REG(hw
, IXGBE_EEMNGDATA
);
512 regs_buff
[13] = IXGBE_READ_REG(hw
, IXGBE_FLMNGCTL
);
513 regs_buff
[14] = IXGBE_READ_REG(hw
, IXGBE_FLMNGDATA
);
514 regs_buff
[15] = IXGBE_READ_REG(hw
, IXGBE_FLMNGCNT
);
515 regs_buff
[16] = IXGBE_READ_REG(hw
, IXGBE_FLOP
);
516 regs_buff
[17] = IXGBE_READ_REG(hw
, IXGBE_GRC(hw
));
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
521 regs_buff
[18] = IXGBE_READ_REG(hw
, IXGBE_EICS
);
522 regs_buff
[19] = IXGBE_READ_REG(hw
, IXGBE_EICS
);
523 regs_buff
[20] = IXGBE_READ_REG(hw
, IXGBE_EIMS
);
524 regs_buff
[21] = IXGBE_READ_REG(hw
, IXGBE_EIMC
);
525 regs_buff
[22] = IXGBE_READ_REG(hw
, IXGBE_EIAC
);
526 regs_buff
[23] = IXGBE_READ_REG(hw
, IXGBE_EIAM
);
527 regs_buff
[24] = IXGBE_READ_REG(hw
, IXGBE_EITR(0));
528 regs_buff
[25] = IXGBE_READ_REG(hw
, IXGBE_IVAR(0));
529 regs_buff
[26] = IXGBE_READ_REG(hw
, IXGBE_MSIXT
);
530 regs_buff
[27] = IXGBE_READ_REG(hw
, IXGBE_MSIXPBA
);
531 regs_buff
[28] = IXGBE_READ_REG(hw
, IXGBE_PBACL(0));
532 regs_buff
[29] = IXGBE_READ_REG(hw
, IXGBE_GPIE
);
535 regs_buff
[30] = IXGBE_READ_REG(hw
, IXGBE_PFCTOP
);
536 regs_buff
[31] = IXGBE_READ_REG(hw
, IXGBE_FCTTV(0));
537 regs_buff
[32] = IXGBE_READ_REG(hw
, IXGBE_FCTTV(1));
538 regs_buff
[33] = IXGBE_READ_REG(hw
, IXGBE_FCTTV(2));
539 regs_buff
[34] = IXGBE_READ_REG(hw
, IXGBE_FCTTV(3));
540 for (i
= 0; i
< 8; i
++) {
541 switch (hw
->mac
.type
) {
542 case ixgbe_mac_82598EB
:
543 regs_buff
[35 + i
] = IXGBE_READ_REG(hw
, IXGBE_FCRTL(i
));
544 regs_buff
[43 + i
] = IXGBE_READ_REG(hw
, IXGBE_FCRTH(i
));
546 case ixgbe_mac_82599EB
:
549 case ixgbe_mac_X550EM_x
:
550 regs_buff
[35 + i
] = IXGBE_READ_REG(hw
, IXGBE_FCRTL_82599(i
));
551 regs_buff
[43 + i
] = IXGBE_READ_REG(hw
, IXGBE_FCRTH_82599(i
));
557 regs_buff
[51] = IXGBE_READ_REG(hw
, IXGBE_FCRTV
);
558 regs_buff
[52] = IXGBE_READ_REG(hw
, IXGBE_TFCS
);
561 for (i
= 0; i
< 64; i
++)
562 regs_buff
[53 + i
] = IXGBE_READ_REG(hw
, IXGBE_RDBAL(i
));
563 for (i
= 0; i
< 64; i
++)
564 regs_buff
[117 + i
] = IXGBE_READ_REG(hw
, IXGBE_RDBAH(i
));
565 for (i
= 0; i
< 64; i
++)
566 regs_buff
[181 + i
] = IXGBE_READ_REG(hw
, IXGBE_RDLEN(i
));
567 for (i
= 0; i
< 64; i
++)
568 regs_buff
[245 + i
] = IXGBE_READ_REG(hw
, IXGBE_RDH(i
));
569 for (i
= 0; i
< 64; i
++)
570 regs_buff
[309 + i
] = IXGBE_READ_REG(hw
, IXGBE_RDT(i
));
571 for (i
= 0; i
< 64; i
++)
572 regs_buff
[373 + i
] = IXGBE_READ_REG(hw
, IXGBE_RXDCTL(i
));
573 for (i
= 0; i
< 16; i
++)
574 regs_buff
[437 + i
] = IXGBE_READ_REG(hw
, IXGBE_SRRCTL(i
));
575 for (i
= 0; i
< 16; i
++)
576 regs_buff
[453 + i
] = IXGBE_READ_REG(hw
, IXGBE_DCA_RXCTRL(i
));
577 regs_buff
[469] = IXGBE_READ_REG(hw
, IXGBE_RDRXCTL
);
578 for (i
= 0; i
< 8; i
++)
579 regs_buff
[470 + i
] = IXGBE_READ_REG(hw
, IXGBE_RXPBSIZE(i
));
580 regs_buff
[478] = IXGBE_READ_REG(hw
, IXGBE_RXCTRL
);
581 regs_buff
[479] = IXGBE_READ_REG(hw
, IXGBE_DROPEN
);
584 regs_buff
[480] = IXGBE_READ_REG(hw
, IXGBE_RXCSUM
);
585 regs_buff
[481] = IXGBE_READ_REG(hw
, IXGBE_RFCTL
);
586 for (i
= 0; i
< 16; i
++)
587 regs_buff
[482 + i
] = IXGBE_READ_REG(hw
, IXGBE_RAL(i
));
588 for (i
= 0; i
< 16; i
++)
589 regs_buff
[498 + i
] = IXGBE_READ_REG(hw
, IXGBE_RAH(i
));
590 regs_buff
[514] = IXGBE_READ_REG(hw
, IXGBE_PSRTYPE(0));
591 regs_buff
[515] = IXGBE_READ_REG(hw
, IXGBE_FCTRL
);
592 regs_buff
[516] = IXGBE_READ_REG(hw
, IXGBE_VLNCTRL
);
593 regs_buff
[517] = IXGBE_READ_REG(hw
, IXGBE_MCSTCTRL
);
594 regs_buff
[518] = IXGBE_READ_REG(hw
, IXGBE_MRQC
);
595 regs_buff
[519] = IXGBE_READ_REG(hw
, IXGBE_VMD_CTL
);
596 for (i
= 0; i
< 8; i
++)
597 regs_buff
[520 + i
] = IXGBE_READ_REG(hw
, IXGBE_IMIR(i
));
598 for (i
= 0; i
< 8; i
++)
599 regs_buff
[528 + i
] = IXGBE_READ_REG(hw
, IXGBE_IMIREXT(i
));
600 regs_buff
[536] = IXGBE_READ_REG(hw
, IXGBE_IMIRVP
);
603 for (i
= 0; i
< 32; i
++)
604 regs_buff
[537 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDBAL(i
));
605 for (i
= 0; i
< 32; i
++)
606 regs_buff
[569 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDBAH(i
));
607 for (i
= 0; i
< 32; i
++)
608 regs_buff
[601 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDLEN(i
));
609 for (i
= 0; i
< 32; i
++)
610 regs_buff
[633 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDH(i
));
611 for (i
= 0; i
< 32; i
++)
612 regs_buff
[665 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDT(i
));
613 for (i
= 0; i
< 32; i
++)
614 regs_buff
[697 + i
] = IXGBE_READ_REG(hw
, IXGBE_TXDCTL(i
));
615 for (i
= 0; i
< 32; i
++)
616 regs_buff
[729 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDWBAL(i
));
617 for (i
= 0; i
< 32; i
++)
618 regs_buff
[761 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDWBAH(i
));
619 regs_buff
[793] = IXGBE_READ_REG(hw
, IXGBE_DTXCTL
);
620 for (i
= 0; i
< 16; i
++)
621 regs_buff
[794 + i
] = IXGBE_READ_REG(hw
, IXGBE_DCA_TXCTRL(i
));
622 regs_buff
[810] = IXGBE_READ_REG(hw
, IXGBE_TIPG
);
623 for (i
= 0; i
< 8; i
++)
624 regs_buff
[811 + i
] = IXGBE_READ_REG(hw
, IXGBE_TXPBSIZE(i
));
625 regs_buff
[819] = IXGBE_READ_REG(hw
, IXGBE_MNGTXMAP
);
628 regs_buff
[820] = IXGBE_READ_REG(hw
, IXGBE_WUC
);
629 regs_buff
[821] = IXGBE_READ_REG(hw
, IXGBE_WUFC
);
630 regs_buff
[822] = IXGBE_READ_REG(hw
, IXGBE_WUS
);
631 regs_buff
[823] = IXGBE_READ_REG(hw
, IXGBE_IPAV
);
632 regs_buff
[824] = IXGBE_READ_REG(hw
, IXGBE_IP4AT
);
633 regs_buff
[825] = IXGBE_READ_REG(hw
, IXGBE_IP6AT
);
634 regs_buff
[826] = IXGBE_READ_REG(hw
, IXGBE_WUPL
);
635 regs_buff
[827] = IXGBE_READ_REG(hw
, IXGBE_WUPM
);
636 regs_buff
[828] = IXGBE_READ_REG(hw
, IXGBE_FHFT(0));
639 regs_buff
[829] = IXGBE_READ_REG(hw
, IXGBE_RMCS
); /* same as FCCFG */
640 regs_buff
[831] = IXGBE_READ_REG(hw
, IXGBE_PDPMCS
); /* same as RTTPCS */
642 switch (hw
->mac
.type
) {
643 case ixgbe_mac_82598EB
:
644 regs_buff
[830] = IXGBE_READ_REG(hw
, IXGBE_DPMCS
);
645 regs_buff
[832] = IXGBE_READ_REG(hw
, IXGBE_RUPPBMR
);
646 for (i
= 0; i
< 8; i
++)
648 IXGBE_READ_REG(hw
, IXGBE_RT2CR(i
));
649 for (i
= 0; i
< 8; i
++)
651 IXGBE_READ_REG(hw
, IXGBE_RT2SR(i
));
652 for (i
= 0; i
< 8; i
++)
654 IXGBE_READ_REG(hw
, IXGBE_TDTQ2TCCR(i
));
655 for (i
= 0; i
< 8; i
++)
657 IXGBE_READ_REG(hw
, IXGBE_TDTQ2TCSR(i
));
659 case ixgbe_mac_82599EB
:
662 case ixgbe_mac_X550EM_x
:
663 regs_buff
[830] = IXGBE_READ_REG(hw
, IXGBE_RTTDCS
);
664 regs_buff
[832] = IXGBE_READ_REG(hw
, IXGBE_RTRPCS
);
665 for (i
= 0; i
< 8; i
++)
667 IXGBE_READ_REG(hw
, IXGBE_RTRPT4C(i
));
668 for (i
= 0; i
< 8; i
++)
670 IXGBE_READ_REG(hw
, IXGBE_RTRPT4S(i
));
671 for (i
= 0; i
< 8; i
++)
673 IXGBE_READ_REG(hw
, IXGBE_RTTDT2C(i
));
674 for (i
= 0; i
< 8; i
++)
676 IXGBE_READ_REG(hw
, IXGBE_RTTDT2S(i
));
682 for (i
= 0; i
< 8; i
++)
684 IXGBE_READ_REG(hw
, IXGBE_TDPT2TCCR(i
)); /* same as RTTPT2C */
685 for (i
= 0; i
< 8; i
++)
687 IXGBE_READ_REG(hw
, IXGBE_TDPT2TCSR(i
)); /* same as RTTPT2S */
690 regs_buff
[881] = IXGBE_GET_STAT(adapter
, crcerrs
);
691 regs_buff
[882] = IXGBE_GET_STAT(adapter
, illerrc
);
692 regs_buff
[883] = IXGBE_GET_STAT(adapter
, errbc
);
693 regs_buff
[884] = IXGBE_GET_STAT(adapter
, mspdc
);
694 for (i
= 0; i
< 8; i
++)
695 regs_buff
[885 + i
] = IXGBE_GET_STAT(adapter
, mpc
[i
]);
696 regs_buff
[893] = IXGBE_GET_STAT(adapter
, mlfc
);
697 regs_buff
[894] = IXGBE_GET_STAT(adapter
, mrfc
);
698 regs_buff
[895] = IXGBE_GET_STAT(adapter
, rlec
);
699 regs_buff
[896] = IXGBE_GET_STAT(adapter
, lxontxc
);
700 regs_buff
[897] = IXGBE_GET_STAT(adapter
, lxonrxc
);
701 regs_buff
[898] = IXGBE_GET_STAT(adapter
, lxofftxc
);
702 regs_buff
[899] = IXGBE_GET_STAT(adapter
, lxoffrxc
);
703 for (i
= 0; i
< 8; i
++)
704 regs_buff
[900 + i
] = IXGBE_GET_STAT(adapter
, pxontxc
[i
]);
705 for (i
= 0; i
< 8; i
++)
706 regs_buff
[908 + i
] = IXGBE_GET_STAT(adapter
, pxonrxc
[i
]);
707 for (i
= 0; i
< 8; i
++)
708 regs_buff
[916 + i
] = IXGBE_GET_STAT(adapter
, pxofftxc
[i
]);
709 for (i
= 0; i
< 8; i
++)
710 regs_buff
[924 + i
] = IXGBE_GET_STAT(adapter
, pxoffrxc
[i
]);
711 regs_buff
[932] = IXGBE_GET_STAT(adapter
, prc64
);
712 regs_buff
[933] = IXGBE_GET_STAT(adapter
, prc127
);
713 regs_buff
[934] = IXGBE_GET_STAT(adapter
, prc255
);
714 regs_buff
[935] = IXGBE_GET_STAT(adapter
, prc511
);
715 regs_buff
[936] = IXGBE_GET_STAT(adapter
, prc1023
);
716 regs_buff
[937] = IXGBE_GET_STAT(adapter
, prc1522
);
717 regs_buff
[938] = IXGBE_GET_STAT(adapter
, gprc
);
718 regs_buff
[939] = IXGBE_GET_STAT(adapter
, bprc
);
719 regs_buff
[940] = IXGBE_GET_STAT(adapter
, mprc
);
720 regs_buff
[941] = IXGBE_GET_STAT(adapter
, gptc
);
721 regs_buff
[942] = IXGBE_GET_STAT(adapter
, gorc
);
722 regs_buff
[944] = IXGBE_GET_STAT(adapter
, gotc
);
723 for (i
= 0; i
< 8; i
++)
724 regs_buff
[946 + i
] = IXGBE_GET_STAT(adapter
, rnbc
[i
]);
725 regs_buff
[954] = IXGBE_GET_STAT(adapter
, ruc
);
726 regs_buff
[955] = IXGBE_GET_STAT(adapter
, rfc
);
727 regs_buff
[956] = IXGBE_GET_STAT(adapter
, roc
);
728 regs_buff
[957] = IXGBE_GET_STAT(adapter
, rjc
);
729 regs_buff
[958] = IXGBE_GET_STAT(adapter
, mngprc
);
730 regs_buff
[959] = IXGBE_GET_STAT(adapter
, mngpdc
);
731 regs_buff
[960] = IXGBE_GET_STAT(adapter
, mngptc
);
732 regs_buff
[961] = IXGBE_GET_STAT(adapter
, tor
);
733 regs_buff
[963] = IXGBE_GET_STAT(adapter
, tpr
);
734 regs_buff
[964] = IXGBE_GET_STAT(adapter
, tpt
);
735 regs_buff
[965] = IXGBE_GET_STAT(adapter
, ptc64
);
736 regs_buff
[966] = IXGBE_GET_STAT(adapter
, ptc127
);
737 regs_buff
[967] = IXGBE_GET_STAT(adapter
, ptc255
);
738 regs_buff
[968] = IXGBE_GET_STAT(adapter
, ptc511
);
739 regs_buff
[969] = IXGBE_GET_STAT(adapter
, ptc1023
);
740 regs_buff
[970] = IXGBE_GET_STAT(adapter
, ptc1522
);
741 regs_buff
[971] = IXGBE_GET_STAT(adapter
, mptc
);
742 regs_buff
[972] = IXGBE_GET_STAT(adapter
, bptc
);
743 regs_buff
[973] = IXGBE_GET_STAT(adapter
, xec
);
744 for (i
= 0; i
< 16; i
++)
745 regs_buff
[974 + i
] = IXGBE_GET_STAT(adapter
, qprc
[i
]);
746 for (i
= 0; i
< 16; i
++)
747 regs_buff
[990 + i
] = IXGBE_GET_STAT(adapter
, qptc
[i
]);
748 for (i
= 0; i
< 16; i
++)
749 regs_buff
[1006 + i
] = IXGBE_GET_STAT(adapter
, qbrc
[i
]);
750 for (i
= 0; i
< 16; i
++)
751 regs_buff
[1022 + i
] = IXGBE_GET_STAT(adapter
, qbtc
[i
]);
754 regs_buff
[1038] = IXGBE_READ_REG(hw
, IXGBE_PCS1GCFIG
);
755 regs_buff
[1039] = IXGBE_READ_REG(hw
, IXGBE_PCS1GLCTL
);
756 regs_buff
[1040] = IXGBE_READ_REG(hw
, IXGBE_PCS1GLSTA
);
757 regs_buff
[1041] = IXGBE_READ_REG(hw
, IXGBE_PCS1GDBG0
);
758 regs_buff
[1042] = IXGBE_READ_REG(hw
, IXGBE_PCS1GDBG1
);
759 regs_buff
[1043] = IXGBE_READ_REG(hw
, IXGBE_PCS1GANA
);
760 regs_buff
[1044] = IXGBE_READ_REG(hw
, IXGBE_PCS1GANLP
);
761 regs_buff
[1045] = IXGBE_READ_REG(hw
, IXGBE_PCS1GANNP
);
762 regs_buff
[1046] = IXGBE_READ_REG(hw
, IXGBE_PCS1GANLPNP
);
763 regs_buff
[1047] = IXGBE_READ_REG(hw
, IXGBE_HLREG0
);
764 regs_buff
[1048] = IXGBE_READ_REG(hw
, IXGBE_HLREG1
);
765 regs_buff
[1049] = IXGBE_READ_REG(hw
, IXGBE_PAP
);
766 regs_buff
[1050] = IXGBE_READ_REG(hw
, IXGBE_MACA
);
767 regs_buff
[1051] = IXGBE_READ_REG(hw
, IXGBE_APAE
);
768 regs_buff
[1052] = IXGBE_READ_REG(hw
, IXGBE_ARD
);
769 regs_buff
[1053] = IXGBE_READ_REG(hw
, IXGBE_AIS
);
770 regs_buff
[1054] = IXGBE_READ_REG(hw
, IXGBE_MSCA
);
771 regs_buff
[1055] = IXGBE_READ_REG(hw
, IXGBE_MSRWD
);
772 regs_buff
[1056] = IXGBE_READ_REG(hw
, IXGBE_MLADD
);
773 regs_buff
[1057] = IXGBE_READ_REG(hw
, IXGBE_MHADD
);
774 regs_buff
[1058] = IXGBE_READ_REG(hw
, IXGBE_TREG
);
775 regs_buff
[1059] = IXGBE_READ_REG(hw
, IXGBE_PCSS1
);
776 regs_buff
[1060] = IXGBE_READ_REG(hw
, IXGBE_PCSS2
);
777 regs_buff
[1061] = IXGBE_READ_REG(hw
, IXGBE_XPCSS
);
778 regs_buff
[1062] = IXGBE_READ_REG(hw
, IXGBE_SERDESC
);
779 regs_buff
[1063] = IXGBE_READ_REG(hw
, IXGBE_MACS
);
780 regs_buff
[1064] = IXGBE_READ_REG(hw
, IXGBE_AUTOC
);
781 regs_buff
[1065] = IXGBE_READ_REG(hw
, IXGBE_LINKS
);
782 regs_buff
[1066] = IXGBE_READ_REG(hw
, IXGBE_AUTOC2
);
783 regs_buff
[1067] = IXGBE_READ_REG(hw
, IXGBE_AUTOC3
);
784 regs_buff
[1068] = IXGBE_READ_REG(hw
, IXGBE_ANLP1
);
785 regs_buff
[1069] = IXGBE_READ_REG(hw
, IXGBE_ANLP2
);
786 regs_buff
[1070] = IXGBE_READ_REG(hw
, IXGBE_ATLASCTL
);
789 regs_buff
[1071] = IXGBE_READ_REG(hw
, IXGBE_RDSTATCTL
);
790 for (i
= 0; i
< 8; i
++)
791 regs_buff
[1072 + i
] = IXGBE_READ_REG(hw
, IXGBE_RDSTAT(i
));
792 regs_buff
[1080] = IXGBE_READ_REG(hw
, IXGBE_RDHMPN
);
793 for (i
= 0; i
< 4; i
++)
794 regs_buff
[1081 + i
] = IXGBE_READ_REG(hw
, IXGBE_RIC_DW(i
));
795 regs_buff
[1085] = IXGBE_READ_REG(hw
, IXGBE_RDPROBE
);
796 regs_buff
[1086] = IXGBE_READ_REG(hw
, IXGBE_TDSTATCTL
);
797 for (i
= 0; i
< 8; i
++)
798 regs_buff
[1087 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDSTAT(i
));
799 regs_buff
[1095] = IXGBE_READ_REG(hw
, IXGBE_TDHMPN
);
800 for (i
= 0; i
< 4; i
++)
801 regs_buff
[1096 + i
] = IXGBE_READ_REG(hw
, IXGBE_TIC_DW(i
));
802 regs_buff
[1100] = IXGBE_READ_REG(hw
, IXGBE_TDPROBE
);
803 regs_buff
[1101] = IXGBE_READ_REG(hw
, IXGBE_TXBUFCTRL
);
804 regs_buff
[1102] = IXGBE_READ_REG(hw
, IXGBE_TXBUFDATA0
);
805 regs_buff
[1103] = IXGBE_READ_REG(hw
, IXGBE_TXBUFDATA1
);
806 regs_buff
[1104] = IXGBE_READ_REG(hw
, IXGBE_TXBUFDATA2
);
807 regs_buff
[1105] = IXGBE_READ_REG(hw
, IXGBE_TXBUFDATA3
);
808 regs_buff
[1106] = IXGBE_READ_REG(hw
, IXGBE_RXBUFCTRL
);
809 regs_buff
[1107] = IXGBE_READ_REG(hw
, IXGBE_RXBUFDATA0
);
810 regs_buff
[1108] = IXGBE_READ_REG(hw
, IXGBE_RXBUFDATA1
);
811 regs_buff
[1109] = IXGBE_READ_REG(hw
, IXGBE_RXBUFDATA2
);
812 regs_buff
[1110] = IXGBE_READ_REG(hw
, IXGBE_RXBUFDATA3
);
813 for (i
= 0; i
< 8; i
++)
814 regs_buff
[1111 + i
] = IXGBE_READ_REG(hw
, IXGBE_PCIE_DIAG(i
));
815 regs_buff
[1119] = IXGBE_READ_REG(hw
, IXGBE_RFVAL
);
816 regs_buff
[1120] = IXGBE_READ_REG(hw
, IXGBE_MDFTC1
);
817 regs_buff
[1121] = IXGBE_READ_REG(hw
, IXGBE_MDFTC2
);
818 regs_buff
[1122] = IXGBE_READ_REG(hw
, IXGBE_MDFTFIFO1
);
819 regs_buff
[1123] = IXGBE_READ_REG(hw
, IXGBE_MDFTFIFO2
);
820 regs_buff
[1124] = IXGBE_READ_REG(hw
, IXGBE_MDFTS
);
821 regs_buff
[1125] = IXGBE_READ_REG(hw
, IXGBE_PCIEECCCTL
);
822 regs_buff
[1126] = IXGBE_READ_REG(hw
, IXGBE_PBTXECC
);
823 regs_buff
[1127] = IXGBE_READ_REG(hw
, IXGBE_PBRXECC
);
825 /* 82599 X540 specific registers */
826 regs_buff
[1128] = IXGBE_READ_REG(hw
, IXGBE_MFLCN
);
828 /* 82599 X540 specific DCB registers */
829 regs_buff
[1129] = IXGBE_READ_REG(hw
, IXGBE_RTRUP2TC
);
830 regs_buff
[1130] = IXGBE_READ_REG(hw
, IXGBE_RTTUP2TC
);
831 for (i
= 0; i
< 4; i
++)
832 regs_buff
[1131 + i
] = IXGBE_READ_REG(hw
, IXGBE_TXLLQ(i
));
833 regs_buff
[1135] = IXGBE_READ_REG(hw
, IXGBE_RTTBCNRM
);
834 /* same as RTTQCNRM */
835 regs_buff
[1136] = IXGBE_READ_REG(hw
, IXGBE_RTTBCNRD
);
836 /* same as RTTQCNRR */
838 /* X540 specific DCB registers */
839 regs_buff
[1137] = IXGBE_READ_REG(hw
, IXGBE_RTTQCNCR
);
840 regs_buff
[1138] = IXGBE_READ_REG(hw
, IXGBE_RTTQCNTG
);
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}
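
/* eeprom.word_size is in 16-bit words, so the EEPROM length reported to
 * ethtool is twice that, in bytes; the ethtool core uses it to bound
 * ETHTOOL_GEEPROM/SEEPROM requests before they reach the driver. */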
static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
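
/* Worked example: a request with eeprom->offset = 3 and eeprom->len = 4
 * touches bytes 3..6, i.e. 16-bit words 1..3, so first_word = 1,
 * last_word = 3 and eeprom_len = 3 words are read; the (offset & 1)
 * adjustment then skips the first byte of word 1 when copying out. */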
885 static int ixgbe_set_eeprom(struct net_device
*netdev
,
886 struct ethtool_eeprom
*eeprom
, u8
*bytes
)
888 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
889 struct ixgbe_hw
*hw
= &adapter
->hw
;
892 int max_len
, first_word
, last_word
, ret_val
= 0;
895 if (eeprom
->len
== 0)
898 if (eeprom
->magic
!= (hw
->vendor_id
| (hw
->device_id
<< 16)))
901 max_len
= hw
->eeprom
.word_size
* 2;
903 first_word
= eeprom
->offset
>> 1;
904 last_word
= (eeprom
->offset
+ eeprom
->len
- 1) >> 1;
905 eeprom_buff
= kmalloc(max_len
, GFP_KERNEL
);
911 if (eeprom
->offset
& 1) {
913 * need read/modify/write of first changed EEPROM word
914 * only the second byte of the word is being modified
916 ret_val
= hw
->eeprom
.ops
.read(hw
, first_word
, &eeprom_buff
[0]);
922 if ((eeprom
->offset
+ eeprom
->len
) & 1) {
924 * need read/modify/write of last changed EEPROM word
925 * only the first byte of the word is being modified
927 ret_val
= hw
->eeprom
.ops
.read(hw
, last_word
,
928 &eeprom_buff
[last_word
- first_word
]);
933 /* Device's eeprom is always little-endian, word addressable */
934 for (i
= 0; i
< last_word
- first_word
+ 1; i
++)
935 le16_to_cpus(&eeprom_buff
[i
]);
937 memcpy(ptr
, bytes
, eeprom
->len
);
939 for (i
= 0; i
< last_word
- first_word
+ 1; i
++)
940 cpu_to_le16s(&eeprom_buff
[i
]);
942 ret_val
= hw
->eeprom
.ops
.write_buffer(hw
, first_word
,
943 last_word
- first_word
+ 1,
946 /* Update the checksum */
948 hw
->eeprom
.ops
.update_checksum(hw
);
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 nvm_track_id;

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	nvm_track_id = (adapter->eeprom_verh << 16) |
		       adapter->eeprom_verl;
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
		 nvm_track_id);

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
}
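
/* "ethtool -i <iface>" shows the strings filled in here; fw_version is
 * the NVM track id built from eeprom_verh/eeprom_verl, printed as a hex
 * word such as 0x800003df (example value only). */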
static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}
987 static int ixgbe_set_ringparam(struct net_device
*netdev
,
988 struct ethtool_ringparam
*ring
)
990 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
991 struct ixgbe_ring
*temp_ring
;
993 u32 new_rx_count
, new_tx_count
;
995 if ((ring
->rx_mini_pending
) || (ring
->rx_jumbo_pending
))
998 new_tx_count
= clamp_t(u32
, ring
->tx_pending
,
999 IXGBE_MIN_TXD
, IXGBE_MAX_TXD
);
1000 new_tx_count
= ALIGN(new_tx_count
, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE
);
1002 new_rx_count
= clamp_t(u32
, ring
->rx_pending
,
1003 IXGBE_MIN_RXD
, IXGBE_MAX_RXD
);
1004 new_rx_count
= ALIGN(new_rx_count
, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE
);
1006 if ((new_tx_count
== adapter
->tx_ring_count
) &&
1007 (new_rx_count
== adapter
->rx_ring_count
)) {
1012 while (test_and_set_bit(__IXGBE_RESETTING
, &adapter
->state
))
1013 usleep_range(1000, 2000);
1015 if (!netif_running(adapter
->netdev
)) {
1016 for (i
= 0; i
< adapter
->num_tx_queues
; i
++)
1017 adapter
->tx_ring
[i
]->count
= new_tx_count
;
1018 for (i
= 0; i
< adapter
->num_rx_queues
; i
++)
1019 adapter
->rx_ring
[i
]->count
= new_rx_count
;
1020 adapter
->tx_ring_count
= new_tx_count
;
1021 adapter
->rx_ring_count
= new_rx_count
;
1025 /* allocate temporary buffer to store rings in */
1026 i
= max_t(int, adapter
->num_tx_queues
, adapter
->num_rx_queues
);
1027 temp_ring
= vmalloc(i
* sizeof(struct ixgbe_ring
));
1034 ixgbe_down(adapter
);
1037 * Setup new Tx resources and free the old Tx resources in that order.
1038 * We can then assign the new resources to the rings via a memcpy.
1039 * The advantage to this approach is that we are guaranteed to still
1040 * have resources even in the case of an allocation failure.
1042 if (new_tx_count
!= adapter
->tx_ring_count
) {
1043 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
1044 memcpy(&temp_ring
[i
], adapter
->tx_ring
[i
],
1045 sizeof(struct ixgbe_ring
));
1047 temp_ring
[i
].count
= new_tx_count
;
1048 err
= ixgbe_setup_tx_resources(&temp_ring
[i
]);
1052 ixgbe_free_tx_resources(&temp_ring
[i
]);
1058 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
1059 ixgbe_free_tx_resources(adapter
->tx_ring
[i
]);
1061 memcpy(adapter
->tx_ring
[i
], &temp_ring
[i
],
1062 sizeof(struct ixgbe_ring
));
1065 adapter
->tx_ring_count
= new_tx_count
;
1068 /* Repeat the process for the Rx rings if needed */
1069 if (new_rx_count
!= adapter
->rx_ring_count
) {
1070 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
1071 memcpy(&temp_ring
[i
], adapter
->rx_ring
[i
],
1072 sizeof(struct ixgbe_ring
));
1074 temp_ring
[i
].count
= new_rx_count
;
1075 err
= ixgbe_setup_rx_resources(&temp_ring
[i
]);
1079 ixgbe_free_rx_resources(&temp_ring
[i
]);
1086 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
1087 ixgbe_free_rx_resources(adapter
->rx_ring
[i
]);
1089 memcpy(adapter
->rx_ring
[i
], &temp_ring
[i
],
1090 sizeof(struct ixgbe_ring
));
1093 adapter
->rx_ring_count
= new_rx_count
;
1100 clear_bit(__IXGBE_RESETTING
, &adapter
->state
);
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
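
/* ETH_SS_TEST sizes the result array for ETHTOOL_TEST ("ethtool -t"),
 * ETH_SS_STATS the u64 array for ETHTOOL_GSTATS ("ethtool -S"); both
 * counts must match what ixgbe_get_strings() emits. */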
1116 static void ixgbe_get_ethtool_stats(struct net_device
*netdev
,
1117 struct ethtool_stats
*stats
, u64
*data
)
1119 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
1120 struct rtnl_link_stats64 temp
;
1121 const struct rtnl_link_stats64
*net_stats
;
1123 struct ixgbe_ring
*ring
;
1127 ixgbe_update_stats(adapter
);
1128 net_stats
= dev_get_stats(netdev
, &temp
);
1129 for (i
= 0; i
< IXGBE_GLOBAL_STATS_LEN
; i
++) {
1130 switch (ixgbe_gstrings_stats
[i
].type
) {
1132 p
= (char *) net_stats
+
1133 ixgbe_gstrings_stats
[i
].stat_offset
;
1136 p
= (char *) adapter
+
1137 ixgbe_gstrings_stats
[i
].stat_offset
;
1144 data
[i
] = (ixgbe_gstrings_stats
[i
].sizeof_stat
==
1145 sizeof(u64
)) ? *(u64
*)p
: *(u32
*)p
;
1147 for (j
= 0; j
< netdev
->num_tx_queues
; j
++) {
1148 ring
= adapter
->tx_ring
[j
];
1153 #ifdef BP_EXTENDED_STATS
1163 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1164 data
[i
] = ring
->stats
.packets
;
1165 data
[i
+1] = ring
->stats
.bytes
;
1166 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1168 #ifdef BP_EXTENDED_STATS
1169 data
[i
] = ring
->stats
.yields
;
1170 data
[i
+1] = ring
->stats
.misses
;
1171 data
[i
+2] = ring
->stats
.cleaned
;
1175 for (j
= 0; j
< IXGBE_NUM_RX_QUEUES
; j
++) {
1176 ring
= adapter
->rx_ring
[j
];
1181 #ifdef BP_EXTENDED_STATS
1191 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1192 data
[i
] = ring
->stats
.packets
;
1193 data
[i
+1] = ring
->stats
.bytes
;
1194 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1196 #ifdef BP_EXTENDED_STATS
1197 data
[i
] = ring
->stats
.yields
;
1198 data
[i
+1] = ring
->stats
.misses
;
1199 data
[i
+2] = ring
->stats
.cleaned
;
1204 for (j
= 0; j
< IXGBE_MAX_PACKET_BUFFERS
; j
++) {
1205 data
[i
++] = adapter
->stats
.pxontxc
[j
];
1206 data
[i
++] = adapter
->stats
.pxofftxc
[j
];
1208 for (j
= 0; j
< IXGBE_MAX_PACKET_BUFFERS
; j
++) {
1209 data
[i
++] = adapter
->stats
.pxonrxc
[j
];
1210 data
[i
++] = adapter
->stats
.pxoffrxc
[j
];
1214 static void ixgbe_get_strings(struct net_device
*netdev
, u32 stringset
,
1217 char *p
= (char *)data
;
1220 switch (stringset
) {
1222 for (i
= 0; i
< IXGBE_TEST_LEN
; i
++) {
1223 memcpy(data
, ixgbe_gstrings_test
[i
], ETH_GSTRING_LEN
);
1224 data
+= ETH_GSTRING_LEN
;
1228 for (i
= 0; i
< IXGBE_GLOBAL_STATS_LEN
; i
++) {
1229 memcpy(p
, ixgbe_gstrings_stats
[i
].stat_string
,
1231 p
+= ETH_GSTRING_LEN
;
1233 for (i
= 0; i
< netdev
->num_tx_queues
; i
++) {
1234 sprintf(p
, "tx_queue_%u_packets", i
);
1235 p
+= ETH_GSTRING_LEN
;
1236 sprintf(p
, "tx_queue_%u_bytes", i
);
1237 p
+= ETH_GSTRING_LEN
;
1238 #ifdef BP_EXTENDED_STATS
1239 sprintf(p
, "tx_queue_%u_bp_napi_yield", i
);
1240 p
+= ETH_GSTRING_LEN
;
1241 sprintf(p
, "tx_queue_%u_bp_misses", i
);
1242 p
+= ETH_GSTRING_LEN
;
1243 sprintf(p
, "tx_queue_%u_bp_cleaned", i
);
1244 p
+= ETH_GSTRING_LEN
;
1245 #endif /* BP_EXTENDED_STATS */
1247 for (i
= 0; i
< IXGBE_NUM_RX_QUEUES
; i
++) {
1248 sprintf(p
, "rx_queue_%u_packets", i
);
1249 p
+= ETH_GSTRING_LEN
;
1250 sprintf(p
, "rx_queue_%u_bytes", i
);
1251 p
+= ETH_GSTRING_LEN
;
1252 #ifdef BP_EXTENDED_STATS
1253 sprintf(p
, "rx_queue_%u_bp_poll_yield", i
);
1254 p
+= ETH_GSTRING_LEN
;
1255 sprintf(p
, "rx_queue_%u_bp_misses", i
);
1256 p
+= ETH_GSTRING_LEN
;
1257 sprintf(p
, "rx_queue_%u_bp_cleaned", i
);
1258 p
+= ETH_GSTRING_LEN
;
1259 #endif /* BP_EXTENDED_STATS */
1261 for (i
= 0; i
< IXGBE_MAX_PACKET_BUFFERS
; i
++) {
1262 sprintf(p
, "tx_pb_%u_pxon", i
);
1263 p
+= ETH_GSTRING_LEN
;
1264 sprintf(p
, "tx_pb_%u_pxoff", i
);
1265 p
+= ETH_GSTRING_LEN
;
1267 for (i
= 0; i
< IXGBE_MAX_PACKET_BUFFERS
; i
++) {
1268 sprintf(p
, "rx_pb_%u_pxon", i
);
1269 p
+= ETH_GSTRING_LEN
;
1270 sprintf(p
, "rx_pb_%u_pxoff", i
);
1271 p
+= ETH_GSTRING_LEN
;
1273 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1278 static int ixgbe_link_test(struct ixgbe_adapter
*adapter
, u64
*data
)
1280 struct ixgbe_hw
*hw
= &adapter
->hw
;
1284 if (ixgbe_removed(hw
->hw_addr
)) {
1290 hw
->mac
.ops
.check_link(hw
, &link_speed
, &link_up
, true);
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
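
/* For PATTERN_TEST/SET_READ_TEST entries, array_len registers spaced
 * 0x40 apart are exercised; TABLE32_TEST walks a table of 32-bit
 * registers 4 bytes apart, and TABLE64_TEST_LO/HI walk the low and high
 * halves of 64-bit table entries 8 bytes apart (see the strides used in
 * ixgbe_reg_test() below). */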
1324 /* default 82599 register test */
1325 static const struct ixgbe_reg_test reg_test_82599
[] = {
1326 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST
, 0x8007FFF0, 0x8007FFF0 },
1327 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST
, 0x8007FFF0, 0x8007FFF0 },
1328 { IXGBE_PFCTOP
, 1, PATTERN_TEST
, 0xFFFFFFFF, 0xFFFFFFFF },
1329 { IXGBE_VLNCTRL
, 1, PATTERN_TEST
, 0x00000000, 0x00000000 },
1330 { IXGBE_RDBAL(0), 4, PATTERN_TEST
, 0xFFFFFF80, 0xFFFFFF80 },
1331 { IXGBE_RDBAH(0), 4, PATTERN_TEST
, 0xFFFFFFFF, 0xFFFFFFFF },
1332 { IXGBE_RDLEN(0), 4, PATTERN_TEST
, 0x000FFF80, 0x000FFFFF },
1333 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST
, 0, IXGBE_RXDCTL_ENABLE
},
1334 { IXGBE_RDT(0), 4, PATTERN_TEST
, 0x0000FFFF, 0x0000FFFF },
1335 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST
, 0, 0 },
1336 { IXGBE_FCRTH(0), 1, PATTERN_TEST
, 0x8007FFF0, 0x8007FFF0 },
1337 { IXGBE_FCTTV(0), 1, PATTERN_TEST
, 0xFFFFFFFF, 0xFFFFFFFF },
1338 { IXGBE_TDBAL(0), 4, PATTERN_TEST
, 0xFFFFFF80, 0xFFFFFFFF },
1339 { IXGBE_TDBAH(0), 4, PATTERN_TEST
, 0xFFFFFFFF, 0xFFFFFFFF },
1340 { IXGBE_TDLEN(0), 4, PATTERN_TEST
, 0x000FFF80, 0x000FFF80 },
1341 { IXGBE_RXCTRL
, 1, SET_READ_TEST
, 0x00000001, 0x00000001 },
1342 { IXGBE_RAL(0), 16, TABLE64_TEST_LO
, 0xFFFFFFFF, 0xFFFFFFFF },
1343 { IXGBE_RAL(0), 16, TABLE64_TEST_HI
, 0x8001FFFF, 0x800CFFFF },
1344 { IXGBE_MTA(0), 128, TABLE32_TEST
, 0xFFFFFFFF, 0xFFFFFFFF },
1348 /* default 82598 register test */
1349 static const struct ixgbe_reg_test reg_test_82598
[] = {
1350 { IXGBE_FCRTL(0), 1, PATTERN_TEST
, 0x8007FFF0, 0x8007FFF0 },
1351 { IXGBE_FCRTH(0), 1, PATTERN_TEST
, 0x8007FFF0, 0x8007FFF0 },
1352 { IXGBE_PFCTOP
, 1, PATTERN_TEST
, 0xFFFFFFFF, 0xFFFFFFFF },
1353 { IXGBE_VLNCTRL
, 1, PATTERN_TEST
, 0x00000000, 0x00000000 },
1354 { IXGBE_RDBAL(0), 4, PATTERN_TEST
, 0xFFFFFF80, 0xFFFFFFFF },
1355 { IXGBE_RDBAH(0), 4, PATTERN_TEST
, 0xFFFFFFFF, 0xFFFFFFFF },
1356 { IXGBE_RDLEN(0), 4, PATTERN_TEST
, 0x000FFF80, 0x000FFFFF },
1357 /* Enable all four RX queues before testing. */
1358 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST
, 0, IXGBE_RXDCTL_ENABLE
},
1359 /* RDH is read-only for 82598, only test RDT. */
1360 { IXGBE_RDT(0), 4, PATTERN_TEST
, 0x0000FFFF, 0x0000FFFF },
1361 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST
, 0, 0 },
1362 { IXGBE_FCRTH(0), 1, PATTERN_TEST
, 0x8007FFF0, 0x8007FFF0 },
1363 { IXGBE_FCTTV(0), 1, PATTERN_TEST
, 0xFFFFFFFF, 0xFFFFFFFF },
1364 { IXGBE_TIPG
, 1, PATTERN_TEST
, 0x000000FF, 0x000000FF },
1365 { IXGBE_TDBAL(0), 4, PATTERN_TEST
, 0xFFFFFF80, 0xFFFFFFFF },
1366 { IXGBE_TDBAH(0), 4, PATTERN_TEST
, 0xFFFFFFFF, 0xFFFFFFFF },
1367 { IXGBE_TDLEN(0), 4, PATTERN_TEST
, 0x000FFF80, 0x000FFFFF },
1368 { IXGBE_RXCTRL
, 1, SET_READ_TEST
, 0x00000003, 0x00000003 },
1369 { IXGBE_DTXCTL
, 1, SET_READ_TEST
, 0x00000005, 0x00000005 },
1370 { IXGBE_RAL(0), 16, TABLE64_TEST_LO
, 0xFFFFFFFF, 0xFFFFFFFF },
1371 { IXGBE_RAL(0), 16, TABLE64_TEST_HI
, 0x800CFFFF, 0x800CFFFF },
1372 { IXGBE_MTA(0), 128, TABLE32_TEST
, 0xFFFFFFFF, 0xFFFFFFFF },
1376 static bool reg_pattern_test(struct ixgbe_adapter
*adapter
, u64
*data
, int reg
,
1377 u32 mask
, u32 write
)
1379 u32 pat
, val
, before
;
1380 static const u32 test_pattern
[] = {
1381 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1383 if (ixgbe_removed(adapter
->hw
.hw_addr
)) {
1387 for (pat
= 0; pat
< ARRAY_SIZE(test_pattern
); pat
++) {
1388 before
= ixgbe_read_reg(&adapter
->hw
, reg
);
1389 ixgbe_write_reg(&adapter
->hw
, reg
, test_pattern
[pat
] & write
);
1390 val
= ixgbe_read_reg(&adapter
->hw
, reg
);
1391 if (val
!= (test_pattern
[pat
] & write
& mask
)) {
1392 e_err(drv
, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1393 reg
, val
, (test_pattern
[pat
] & write
& mask
));
1395 ixgbe_write_reg(&adapter
->hw
, reg
, before
);
1398 ixgbe_write_reg(&adapter
->hw
, reg
, before
);
1403 static bool reg_set_and_check(struct ixgbe_adapter
*adapter
, u64
*data
, int reg
,
1404 u32 mask
, u32 write
)
1408 if (ixgbe_removed(adapter
->hw
.hw_addr
)) {
1412 before
= ixgbe_read_reg(&adapter
->hw
, reg
);
1413 ixgbe_write_reg(&adapter
->hw
, reg
, write
& mask
);
1414 val
= ixgbe_read_reg(&adapter
->hw
, reg
);
1415 if ((write
& mask
) != (val
& mask
)) {
1416 e_err(drv
, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1417 reg
, (val
& mask
), (write
& mask
));
1419 ixgbe_write_reg(&adapter
->hw
, reg
, before
);
1422 ixgbe_write_reg(&adapter
->hw
, reg
, before
);
1426 static int ixgbe_reg_test(struct ixgbe_adapter
*adapter
, u64
*data
)
1428 const struct ixgbe_reg_test
*test
;
1429 u32 value
, before
, after
;
1432 if (ixgbe_removed(adapter
->hw
.hw_addr
)) {
1433 e_err(drv
, "Adapter removed - register test blocked\n");
1437 switch (adapter
->hw
.mac
.type
) {
1438 case ixgbe_mac_82598EB
:
1439 toggle
= 0x7FFFF3FF;
1440 test
= reg_test_82598
;
1442 case ixgbe_mac_82599EB
:
1443 case ixgbe_mac_X540
:
1444 case ixgbe_mac_X550
:
1445 case ixgbe_mac_X550EM_x
:
1446 toggle
= 0x7FFFF30F;
1447 test
= reg_test_82599
;
1455 * Because the status register is such a special case,
1456 * we handle it separately from the rest of the register
1457 * tests. Some bits are read-only, some toggle, and some
1458 * are writeable on newer MACs.
1460 before
= ixgbe_read_reg(&adapter
->hw
, IXGBE_STATUS
);
1461 value
= (ixgbe_read_reg(&adapter
->hw
, IXGBE_STATUS
) & toggle
);
1462 ixgbe_write_reg(&adapter
->hw
, IXGBE_STATUS
, toggle
);
1463 after
= ixgbe_read_reg(&adapter
->hw
, IXGBE_STATUS
) & toggle
;
1464 if (value
!= after
) {
1465 e_err(drv
, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1470 /* restore previous status */
1471 ixgbe_write_reg(&adapter
->hw
, IXGBE_STATUS
, before
);
1474 * Perform the remainder of the register test, looping through
1475 * the test table until we either fail or reach the null entry.
1478 for (i
= 0; i
< test
->array_len
; i
++) {
1481 switch (test
->test_type
) {
1483 b
= reg_pattern_test(adapter
, data
,
1484 test
->reg
+ (i
* 0x40),
1489 b
= reg_set_and_check(adapter
, data
,
1490 test
->reg
+ (i
* 0x40),
1495 ixgbe_write_reg(&adapter
->hw
,
1496 test
->reg
+ (i
* 0x40),
1500 b
= reg_pattern_test(adapter
, data
,
1501 test
->reg
+ (i
* 4),
1505 case TABLE64_TEST_LO
:
1506 b
= reg_pattern_test(adapter
, data
,
1507 test
->reg
+ (i
* 8),
1511 case TABLE64_TEST_HI
:
1512 b
= reg_pattern_test(adapter
, data
,
1513 (test
->reg
+ 4) + (i
* 8),
1528 static int ixgbe_eeprom_test(struct ixgbe_adapter
*adapter
, u64
*data
)
1530 struct ixgbe_hw
*hw
= &adapter
->hw
;
1531 if (hw
->eeprom
.ops
.validate_checksum(hw
, NULL
))
1538 static irqreturn_t
ixgbe_test_intr(int irq
, void *data
)
1540 struct net_device
*netdev
= (struct net_device
*) data
;
1541 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
1543 adapter
->test_icr
|= IXGBE_READ_REG(&adapter
->hw
, IXGBE_EICR
);
1548 static int ixgbe_intr_test(struct ixgbe_adapter
*adapter
, u64
*data
)
1550 struct net_device
*netdev
= adapter
->netdev
;
1551 u32 mask
, i
= 0, shared_int
= true;
1552 u32 irq
= adapter
->pdev
->irq
;
1556 /* Hook up test interrupt handler just for this test */
1557 if (adapter
->msix_entries
) {
1558 /* NOTE: we don't test MSI-X interrupts here, yet */
1560 } else if (adapter
->flags
& IXGBE_FLAG_MSI_ENABLED
) {
1562 if (request_irq(irq
, ixgbe_test_intr
, 0, netdev
->name
,
1567 } else if (!request_irq(irq
, ixgbe_test_intr
, IRQF_PROBE_SHARED
,
1568 netdev
->name
, netdev
)) {
1570 } else if (request_irq(irq
, ixgbe_test_intr
, IRQF_SHARED
,
1571 netdev
->name
, netdev
)) {
1575 e_info(hw
, "testing %s interrupt\n", shared_int
?
1576 "shared" : "unshared");
1578 /* Disable all the interrupts */
1579 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EIMC
, 0xFFFFFFFF);
1580 IXGBE_WRITE_FLUSH(&adapter
->hw
);
1581 usleep_range(10000, 20000);
1583 /* Test each interrupt */
1584 for (; i
< 10; i
++) {
1585 /* Interrupt to test */
1590 * Disable the interrupts to be reported in
1591 * the cause register and then force the same
1592 * interrupt and see if one gets posted. If
1593 * an interrupt was posted to the bus, the
1596 adapter
->test_icr
= 0;
1597 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EIMC
,
1598 ~mask
& 0x00007FFF);
1599 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EICS
,
1600 ~mask
& 0x00007FFF);
1601 IXGBE_WRITE_FLUSH(&adapter
->hw
);
1602 usleep_range(10000, 20000);
1604 if (adapter
->test_icr
& mask
) {
1611 * Enable the interrupt to be reported in the cause
1612 * register and then force the same interrupt and see
1613 * if one gets posted. If an interrupt was not posted
1614 * to the bus, the test failed.
1616 adapter
->test_icr
= 0;
1617 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EIMS
, mask
);
1618 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EICS
, mask
);
1619 IXGBE_WRITE_FLUSH(&adapter
->hw
);
1620 usleep_range(10000, 20000);
1622 if (!(adapter
->test_icr
& mask
)) {
1629 * Disable the other interrupts to be reported in
1630 * the cause register and then force the other
1631 * interrupts and see if any get posted. If
1632 * an interrupt was posted to the bus, the
1635 adapter
->test_icr
= 0;
1636 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EIMC
,
1637 ~mask
& 0x00007FFF);
1638 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EICS
,
1639 ~mask
& 0x00007FFF);
1640 IXGBE_WRITE_FLUSH(&adapter
->hw
);
1641 usleep_range(10000, 20000);
1643 if (adapter
->test_icr
) {
1650 /* Disable all the interrupts */
1651 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EIMC
, 0xFFFFFFFF);
1652 IXGBE_WRITE_FLUSH(&adapter
->hw
);
1653 usleep_range(10000, 20000);
1655 /* Unhook test interrupt handler */
1656 free_irq(irq
, netdev
);
1661 static void ixgbe_free_desc_rings(struct ixgbe_adapter
*adapter
)
1663 struct ixgbe_ring
*tx_ring
= &adapter
->test_tx_ring
;
1664 struct ixgbe_ring
*rx_ring
= &adapter
->test_rx_ring
;
1665 struct ixgbe_hw
*hw
= &adapter
->hw
;
1668 /* shut down the DMA engines now so they can be reinitialized later */
1671 hw
->mac
.ops
.disable_rx(hw
);
1672 ixgbe_disable_rx_queue(adapter
, rx_ring
);
1675 reg_ctl
= IXGBE_READ_REG(hw
, IXGBE_TXDCTL(tx_ring
->reg_idx
));
1676 reg_ctl
&= ~IXGBE_TXDCTL_ENABLE
;
1677 IXGBE_WRITE_REG(hw
, IXGBE_TXDCTL(tx_ring
->reg_idx
), reg_ctl
);
1679 switch (hw
->mac
.type
) {
1680 case ixgbe_mac_82599EB
:
1681 case ixgbe_mac_X540
:
1682 case ixgbe_mac_X550
:
1683 case ixgbe_mac_X550EM_x
:
1684 reg_ctl
= IXGBE_READ_REG(hw
, IXGBE_DMATXCTL
);
1685 reg_ctl
&= ~IXGBE_DMATXCTL_TE
;
1686 IXGBE_WRITE_REG(hw
, IXGBE_DMATXCTL
, reg_ctl
);
1692 ixgbe_reset(adapter
);
1694 ixgbe_free_tx_resources(&adapter
->test_tx_ring
);
1695 ixgbe_free_rx_resources(&adapter
->test_rx_ring
);
1698 static int ixgbe_setup_desc_rings(struct ixgbe_adapter
*adapter
)
1700 struct ixgbe_ring
*tx_ring
= &adapter
->test_tx_ring
;
1701 struct ixgbe_ring
*rx_ring
= &adapter
->test_rx_ring
;
1702 struct ixgbe_hw
*hw
= &adapter
->hw
;
1707 /* Setup Tx descriptor ring and Tx buffers */
1708 tx_ring
->count
= IXGBE_DEFAULT_TXD
;
1709 tx_ring
->queue_index
= 0;
1710 tx_ring
->dev
= &adapter
->pdev
->dev
;
1711 tx_ring
->netdev
= adapter
->netdev
;
1712 tx_ring
->reg_idx
= adapter
->tx_ring
[0]->reg_idx
;
1714 err
= ixgbe_setup_tx_resources(tx_ring
);
1718 switch (adapter
->hw
.mac
.type
) {
1719 case ixgbe_mac_82599EB
:
1720 case ixgbe_mac_X540
:
1721 case ixgbe_mac_X550
:
1722 case ixgbe_mac_X550EM_x
:
1723 reg_data
= IXGBE_READ_REG(&adapter
->hw
, IXGBE_DMATXCTL
);
1724 reg_data
|= IXGBE_DMATXCTL_TE
;
1725 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_DMATXCTL
, reg_data
);
1731 ixgbe_configure_tx_ring(adapter
, tx_ring
);
1733 /* Setup Rx Descriptor ring and Rx buffers */
1734 rx_ring
->count
= IXGBE_DEFAULT_RXD
;
1735 rx_ring
->queue_index
= 0;
1736 rx_ring
->dev
= &adapter
->pdev
->dev
;
1737 rx_ring
->netdev
= adapter
->netdev
;
1738 rx_ring
->reg_idx
= adapter
->rx_ring
[0]->reg_idx
;
1740 err
= ixgbe_setup_rx_resources(rx_ring
);
1746 hw
->mac
.ops
.disable_rx(hw
);
1748 ixgbe_configure_rx_ring(adapter
, rx_ring
);
1750 rctl
= IXGBE_READ_REG(&adapter
->hw
, IXGBE_RXCTRL
);
1751 rctl
|= IXGBE_RXCTRL_DMBYPS
;
1752 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_RXCTRL
, rctl
);
1754 hw
->mac
.ops
.enable_rx(hw
);
1759 ixgbe_free_desc_rings(adapter
);
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	/* X540 and X550 needs to set the MACC.FLU bit to force link up */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
		break;
	default:
		if (hw->mac.orig_autoc) {
			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
		} else {
			return 10;
		}
	}
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}

static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}

static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}
static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

	data = kmap(rx_buffer->page) + rx_buffer->page_offset;

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

	kunmap(rx_buffer->page);

	return match;
}
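
/* Note (added commentary, not upstream text): ixgbe_create_lbtest_frame()
 * fills the whole buffer with 0xFF, overwrites a stretch just past the
 * midpoint with 0xAA, and plants single 0xBE/0xAF marker bytes at fixed
 * offsets after the midpoint.  ixgbe_check_lbtest_frame() only re-checks a
 * leading 0xFF byte and those two markers, which is enough to recognise a
 * looped-back test frame without comparing the whole payload.
 */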
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer;
	struct ixgbe_tx_buffer *tx_buffer;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);

		/* increment Rx/Tx next to clean counters */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}

static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;
	u32 flags_orig = adapter->flags;

	/* DCB can modify the frames on Tx */
	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */
	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);
	adapter->flags = flags_orig;

	return ret_val;
}

static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(hw, "Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		data[2] = 1;
		data[3] = 1;
		data[4] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		struct ixgbe_hw *hw = &adapter->hw;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;
			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					data[4] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		/* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
		else if (hw->mac.ops.disable_tx_laser)
			hw->mac.ops.disable_tx_laser(hw);
	} else {
		e_info(hw, "online testing starting\n");

		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
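
/* Illustrative usage (not part of the driver): ixgbe_diag_test() is invoked
 * from userspace through the ethtool self-test interface; "eth0" is a
 * placeholder interface name.
 *
 *   ethtool -t eth0 offline   # register, eeprom, interrupt, loopback and
 *                             # link tests; the interface is reset
 *   ethtool -t eth0 online    # link test only, no disruption
 */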
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}

static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
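
/* Illustrative usage (not part of the driver): the Wake-on-LAN callbacks
 * above map to the ethtool "wol" option; "eth0" is a placeholder.
 *
 *   ethtool eth0              # shows "Supports Wake-on" / "Wake-on"
 *   ethtool -s eth0 wol g     # wake on magic packet only
 *   ethtool -s eth0 wol d     # disable wake-up
 */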
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
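
/* Illustrative usage (not part of the driver): ixgbe_set_phys_id() backs the
 * port-identify command, which blinks the adapter LED; "eth0" is a
 * placeholder.
 *
 *   ethtool -p eth0 5         # blink the LED for 5 seconds
 */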
static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}

/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}
	return false;
}

static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
	bool need_reset = false;

	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* mixed Rx/Tx */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		adapter->tx_itr_setting = adapter->rx_itr_setting;

	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting != 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev >= IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev != 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}

	/* check the old value and enable RSC if necessary */
	need_reset |= ixgbe_update_rsc(adapter);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
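
/* Illustrative usage (not part of the driver): interrupt moderation is
 * queried and changed with the ethtool coalescing commands; "eth0" is a
 * placeholder.  When Tx and Rx share a vector, only rx-usecs is accepted and
 * it applies to both directions.
 *
 *   ethtool -c eth0                 # show current rx-usecs/tx-usecs
 *   ethtool -C eth0 rx-usecs 50     # roughly 20k interrupts/s of moderation
 *   ethtool -C eth0 rx-usecs 0      # disable moderation (may raise CPU use)
 */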
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}

static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					   struct ixgbe_fdir_filter *input,
					   u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule, *parent;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}

static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}

static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	u8 queue;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/* ring_cookie is a masked into a set of queues and ixgbe pools or
	 * we use the drop index.
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = IXGBE_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (!vf && (ring >= adapter->num_rx_queues))
			return -EINVAL;
		else if (vf &&
			 ((vf > adapter->num_vfs) ||
			   ring >= adapter->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = adapter->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) *
				adapter->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx, queue);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
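
/* Illustrative usage (not part of the driver): perfect Flow Director filters
 * are added through the ethtool ntuple interface; "eth0" is a placeholder and
 * the addresses/ports are examples.  The ntuple feature must be enabled
 * first.
 *
 *   ethtool -K eth0 ntuple on
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.1 dst-port 80 \
 *           action 6 loc 1           # steer to Rx queue 6, filter index 1
 *   ethtool -N eth0 flow-type udp4 dst-port 53 action -1 loc 2   # drop
 *   ethtool -N eth0 delete 1         # remove the filter at index 1
 */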
static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}

#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc;
		unsigned int pf_pool = adapter->num_vfs;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
		else
			mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
		else
			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}
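
/* Illustrative usage (not part of the driver): the UDP RSS field selection
 * handled above is controlled with the rx-flow-hash command; "eth0" is a
 * placeholder.
 *
 *   ethtool -N eth0 rx-flow-hash udp4 sdfn   # hash UDP/IPv4 on IPs + ports
 *   ethtool -N eth0 rx-flow-hash udp4 sd     # back to IPs only (default)
 */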
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type < ixgbe_mac_X550)
		return 16;
	else
		return 64;
}

static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return sizeof(adapter->rss_key);
}

static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return ixgbe_rss_indir_tbl_entries(adapter);
}

static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);

	for (i = 0; i < reta_size; i++)
		indir[i] = adapter->rss_indir_tbl[i];
}

static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (indir)
		ixgbe_get_reta(adapter, indir);

	if (key)
		memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));

	return 0;
}

static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);

	if (hfunc)
		return -EINVAL;

	/* Fill out the redirection table */
	if (indir) {
		int max_queues = min_t(int, adapter->num_rx_queues,
				       ixgbe_rss_indir_tbl_max(adapter));

		/*Allow at least 2 queues w/ SR-IOV.*/
		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
		    (max_queues < 2))
			max_queues = 2;

		/* Verify user input. */
		for (i = 0; i < reta_entries; i++)
			if (indir[i] >= max_queues)
				return -EINVAL;

		for (i = 0; i < reta_entries; i++)
			adapter->rss_indir_tbl[i] = indir[i];
	}

	/* Fill out the rss hash key */
	if (key)
		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));

	ixgbe_store_reta(adapter);

	return 0;
}
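
/* Illustrative usage (not part of the driver): the RSS indirection table and
 * hash key callbacks above are exercised with ethtool -x/-X; "eth0" is a
 * placeholder.
 *
 *   ethtool -x eth0            # dump indirection table and hash key
 *   ethtool -X eth0 equal 8    # spread flows evenly over the first 8 queues
 *   ethtool -X eth0 default    # restore the default spread
 */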
static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE |
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		if (adapter->ptp_clock)
			info->phc_index = ptp_clock_index(adapter->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types =
			(1 << HWTSTAMP_TX_OFF) |
			(1 << HWTSTAMP_TX_ON);

		info->rx_filters =
			(1 << HWTSTAMP_FILTER_NONE) |
			(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}
	return 0;
}
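
/* Illustrative usage (not part of the driver): the capabilities reported by
 * ixgbe_get_ts_info() show up in "ethtool -T"; "eth0" is a placeholder.
 *
 *   ethtool -T eth0            # lists SOF_TIMESTAMPING_* modes, the PTP
 *                              # clock index and supported Rx filters
 */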
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
	unsigned int max_combined;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* SR-IOV currently only allows one queue on the PF */
		max_combined = 1;
	} else if (tcs > 1) {
		/* For DCB report channels per traffic class */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
	} else if (adapter->atr_sample_rate) {
		/* support up to 64 queues with ATR */
		max_combined = IXGBE_MAX_FDIR_INDICES;
	} else {
		/* support up to 16 queues with RSS */
		max_combined = ixgbe_max_rss_indices(adapter);
	}

	return max_combined;
}

static void ixgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = ixgbe_max_channels(adapter);

	/* report info for other vector */
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	/* record RSS queues */
	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;

	/* nothing else to report if RSS is disabled */
	if (ch->combined_count == 1)
		return;

	/* we do not support ATR queueing if SR-IOV is enabled */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return;

	/* same thing goes for being DCB enabled */
	if (netdev_get_num_tc(dev) > 1)
		return;

	/* if ATR is disabled we can exit */
	if (!adapter->atr_sample_rate)
		return;

	/* report flow director queues as maximum channels */
	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}

static int ixgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	unsigned int count = ch->combined_count;
	u8 max_rss_indices = ixgbe_max_rss_indices(adapter);

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	adapter->ring_feature[RING_F_FDIR].limit = count;

	/* cap RSS limit */
	if (count > max_rss_indices)
		count = max_rss_indices;
	adapter->ring_feature[RING_F_RSS].limit = count;

#ifdef IXGBE_FCOE
	/* cap FCoE limit at 8 */
	if (count > IXGBE_FCRETA_SIZE)
		count = IXGBE_FCRETA_SIZE;
	adapter->ring_feature[RING_F_FCOE].limit = count;

#endif
	/* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
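
/* Illustrative usage (not part of the driver): queue counts are read and
 * resized through the ethtool channel commands; "eth0" is a placeholder.
 *
 *   ethtool -l eth0                # show maximum and current channel counts
 *   ethtool -L eth0 combined 16    # use 16 combined Tx/Rx queue pairs
 */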
static int ixgbe_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status;
	u8 sff8472_rev, addr_mode;
	bool page_swap = false;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status)
		return -EIO;

	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}

static int ixgbe_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
	u8 databyte = 0xFF;
	int i = 0;

	if (ee->len == 0)
		return -EINVAL;

	for (i = ee->offset; i < ee->offset + ee->len; i++) {
		/* I2C reads can take long time */
		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			return -EBUSY;

		if (i < ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status)
			return -EIO;

		data[i - ee->offset] = databyte;
	}

	return 0;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings		= ixgbe_get_settings,
	.set_settings		= ixgbe_set_settings,
	.get_drvinfo		= ixgbe_get_drvinfo,
	.get_regs_len		= ixgbe_get_regs_len,
	.get_regs		= ixgbe_get_regs,
	.get_wol		= ixgbe_get_wol,
	.set_wol		= ixgbe_set_wol,
	.nway_reset		= ixgbe_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= ixgbe_get_eeprom_len,
	.get_eeprom		= ixgbe_get_eeprom,
	.set_eeprom		= ixgbe_set_eeprom,
	.get_ringparam		= ixgbe_get_ringparam,
	.set_ringparam		= ixgbe_set_ringparam,
	.get_pauseparam		= ixgbe_get_pauseparam,
	.set_pauseparam		= ixgbe_set_pauseparam,
	.get_msglevel		= ixgbe_get_msglevel,
	.set_msglevel		= ixgbe_set_msglevel,
	.self_test		= ixgbe_diag_test,
	.get_strings		= ixgbe_get_strings,
	.set_phys_id		= ixgbe_set_phys_id,
	.get_sset_count		= ixgbe_get_sset_count,
	.get_ethtool_stats	= ixgbe_get_ethtool_stats,
	.get_coalesce		= ixgbe_get_coalesce,
	.set_coalesce		= ixgbe_set_coalesce,
	.get_rxnfc		= ixgbe_get_rxnfc,
	.set_rxnfc		= ixgbe_set_rxnfc,
	.get_rxfh_indir_size	= ixgbe_rss_indir_size,
	.get_rxfh_key_size	= ixgbe_get_rxfh_key_size,
	.get_rxfh		= ixgbe_get_rxfh,
	.set_rxfh		= ixgbe_set_rxfh,
	.get_channels		= ixgbe_get_channels,
	.set_channels		= ixgbe_set_channels,
	.get_ts_info		= ixgbe_get_ts_info,
	.get_module_info	= ixgbe_get_module_info,
	.get_module_eeprom	= ixgbe_get_module_eeprom,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}