1 /*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "LICENSE.GPL".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28 /* ethtool support for ixgbe */
29
30 #include <linux/types.h>
31 #include <linux/module.h>
32 #include <linux/pci.h>
33 #include <linux/netdevice.h>
34 #include <linux/ethtool.h>
35 #include <linux/vmalloc.h>
36 #include <linux/highmem.h>
37 #ifdef SIOCETHTOOL
38 #include <asm/uaccess.h>
39
40 #include "ixgbe.h"
41
42 #ifndef ETH_GSTRING_LEN
43 #define ETH_GSTRING_LEN 32
44 #endif
45
46 #define IXGBE_ALL_RAR_ENTRIES 16
47
48 #ifdef ETHTOOL_OPS_COMPAT
49 #include "kcompat_ethtool.c"
50 #endif
51 #ifdef ETHTOOL_GSTATS
52 struct ixgbe_stats {
53 char stat_string[ETH_GSTRING_LEN];
54 int sizeof_stat;
55 int stat_offset;
56 };
57
58 #define IXGBE_NETDEV_STAT(_net_stat) { \
59 .stat_string = #_net_stat, \
60 .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
61 .stat_offset = offsetof(struct net_device_stats, _net_stat) \
62 }
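
/*
 * Consumption sketch (guarded by #if 0, never compiled): each entry above
 * records the size and byte offset of one struct net_device_stats member so
 * the stats dump can read it generically. read_net_stat() is a hypothetical
 * helper mirroring the width-dispatching read in ixgbe_get_ethtool_stats()
 * further down in this file.
 */
#if 0
static u64 read_net_stat(const struct net_device_stats *net_stats,
			 const struct ixgbe_stats *s)
{
	const char *p = (const char *)net_stats + s->stat_offset;

	/* members may be u64 or 32-bit depending on kernel; widen as needed */
	return (s->sizeof_stat == sizeof(u64)) ? *(const u64 *)p :
						 *(const u32 *)p;
}
#endif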
63 static const struct ixgbe_stats ixgbe_gstrings_net_stats[] = {
64 IXGBE_NETDEV_STAT(rx_packets),
65 IXGBE_NETDEV_STAT(tx_packets),
66 IXGBE_NETDEV_STAT(rx_bytes),
67 IXGBE_NETDEV_STAT(tx_bytes),
68 IXGBE_NETDEV_STAT(rx_errors),
69 IXGBE_NETDEV_STAT(tx_errors),
70 IXGBE_NETDEV_STAT(rx_dropped),
71 IXGBE_NETDEV_STAT(tx_dropped),
72 IXGBE_NETDEV_STAT(multicast),
73 IXGBE_NETDEV_STAT(collisions),
74 IXGBE_NETDEV_STAT(rx_over_errors),
75 IXGBE_NETDEV_STAT(rx_crc_errors),
76 IXGBE_NETDEV_STAT(rx_frame_errors),
77 IXGBE_NETDEV_STAT(rx_fifo_errors),
78 IXGBE_NETDEV_STAT(rx_missed_errors),
79 IXGBE_NETDEV_STAT(tx_aborted_errors),
80 IXGBE_NETDEV_STAT(tx_carrier_errors),
81 IXGBE_NETDEV_STAT(tx_fifo_errors),
82 IXGBE_NETDEV_STAT(tx_heartbeat_errors),
83 };
84
85 #define IXGBE_STAT(_name, _stat) { \
86 .stat_string = _name, \
87 .sizeof_stat = FIELD_SIZEOF(struct ixgbe_adapter, _stat), \
88 .stat_offset = offsetof(struct ixgbe_adapter, _stat) \
89 }
90 static struct ixgbe_stats ixgbe_gstrings_stats[] = {
91 IXGBE_STAT("rx_pkts_nic", stats.gprc),
92 IXGBE_STAT("tx_pkts_nic", stats.gptc),
93 IXGBE_STAT("rx_bytes_nic", stats.gorc),
94 IXGBE_STAT("tx_bytes_nic", stats.gotc),
95 IXGBE_STAT("lsc_int", lsc_int),
96 IXGBE_STAT("tx_busy", tx_busy),
97 IXGBE_STAT("non_eop_descs", non_eop_descs),
98 #ifndef CONFIG_IXGBE_NAPI
99 IXGBE_STAT("rx_dropped_backlog", rx_dropped_backlog),
100 #endif
101 IXGBE_STAT("broadcast", stats.bprc),
102 IXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]) ,
103 IXGBE_STAT("tx_timeout_count", tx_timeout_count),
104 IXGBE_STAT("tx_restart_queue", restart_queue),
105 IXGBE_STAT("rx_long_length_errors", stats.roc),
106 IXGBE_STAT("rx_short_length_errors", stats.ruc),
107 IXGBE_STAT("tx_flow_control_xon", stats.lxontxc),
108 IXGBE_STAT("rx_flow_control_xon", stats.lxonrxc),
109 IXGBE_STAT("tx_flow_control_xoff", stats.lxofftxc),
110 IXGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc),
111 IXGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error),
112 IXGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
113 IXGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
114 #ifndef IXGBE_NO_LRO
115 IXGBE_STAT("lro_aggregated", lro_stats.coal),
116 IXGBE_STAT("lro_flushed", lro_stats.flushed),
117 #endif /* IXGBE_NO_LRO */
118 IXGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources),
119 IXGBE_STAT("hw_rsc_aggregated", rsc_total_count),
120 IXGBE_STAT("hw_rsc_flushed", rsc_total_flush),
121 #ifdef HAVE_TX_MQ
122 IXGBE_STAT("fdir_match", stats.fdirmatch),
123 IXGBE_STAT("fdir_miss", stats.fdirmiss),
124 IXGBE_STAT("fdir_overflow", fdir_overflow),
125 #endif /* HAVE_TX_MQ */
126 #ifdef IXGBE_FCOE
127 IXGBE_STAT("fcoe_bad_fccrc", stats.fccrc),
128 IXGBE_STAT("fcoe_last_errors", stats.fclast),
129 IXGBE_STAT("rx_fcoe_dropped", stats.fcoerpdc),
130 IXGBE_STAT("rx_fcoe_packets", stats.fcoeprc),
131 IXGBE_STAT("rx_fcoe_dwords", stats.fcoedwrc),
132 IXGBE_STAT("fcoe_noddp", stats.fcoe_noddp),
133 IXGBE_STAT("fcoe_noddp_ext_buff", stats.fcoe_noddp_ext_buff),
134 IXGBE_STAT("tx_fcoe_packets", stats.fcoeptc),
135 IXGBE_STAT("tx_fcoe_dwords", stats.fcoedwtc),
136 #endif /* IXGBE_FCOE */
137 IXGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
138 IXGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
139 IXGBE_STAT("os2bmc_tx_by_host", stats.o2bspc),
140 IXGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc),
141 };
142
143 #define IXGBE_QUEUE_STATS_LEN \
144 ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
145 ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
146 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
147 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
148 #define IXGBE_NETDEV_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_net_stats)
149 #define IXGBE_PB_STATS_LEN ( \
150 (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
151 IXGBE_FLAG_DCB_ENABLED) ? \
152 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
153 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
154 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
155 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
156 / sizeof(u64) : 0)
157 #define IXGBE_VF_STATS_LEN \
158 ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_vfs) * \
159 (sizeof(struct vf_stats) / sizeof(u64)))
160 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
161 IXGBE_NETDEV_STATS_LEN + \
162 IXGBE_PB_STATS_LEN + \
163 IXGBE_QUEUE_STATS_LEN + \
164 IXGBE_VF_STATS_LEN)
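
/*
 * Worked example, assuming struct ixgbe_queue_stats holds two u64 counters
 * (packets and bytes, matching the per-queue strings emitted in
 * ixgbe_get_strings()): an adapter with 4 Tx and 4 Rx queues contributes
 * (4 + 4) * 2 = 16 queue entries to IXGBE_STATS_LEN, on top of the global,
 * netdev, packet-buffer and VF entries.
 */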
165
166 #endif /* ETHTOOL_GSTATS */
167 #ifdef ETHTOOL_TEST
168 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
169 "Register test (offline)", "Eeprom test (offline)",
170 "Interrupt test (offline)", "Loopback test (offline)",
171 "Link test (on/offline)"
172 };
173 #define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
174 #endif /* ETHTOOL_TEST */
175
176 int ixgbe_get_settings(struct net_device *netdev,
177 struct ethtool_cmd *ecmd)
178 {
179 struct ixgbe_adapter *adapter = netdev_priv(netdev);
180 struct ixgbe_hw *hw = &adapter->hw;
181 u32 link_speed = 0;
182 bool link_up;
183
184 ecmd->supported = SUPPORTED_10000baseT_Full;
185 ecmd->autoneg = AUTONEG_ENABLE;
186 ecmd->transceiver = XCVR_EXTERNAL;
187 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
188 (hw->phy.multispeed_fiber)) {
189 ecmd->supported |= (SUPPORTED_1000baseT_Full |
190 SUPPORTED_Autoneg);
191 switch (hw->mac.type) {
192 case ixgbe_mac_X540:
193 ecmd->supported |= SUPPORTED_100baseT_Full;
194 break;
195 default:
196 break;
197 }
198
199 ecmd->advertising = ADVERTISED_Autoneg;
200 if (hw->phy.autoneg_advertised) {
201 if (hw->phy.autoneg_advertised &
202 IXGBE_LINK_SPEED_100_FULL)
203 ecmd->advertising |= ADVERTISED_100baseT_Full;
204 if (hw->phy.autoneg_advertised &
205 IXGBE_LINK_SPEED_10GB_FULL)
206 ecmd->advertising |= ADVERTISED_10000baseT_Full;
207 if (hw->phy.autoneg_advertised &
208 IXGBE_LINK_SPEED_1GB_FULL)
209 ecmd->advertising |= ADVERTISED_1000baseT_Full;
210 } else {
211 /*
212 * Default advertised modes in case
213 * phy.autoneg_advertised isn't set.
214 */
215 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
216 ADVERTISED_1000baseT_Full);
217 if (hw->mac.type == ixgbe_mac_X540)
218 ecmd->advertising |= ADVERTISED_100baseT_Full;
219 }
220
221 if (hw->phy.media_type == ixgbe_media_type_copper) {
222 ecmd->supported |= SUPPORTED_TP;
223 ecmd->advertising |= ADVERTISED_TP;
224 ecmd->port = PORT_TP;
225 } else {
226 ecmd->supported |= SUPPORTED_FIBRE;
227 ecmd->advertising |= ADVERTISED_FIBRE;
228 ecmd->port = PORT_FIBRE;
229 }
230 } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
231 /* Set as FIBRE until SERDES defined in kernel */
232 if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
233 ecmd->supported = (SUPPORTED_1000baseT_Full |
234 SUPPORTED_FIBRE);
235 ecmd->advertising = (ADVERTISED_1000baseT_Full |
236 ADVERTISED_FIBRE);
237 ecmd->port = PORT_FIBRE;
238 ecmd->autoneg = AUTONEG_DISABLE;
239 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE)
240 || (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
241 ecmd->supported |= (SUPPORTED_1000baseT_Full |
242 SUPPORTED_Autoneg |
243 SUPPORTED_FIBRE);
244 ecmd->advertising = (ADVERTISED_10000baseT_Full |
245 ADVERTISED_1000baseT_Full |
246 ADVERTISED_Autoneg |
247 ADVERTISED_FIBRE);
248 ecmd->port = PORT_FIBRE;
249 } else {
250 ecmd->supported |= (SUPPORTED_1000baseT_Full |
251 SUPPORTED_FIBRE);
252 ecmd->advertising = (ADVERTISED_10000baseT_Full |
253 ADVERTISED_1000baseT_Full |
254 ADVERTISED_FIBRE);
255 ecmd->port = PORT_FIBRE;
256 }
257 } else {
258 ecmd->supported |= SUPPORTED_FIBRE;
259 ecmd->advertising = (ADVERTISED_10000baseT_Full |
260 ADVERTISED_FIBRE);
261 ecmd->port = PORT_FIBRE;
262 ecmd->autoneg = AUTONEG_DISABLE;
263 }
264
265 #ifdef HAVE_ETHTOOL_SFP_DISPLAY_PORT
266 /* Get PHY type */
267 switch (adapter->hw.phy.type) {
268 case ixgbe_phy_tn:
269 case ixgbe_phy_aq:
270 case ixgbe_phy_cu_unknown:
271 /* Copper 10G-BASET */
272 ecmd->port = PORT_TP;
273 break;
274 case ixgbe_phy_qt:
275 ecmd->port = PORT_FIBRE;
276 break;
277 case ixgbe_phy_nl:
278 case ixgbe_phy_sfp_passive_tyco:
279 case ixgbe_phy_sfp_passive_unknown:
280 case ixgbe_phy_sfp_ftl:
281 case ixgbe_phy_sfp_avago:
282 case ixgbe_phy_sfp_intel:
283 case ixgbe_phy_sfp_unknown:
284 switch (adapter->hw.phy.sfp_type) {
285 /* SFP+ devices, further checking needed */
286 case ixgbe_sfp_type_da_cu:
287 case ixgbe_sfp_type_da_cu_core0:
288 case ixgbe_sfp_type_da_cu_core1:
289 ecmd->port = PORT_DA;
290 break;
291 case ixgbe_sfp_type_sr:
292 case ixgbe_sfp_type_lr:
293 case ixgbe_sfp_type_srlr_core0:
294 case ixgbe_sfp_type_srlr_core1:
295 ecmd->port = PORT_FIBRE;
296 break;
297 case ixgbe_sfp_type_not_present:
298 ecmd->port = PORT_NONE;
299 break;
300 case ixgbe_sfp_type_1g_cu_core0:
301 case ixgbe_sfp_type_1g_cu_core1:
302 ecmd->port = PORT_TP;
303 ecmd->supported = SUPPORTED_TP;
304 ecmd->advertising = (ADVERTISED_1000baseT_Full |
305 ADVERTISED_TP);
306 break;
307 case ixgbe_sfp_type_1g_sx_core0:
308 case ixgbe_sfp_type_1g_sx_core1:
309 ecmd->port = PORT_FIBRE;
310 ecmd->supported = SUPPORTED_FIBRE;
311 ecmd->advertising = (ADVERTISED_1000baseT_Full |
312 ADVERTISED_FIBRE);
313 break;
314 case ixgbe_sfp_type_unknown:
315 default:
316 ecmd->port = PORT_OTHER;
317 break;
318 }
319 break;
320 case ixgbe_phy_xaui:
321 ecmd->port = PORT_NONE;
322 break;
323 case ixgbe_phy_unknown:
324 case ixgbe_phy_generic:
325 case ixgbe_phy_sfp_unsupported:
326 default:
327 ecmd->port = PORT_OTHER;
328 break;
329 }
330 #endif
331
332 if (!in_interrupt()) {
333 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
334 } else {
335 /*
336 * this case is a special workaround for RHEL5 bonding
337 * that calls this routine from interrupt context
338 */
339 link_speed = adapter->link_speed;
340 link_up = adapter->link_up;
341 }
342
343 if (link_up) {
344 switch (link_speed) {
345 case IXGBE_LINK_SPEED_10GB_FULL:
346 ecmd->speed = SPEED_10000;
347 break;
348 case IXGBE_LINK_SPEED_1GB_FULL:
349 ecmd->speed = SPEED_1000;
350 break;
351 case IXGBE_LINK_SPEED_100_FULL:
352 ecmd->speed = SPEED_100;
353 break;
354 default:
355 break;
356 }
357 ecmd->duplex = DUPLEX_FULL;
358 } else {
359 ecmd->speed = -1;
360 ecmd->duplex = -1;
361 }
362
363 return 0;
364 }
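
/*
 * Usage sketch (user space, guarded by #if 0): ixgbe_get_settings() services
 * the legacy ETHTOOL_GSET ioctl, which is what a plain "ethtool <ifname>"
 * issues. query_link_speed() is a hypothetical minimal caller; the interface
 * name is supplied by the user.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <string.h>
#include <unistd.h>

/* returns the legacy 16-bit speed field, or -1 on error */
static int query_link_speed(const char *ifname)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int ret = -1;

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ecmd;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		ret = ecmd.speed;	/* filled in by the handler above */
	close(fd);
	return ret;
}
#endif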
365
366 static int ixgbe_set_settings(struct net_device *netdev,
367 struct ethtool_cmd *ecmd)
368 {
369 struct ixgbe_adapter *adapter = netdev_priv(netdev);
370 struct ixgbe_hw *hw = &adapter->hw;
371 u32 advertised, old;
372 s32 err = 0;
373
374 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
375 (hw->phy.multispeed_fiber)) {
376 /*
377 * this function does not support duplex forcing, but can
378 * limit the advertising of the adapter to the specified speed
379 */
380 if (ecmd->autoneg == AUTONEG_DISABLE)
381 return -EINVAL;
382
383 if (ecmd->advertising & ~ecmd->supported)
384 return -EINVAL;
385
386 old = hw->phy.autoneg_advertised;
387 advertised = 0;
388 if (ecmd->advertising & ADVERTISED_10000baseT_Full)
389 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
390
391 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
392 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
393
394 if (ecmd->advertising & ADVERTISED_100baseT_Full)
395 advertised |= IXGBE_LINK_SPEED_100_FULL;
396
397 if (old == advertised)
398 return err;
399 /* this sets the link speed and restarts auto-neg */
400 hw->mac.autotry_restart = true;
401 err = hw->mac.ops.setup_link(hw, advertised, true, true);
402 if (err) {
403 e_info(probe, "setup link failed with code %d\n", err);
404 hw->mac.ops.setup_link(hw, old, true, true);
405 }
406 }
407 return err;
408 }
409
410 static void ixgbe_get_pauseparam(struct net_device *netdev,
411 struct ethtool_pauseparam *pause)
412 {
413 struct ixgbe_adapter *adapter = netdev_priv(netdev);
414 struct ixgbe_hw *hw = &adapter->hw;
415
416 if (hw->fc.disable_fc_autoneg)
417 pause->autoneg = 0;
418 else
419 pause->autoneg = 1;
420
421 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
422 pause->rx_pause = 1;
423 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
424 pause->tx_pause = 1;
425 } else if (hw->fc.current_mode == ixgbe_fc_full) {
426 pause->rx_pause = 1;
427 pause->tx_pause = 1;
428 }
429 }
430
431 static int ixgbe_set_pauseparam(struct net_device *netdev,
432 struct ethtool_pauseparam *pause)
433 {
434 struct ixgbe_adapter *adapter = netdev_priv(netdev);
435 struct ixgbe_hw *hw = &adapter->hw;
436 struct ixgbe_fc_info fc = hw->fc;
437
438 	/* 82598 does not support link flow control with DCB enabled */
439 if ((hw->mac.type == ixgbe_mac_82598EB) &&
440 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
441 return -EINVAL;
442
443 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
444
445 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
446 fc.requested_mode = ixgbe_fc_full;
447 else if (pause->rx_pause)
448 fc.requested_mode = ixgbe_fc_rx_pause;
449 else if (pause->tx_pause)
450 fc.requested_mode = ixgbe_fc_tx_pause;
451 else
452 fc.requested_mode = ixgbe_fc_none;
453
454 	/* if anything changed, update hw->fc and apply the new flow control config */
455 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
456 hw->fc = fc;
457 if (netif_running(netdev))
458 ixgbe_reinit_locked(adapter);
459 else
460 ixgbe_reset(adapter);
461 }
462
463 return 0;
464 }
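
/*
 * Example of the mapping above: "ethtool -A <ifname> autoneg off rx on
 * tx off" arrives here with pause->autoneg == 0, rx_pause == 1 and
 * tx_pause == 0, which disables flow-control autonegotiation and selects
 * ixgbe_fc_rx_pause as the requested mode.
 */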
465
466 static u32 ixgbe_get_msglevel(struct net_device *netdev)
467 {
468 struct ixgbe_adapter *adapter = netdev_priv(netdev);
469 return adapter->msg_enable;
470 }
471
472 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
473 {
474 struct ixgbe_adapter *adapter = netdev_priv(netdev);
475 adapter->msg_enable = data;
476 }
477
478 static int ixgbe_get_regs_len(struct net_device *netdev)
479 {
480 #define IXGBE_REGS_LEN 1129
481 return IXGBE_REGS_LEN * sizeof(u32);
482 }
483
484 #define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
485
486
487 static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
488 void *p)
489 {
490 struct ixgbe_adapter *adapter = netdev_priv(netdev);
491 struct ixgbe_hw *hw = &adapter->hw;
492 u32 *regs_buff = p;
493 u8 i;
494
495 printk(KERN_DEBUG "ixgbe_get_regs_1\n");
496 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
497 printk(KERN_DEBUG "ixgbe_get_regs_2 0x%p\n", hw->hw_addr);
498
499 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
500
501 /* General Registers */
502 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
503 printk(KERN_DEBUG "ixgbe_get_regs_3\n");
504 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
505 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
506 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
507 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
508 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
509 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
510 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
511
512 printk(KERN_DEBUG "ixgbe_get_regs_4\n");
513
514 /* NVM Register */
515 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
516 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
517 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
518 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
519 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
520 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
521 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
522 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
523 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
524 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
525
526 /* Interrupt */
527 	/* don't read EICR because it can clear interrupt causes; instead
528 	 * read EICS, which is a shadow copy that doesn't clear EICR */
529 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
530 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
531 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
532 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
533 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
534 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
535 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
536 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
537 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
538 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
539 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
540 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
541
542 /* Flow Control */
543 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
544 regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
545 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
546 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
547 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
548 for (i = 0; i < 8; i++) {
549 switch (hw->mac.type) {
550 case ixgbe_mac_82598EB:
551 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
552 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
553 break;
554 case ixgbe_mac_82599EB:
555 case ixgbe_mac_X540:
556 regs_buff[35 + i] = IXGBE_READ_REG(hw,
557 IXGBE_FCRTL_82599(i));
558 regs_buff[43 + i] = IXGBE_READ_REG(hw,
559 IXGBE_FCRTH_82599(i));
560 break;
561 default:
562 break;
563 }
564 }
565 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
566 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
567
568 /* Receive DMA */
569 for (i = 0; i < 64; i++)
570 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
571 for (i = 0; i < 64; i++)
572 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
573 for (i = 0; i < 64; i++)
574 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
575 for (i = 0; i < 64; i++)
576 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
577 for (i = 0; i < 64; i++)
578 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
579 for (i = 0; i < 64; i++)
580 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
581 for (i = 0; i < 16; i++)
582 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
583 for (i = 0; i < 16; i++)
584 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
585 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
586 for (i = 0; i < 8; i++)
587 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
588 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
589 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
590
591 /* Receive */
592 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
593 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
594 for (i = 0; i < 16; i++)
595 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
596 for (i = 0; i < 16; i++)
597 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
598 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
599 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
600 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
601 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
602 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
603 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
604 for (i = 0; i < 8; i++)
605 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
606 for (i = 0; i < 8; i++)
607 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
608 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
609
610 /* Transmit */
611 for (i = 0; i < 32; i++)
612 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
613 for (i = 0; i < 32; i++)
614 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
615 for (i = 0; i < 32; i++)
616 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
617 for (i = 0; i < 32; i++)
618 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
619 for (i = 0; i < 32; i++)
620 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
621 for (i = 0; i < 32; i++)
622 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
623 for (i = 0; i < 32; i++)
624 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
625 for (i = 0; i < 32; i++)
626 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
627 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
628 for (i = 0; i < 16; i++)
629 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
630 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
631 for (i = 0; i < 8; i++)
632 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
633 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
634
635 /* Wake Up */
636 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
637 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
638 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
639 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
640 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
641 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
642 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
643 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
644 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
645
646 /* DCB */
647 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
648 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
649 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
650 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
651 for (i = 0; i < 8; i++)
652 regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
653 for (i = 0; i < 8; i++)
654 regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
655 for (i = 0; i < 8; i++)
656 regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
657 for (i = 0; i < 8; i++)
658 regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
659 for (i = 0; i < 8; i++)
660 regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
661 for (i = 0; i < 8; i++)
662 regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
663
664 /* Statistics */
665 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
666 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
667 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
668 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
669 for (i = 0; i < 8; i++)
670 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
671 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
672 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
673 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
674 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
675 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
676 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
677 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
678 for (i = 0; i < 8; i++)
679 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
680 for (i = 0; i < 8; i++)
681 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
682 for (i = 0; i < 8; i++)
683 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
684 for (i = 0; i < 8; i++)
685 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
686 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
687 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
688 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
689 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
690 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
691 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
692 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
693 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
694 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
695 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
696 regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
697 regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
698 for (i = 0; i < 8; i++)
699 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
700 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
701 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
702 regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
703 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
704 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
705 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
706 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
707 regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
708 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
709 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
710 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
711 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
712 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
713 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
714 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
715 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
716 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
717 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
718 regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
719 for (i = 0; i < 16; i++)
720 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
721 for (i = 0; i < 16; i++)
722 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
723 for (i = 0; i < 16; i++)
724 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
725 for (i = 0; i < 16; i++)
726 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
727
728 /* MAC */
729 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
730 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
731 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
732 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
733 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
734 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
735 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
736 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
737 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
738 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
739 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
740 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
741 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
742 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
743 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
744 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
745 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
746 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
747 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
748 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
749 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
750 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
751 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
752 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
753 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
754 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
755 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
756 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
757 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
758 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
759 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
760 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
761 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
762
763 /* Diagnostic */
764 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
765 for (i = 0; i < 8; i++)
766 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
767 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
768 for (i = 0; i < 4; i++)
769 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
770 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
771 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
772 for (i = 0; i < 8; i++)
773 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
774 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
775 for (i = 0; i < 4; i++)
776 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
777 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
778 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
779 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
780 regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
781 regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
782 regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
783 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
784 regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
785 regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
786 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
787 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
788 for (i = 0; i < 8; i++)
789 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
790 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
791 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
792 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
793 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
794 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
795 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
796 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
797 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
798 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
799
800 	/* 82599/X540-specific registers */
801 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
802 }
803
804 static int ixgbe_get_eeprom_len(struct net_device *netdev)
805 {
806 struct ixgbe_adapter *adapter = netdev_priv(netdev);
807 return adapter->hw.eeprom.word_size * 2;
808 }
809
810 static int ixgbe_get_eeprom(struct net_device *netdev,
811 struct ethtool_eeprom *eeprom, u8 *bytes)
812 {
813 struct ixgbe_adapter *adapter = netdev_priv(netdev);
814 struct ixgbe_hw *hw = &adapter->hw;
815 u16 *eeprom_buff;
816 int first_word, last_word, eeprom_len;
817 int ret_val = 0;
818 u16 i;
819
820 if (eeprom->len == 0)
821 return -EINVAL;
822
823 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
824
825 first_word = eeprom->offset >> 1;
826 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
827 eeprom_len = last_word - first_word + 1;
828
829 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
830 if (!eeprom_buff)
831 return -ENOMEM;
832
833 ret_val = ixgbe_read_eeprom_buffer(hw, first_word, eeprom_len,
834 eeprom_buff);
835
836 /* Device's eeprom is always little-endian, word addressable */
837 for (i = 0; i < eeprom_len; i++)
838 le16_to_cpus(&eeprom_buff[i]);
839
840 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
841 kfree(eeprom_buff);
842
843 return ret_val;
844 }
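
/*
 * Worked example of the word math above: a request with offset == 3 and
 * len == 4 covers bytes 3..6, so first_word = 1, last_word = 3 and three
 * 16-bit words are read; the final memcpy() skips one leading byte
 * (eeprom->offset & 1) so exactly the requested bytes are returned.
 */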
845
846 static int ixgbe_set_eeprom(struct net_device *netdev,
847 struct ethtool_eeprom *eeprom, u8 *bytes)
848 {
849 struct ixgbe_adapter *adapter = netdev_priv(netdev);
850 struct ixgbe_hw *hw = &adapter->hw;
851 u16 *eeprom_buff;
852 void *ptr;
853 int max_len, first_word, last_word, ret_val = 0;
854 u16 i;
855
856 if (eeprom->len == 0)
857 return -EINVAL;
858
859 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
860 return -EINVAL;
861
862 max_len = hw->eeprom.word_size * 2;
863
864 first_word = eeprom->offset >> 1;
865 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
866 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
867 if (!eeprom_buff)
868 return -ENOMEM;
869
870 ptr = eeprom_buff;
871
872 if (eeprom->offset & 1) {
873 /*
874 * need read/modify/write of first changed EEPROM word
875 * only the second byte of the word is being modified
876 */
877 ret_val = ixgbe_read_eeprom(hw, first_word, &eeprom_buff[0]);
878 if (ret_val)
879 goto err;
880
881 ptr++;
882 }
883 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
884 /*
885 * need read/modify/write of last changed EEPROM word
886 * only the first byte of the word is being modified
887 */
888 ret_val = ixgbe_read_eeprom(hw, last_word,
889 &eeprom_buff[last_word - first_word]);
890 if (ret_val)
891 goto err;
892 }
893
894 /* Device's eeprom is always little-endian, word addressable */
895 for (i = 0; i < last_word - first_word + 1; i++)
896 le16_to_cpus(&eeprom_buff[i]);
897
898 memcpy(ptr, bytes, eeprom->len);
899
900 for (i = 0; i < last_word - first_word + 1; i++)
901 cpu_to_le16s(&eeprom_buff[i]);
902
903 ret_val = ixgbe_write_eeprom_buffer(hw, first_word,
904 last_word - first_word + 1,
905 eeprom_buff);
906
907 /* Update the checksum */
908 if (ret_val == 0)
909 ixgbe_update_eeprom_checksum(hw);
910
911 err:
912 kfree(eeprom_buff);
913 return ret_val;
914 }
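
/*
 * Example of the read/modify/write handling above: writing len == 2 bytes
 * at offset == 1 touches words 0 and 1. Word 0 is pre-read because only its
 * second byte changes and word 1 because only its first byte changes, so
 * the untouched bytes survive the buffered write-back.
 */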
915
916 static void ixgbe_get_drvinfo(struct net_device *netdev,
917 struct ethtool_drvinfo *drvinfo)
918 {
919 struct ixgbe_adapter *adapter = netdev_priv(netdev);
920
921 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
922
923 strlcpy(drvinfo->version, ixgbe_driver_version,
924 sizeof(drvinfo->version));
925
926 strlcpy(drvinfo->fw_version, adapter->eeprom_id,
927 sizeof(drvinfo->fw_version));
928
929 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
930 sizeof(drvinfo->bus_info));
931
932 drvinfo->n_stats = IXGBE_STATS_LEN;
933 drvinfo->testinfo_len = IXGBE_TEST_LEN;
934 drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
935 }
936
937 static void ixgbe_get_ringparam(struct net_device *netdev,
938 struct ethtool_ringparam *ring)
939 {
940 struct ixgbe_adapter *adapter = netdev_priv(netdev);
941
942 ring->rx_max_pending = IXGBE_MAX_RXD;
943 ring->tx_max_pending = IXGBE_MAX_TXD;
944 ring->rx_mini_max_pending = 0;
945 ring->rx_jumbo_max_pending = 0;
946 ring->rx_pending = adapter->rx_ring_count;
947 ring->tx_pending = adapter->tx_ring_count;
948 ring->rx_mini_pending = 0;
949 ring->rx_jumbo_pending = 0;
950 }
951
952 static int ixgbe_set_ringparam(struct net_device *netdev,
953 struct ethtool_ringparam *ring)
954 {
955 struct ixgbe_adapter *adapter = netdev_priv(netdev);
956 struct ixgbe_ring *tx_ring = NULL, *rx_ring = NULL;
957 u32 new_rx_count, new_tx_count;
958 int i, err = 0;
959
960 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
961 return -EINVAL;
962
963 new_tx_count = clamp_t(u32, ring->tx_pending,
964 IXGBE_MIN_TXD, IXGBE_MAX_TXD);
965 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
966
967 new_rx_count = clamp_t(u32, ring->rx_pending,
968 IXGBE_MIN_RXD, IXGBE_MAX_RXD);
969 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
970
971 /* if nothing to do return success */
972 if ((new_tx_count == adapter->tx_ring_count) &&
973 (new_rx_count == adapter->rx_ring_count))
974 return 0;
975
976 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
977 usleep_range(1000, 2000);
978
979 if (!netif_running(adapter->netdev)) {
980 for (i = 0; i < adapter->num_tx_queues; i++)
981 adapter->tx_ring[i]->count = new_tx_count;
982 for (i = 0; i < adapter->num_rx_queues; i++)
983 adapter->rx_ring[i]->count = new_rx_count;
984 adapter->tx_ring_count = new_tx_count;
985 adapter->rx_ring_count = new_rx_count;
986 goto clear_reset;
987 }
988
989 /* alloc updated Tx resources */
990 if (new_tx_count != adapter->tx_ring_count) {
991 tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
992 if (!tx_ring) {
993 err = -ENOMEM;
994 goto clear_reset;
995 }
996
997 for (i = 0; i < adapter->num_tx_queues; i++) {
998 /* clone ring and setup updated count */
999 tx_ring[i] = *adapter->tx_ring[i];
1000 tx_ring[i].count = new_tx_count;
1001 err = ixgbe_setup_tx_resources(&tx_ring[i]);
1002 if (err) {
1003 while (i) {
1004 i--;
1005 ixgbe_free_tx_resources(&tx_ring[i]);
1006 }
1007
1008 vfree(tx_ring);
1009 tx_ring = NULL;
1010
1011 goto clear_reset;
1012 }
1013 }
1014 }
1015
1016 /* alloc updated Rx resources */
1017 if (new_rx_count != adapter->rx_ring_count) {
1018 rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
1019 if (!rx_ring) {
1020 err = -ENOMEM;
1021 goto clear_reset;
1022 }
1023
1024 for (i = 0; i < adapter->num_rx_queues; i++) {
1025 /* clone ring and setup updated count */
1026 rx_ring[i] = *adapter->rx_ring[i];
1027 rx_ring[i].count = new_rx_count;
1028 err = ixgbe_setup_rx_resources(&rx_ring[i]);
1029 if (err) {
1030 while (i) {
1031 i--;
1032 ixgbe_free_rx_resources(&rx_ring[i]);
1033 }
1034
1035 vfree(rx_ring);
1036 rx_ring = NULL;
1037
1038 goto clear_reset;
1039 }
1040 }
1041 }
1042
1043 /* bring interface down to prepare for update */
1044 ixgbe_down(adapter);
1045
1046 /* Tx */
1047 if (tx_ring) {
1048 for (i = 0; i < adapter->num_tx_queues; i++) {
1049 ixgbe_free_tx_resources(adapter->tx_ring[i]);
1050 *adapter->tx_ring[i] = tx_ring[i];
1051 }
1052 adapter->tx_ring_count = new_tx_count;
1053
1054 vfree(tx_ring);
1055 tx_ring = NULL;
1056 }
1057
1058 /* Rx */
1059 if (rx_ring) {
1060 for (i = 0; i < adapter->num_rx_queues; i++) {
1061 ixgbe_free_rx_resources(adapter->rx_ring[i]);
1062 *adapter->rx_ring[i] = rx_ring[i];
1063 }
1064 adapter->rx_ring_count = new_rx_count;
1065
1066 vfree(rx_ring);
1067 rx_ring = NULL;
1068 }
1069
1070 /* restore interface using new values */
1071 ixgbe_up(adapter);
1072
1073 clear_reset:
1074 /* free Tx resources if Rx error is encountered */
1075 if (tx_ring) {
1076 for (i = 0; i < adapter->num_tx_queues; i++)
1077 ixgbe_free_tx_resources(&tx_ring[i]);
1078 vfree(tx_ring);
1079 }
1080
1081 clear_bit(__IXGBE_RESETTING, &adapter->state);
1082 return err;
1083 }
1084
1085 #ifndef HAVE_ETHTOOL_GET_SSET_COUNT
1086 static int ixgbe_get_stats_count(struct net_device *netdev)
1087 {
1088 return IXGBE_STATS_LEN;
1089 }
1090
1091 #else /* HAVE_ETHTOOL_GET_SSET_COUNT */
1092 static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1093 {
1094 switch (sset) {
1095 case ETH_SS_TEST:
1096 return IXGBE_TEST_LEN;
1097 case ETH_SS_STATS:
1098 return IXGBE_STATS_LEN;
1099 default:
1100 return -EOPNOTSUPP;
1101 }
1102 }
1103
1104 #endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
1105 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1106 struct ethtool_stats *stats, u64 *data)
1107 {
1108 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1109 #ifdef HAVE_NETDEV_STATS_IN_NETDEV
1110 struct net_device_stats *net_stats = &netdev->stats;
1111 #else
1112 struct net_device_stats *net_stats = &adapter->net_stats;
1113 #endif
1114 u64 *queue_stat;
1115 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
1116 int i, j, k;
1117 char *p;
1118
1119 printk(KERN_DEBUG "ixgbe_stats 0\n");
1120 ixgbe_update_stats(adapter);
1121 printk(KERN_DEBUG "ixgbe_stats 1\n");
1122
1123 for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) {
1124 p = (char *)net_stats + ixgbe_gstrings_net_stats[i].stat_offset;
1125 data[i] = (ixgbe_gstrings_net_stats[i].sizeof_stat ==
1126 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1127 }
1128 for (j = 0; j < IXGBE_GLOBAL_STATS_LEN; j++, i++) {
1129 p = (char *)adapter + ixgbe_gstrings_stats[j].stat_offset;
1130 data[i] = (ixgbe_gstrings_stats[j].sizeof_stat ==
1131 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1132 }
1133 printk(KERN_DEBUG "ixgbe_stats 2\n");
1134 #ifdef NO_VNIC
1135 for (j = 0; j < adapter->num_tx_queues; j++) {
1136 queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
1137 for (k = 0; k < stat_count; k++)
1138 data[i + k] = queue_stat[k];
1139 i += k;
1140 }
1141 for (j = 0; j < adapter->num_rx_queues; j++) {
1142 queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
1143 for (k = 0; k < stat_count; k++)
1144 data[i + k] = queue_stat[k];
1145 i += k;
1146 }
1147 printk(KERN_DEBUG "ixgbe_stats 3\n");
1148 #endif
1149 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1150 for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
1151 data[i++] = adapter->stats.pxontxc[j];
1152 data[i++] = adapter->stats.pxofftxc[j];
1153 }
1154 for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
1155 data[i++] = adapter->stats.pxonrxc[j];
1156 data[i++] = adapter->stats.pxoffrxc[j];
1157 }
1158 }
1159 printk(KERN_DEBUG "ixgbe_stats 4\n");
1160 stat_count = sizeof(struct vf_stats) / sizeof(u64);
1161 for (j = 0; j < adapter->num_vfs; j++) {
1162 queue_stat = (u64 *)&adapter->vfinfo[j].vfstats;
1163 for (k = 0; k < stat_count; k++)
1164 data[i + k] = queue_stat[k];
1165 queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats;
1166 for (k = 0; k < stat_count; k++)
1167 data[i + k] += queue_stat[k];
1168 i += k;
1169 }
1170 }
1171
1172 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1173 u8 *data)
1174 {
1175 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1176 char *p = (char *)data;
1177 int i;
1178
1179 switch (stringset) {
1180 case ETH_SS_TEST:
1181 memcpy(data, *ixgbe_gstrings_test,
1182 IXGBE_TEST_LEN * ETH_GSTRING_LEN);
1183 break;
1184 case ETH_SS_STATS:
1185 for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) {
1186 memcpy(p, ixgbe_gstrings_net_stats[i].stat_string,
1187 ETH_GSTRING_LEN);
1188 p += ETH_GSTRING_LEN;
1189 }
1190 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1191 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1192 ETH_GSTRING_LEN);
1193 p += ETH_GSTRING_LEN;
1194 }
1195 for (i = 0; i < adapter->num_tx_queues; i++) {
1196 sprintf(p, "tx_queue_%u_packets", i);
1197 p += ETH_GSTRING_LEN;
1198 sprintf(p, "tx_queue_%u_bytes", i);
1199 p += ETH_GSTRING_LEN;
1200 }
1201 for (i = 0; i < adapter->num_rx_queues; i++) {
1202 sprintf(p, "rx_queue_%u_packets", i);
1203 p += ETH_GSTRING_LEN;
1204 sprintf(p, "rx_queue_%u_bytes", i);
1205 p += ETH_GSTRING_LEN;
1206 }
1207 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1208 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
1209 sprintf(p, "tx_pb_%u_pxon", i);
1210 p += ETH_GSTRING_LEN;
1211 sprintf(p, "tx_pb_%u_pxoff", i);
1212 p += ETH_GSTRING_LEN;
1213 }
1214 for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
1215 sprintf(p, "rx_pb_%u_pxon", i);
1216 p += ETH_GSTRING_LEN;
1217 sprintf(p, "rx_pb_%u_pxoff", i);
1218 p += ETH_GSTRING_LEN;
1219 }
1220 }
1221 for (i = 0; i < adapter->num_vfs; i++) {
1222 sprintf(p, "VF %d Rx Packets", i);
1223 p += ETH_GSTRING_LEN;
1224 sprintf(p, "VF %d Rx Bytes", i);
1225 p += ETH_GSTRING_LEN;
1226 sprintf(p, "VF %d Tx Packets", i);
1227 p += ETH_GSTRING_LEN;
1228 sprintf(p, "VF %d Tx Bytes", i);
1229 p += ETH_GSTRING_LEN;
1230 sprintf(p, "VF %d MC Packets", i);
1231 p += ETH_GSTRING_LEN;
1232 }
1233 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1234 break;
1235 }
1236 }
1237
1238 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1239 {
1240 struct ixgbe_hw *hw = &adapter->hw;
1241 bool link_up;
1242 u32 link_speed = 0;
1243 *data = 0;
1244
1245 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1246 if (link_up)
1247 return *data;
1248 else
1249 *data = 1;
1250 return *data;
1251 }
1252
1253 /* ethtool register test data */
1254 struct ixgbe_reg_test {
1255 u16 reg;
1256 u8 array_len;
1257 u8 test_type;
1258 u32 mask;
1259 u32 write;
1260 };
1261
1262 /* In the hardware, registers are laid out either singly, in arrays
1263 * spaced 0x40 bytes apart, or in contiguous tables. We assume
1264 * most tests take place on arrays or single registers (handled
1265 * as a single-element array) and special-case the tables.
1266 * Table tests are always pattern tests.
1267 *
1268 * We also make provision for some required setup steps by specifying
1269 * registers to be written without any read-back testing.
1270 */
1271
1272 #define PATTERN_TEST 1
1273 #define SET_READ_TEST 2
1274 #define WRITE_NO_TEST 3
1275 #define TABLE32_TEST 4
1276 #define TABLE64_TEST_LO 5
1277 #define TABLE64_TEST_HI 6
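
/*
 * Address-arithmetic sketch (guarded by #if 0) matching the dispatch in
 * ixgbe_reg_test() below: array registers sit 0x40 bytes apart, 32-bit
 * table entries 4 bytes apart and 64-bit table halves 8 bytes apart.
 * reg_addr() is a hypothetical helper, not part of the driver.
 */
#if 0
static u32 reg_addr(const struct ixgbe_reg_test *t, u32 i)
{
	switch (t->test_type) {
	case TABLE32_TEST:
		return t->reg + i * 4;		/* contiguous 32-bit table */
	case TABLE64_TEST_LO:
		return t->reg + i * 8;		/* low dword of 64-bit entry */
	case TABLE64_TEST_HI:
		return t->reg + 4 + i * 8;	/* high dword of 64-bit entry */
	default:
		return t->reg + i * 0x40;	/* register array / single reg */
	}
}
#endif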
1278
1279 /* default 82599 register test */
1280 static struct ixgbe_reg_test reg_test_82599[] = {
1281 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1282 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1283 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1284 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1285 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1286 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1287 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1288 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1289 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1290 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1291 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1292 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1293 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1294 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1295 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1296 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1297 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1298 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1299 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1300 { 0, 0, 0, 0 }
1301 };
1302
1303 /* default 82598 register test */
1304 static struct ixgbe_reg_test reg_test_82598[] = {
1305 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1306 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1307 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1308 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1309 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1310 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1311 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1312 /* Enable all four RX queues before testing. */
1313 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1314 /* RDH is read-only for 82598, only test RDT. */
1315 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1316 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1317 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1318 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1319 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1320 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1321 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1322 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1323 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1324 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1325 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1326 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1327 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1328 { 0, 0, 0, 0 }
1329 };
1330
1331 #define REG_PATTERN_TEST(R, M, W) \
1332 { \
1333 u32 pat, val, before; \
1334 const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
1335 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
1336 before = readl(adapter->hw.hw_addr + R); \
1337 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
1338 val = readl(adapter->hw.hw_addr + R); \
1339 if (val != (_test[pat] & W & M)) { \
1340 e_err(drv, "pattern test reg %04X failed: got " \
1341 "0x%08X expected 0x%08X\n", \
1342 R, val, (_test[pat] & W & M)); \
1343 *data = R; \
1344 writel(before, adapter->hw.hw_addr + R); \
1345 return 1; \
1346 } \
1347 writel(before, adapter->hw.hw_addr + R); \
1348 } \
1349 }
1350
1351 #define REG_SET_AND_CHECK(R, M, W) \
1352 { \
1353 u32 val, before; \
1354 before = readl(adapter->hw.hw_addr + R); \
1355 writel((W & M), (adapter->hw.hw_addr + R)); \
1356 val = readl(adapter->hw.hw_addr + R); \
1357 if ((W & M) != (val & M)) { \
1358 e_err(drv, "set/check reg %04X test failed: got 0x%08X " \
1359 "expected 0x%08X\n", R, (val & M), (W & M)); \
1360 *data = R; \
1361 writel(before, (adapter->hw.hw_addr + R)); \
1362 return 1; \
1363 } \
1364 writel(before, (adapter->hw.hw_addr + R)); \
1365 }
1366
1367 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1368 {
1369 struct ixgbe_reg_test *test;
1370 u32 value, status_before, status_after;
1371 u32 i, toggle;
1372
1373 switch (adapter->hw.mac.type) {
1374 case ixgbe_mac_82598EB:
1375 toggle = 0x7FFFF3FF;
1376 test = reg_test_82598;
1377 break;
1378 case ixgbe_mac_82599EB:
1379 case ixgbe_mac_X540:
1380 toggle = 0x7FFFF30F;
1381 test = reg_test_82599;
1382 break;
1383 default:
1384 *data = 1;
1385 return 1;
1387 }
1388
1389 /*
1390 * Because the status register is such a special case,
1391 * we handle it separately from the rest of the register
1392 * tests. Some bits are read-only, some toggle, and some
1393 * are writeable on newer MACs.
1394 */
1395 status_before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
1396 value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
1397 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
1398 status_after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
1399 if (value != status_after) {
1400 e_err(drv, "failed STATUS register test got: "
1401 "0x%08X expected: 0x%08X\n", status_after, value);
1402 *data = 1;
1403 return 1;
1404 }
1405 /* restore previous status */
1406 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, status_before);
1407
1408 /*
1409 * Perform the remainder of the register test, looping through
1410 * the test table until we either fail or reach the null entry.
1411 */
1412 while (test->reg) {
1413 for (i = 0; i < test->array_len; i++) {
1414 switch (test->test_type) {
1415 case PATTERN_TEST:
1416 REG_PATTERN_TEST(test->reg + (i * 0x40),
1417 test->mask,
1418 test->write);
1419 break;
1420 case SET_READ_TEST:
1421 REG_SET_AND_CHECK(test->reg + (i * 0x40),
1422 test->mask,
1423 test->write);
1424 break;
1425 case WRITE_NO_TEST:
1426 writel(test->write,
1427 (adapter->hw.hw_addr + test->reg)
1428 + (i * 0x40));
1429 break;
1430 case TABLE32_TEST:
1431 REG_PATTERN_TEST(test->reg + (i * 4),
1432 test->mask,
1433 test->write);
1434 break;
1435 case TABLE64_TEST_LO:
1436 REG_PATTERN_TEST(test->reg + (i * 8),
1437 test->mask,
1438 test->write);
1439 break;
1440 case TABLE64_TEST_HI:
1441 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
1442 test->mask,
1443 test->write);
1444 break;
1445 }
1446 }
1447 test++;
1448 }
1449
1450 *data = 0;
1451 return 0;
1452 }
1453
1454 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1455 {
1456 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL))
1457 *data = 1;
1458 else
1459 *data = 0;
1460 return *data;
1461 }
1462
1463 static irqreturn_t ixgbe_test_intr(int irq, void *data)
1464 {
1465 struct net_device *netdev = (struct net_device *) data;
1466 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1467
1468 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1469
1470 return IRQ_HANDLED;
1471 }
1472
1473 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1474 {
1475 struct net_device *netdev = adapter->netdev;
1476 u32 mask, i = 0, shared_int = true;
1477 u32 irq = adapter->pdev->irq;
1478
1479 *data = 0;
1480
1481 /* Hook up test interrupt handler just for this test */
1482 if (adapter->msix_entries) {
1483 /* NOTE: we don't test MSI-X interrupts here, yet */
1484 return 0;
1485 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1486 shared_int = false;
1487 if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
1488 netdev)) {
1489 *data = 1;
1490 return -1;
1491 }
1492 } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
1493 netdev->name, netdev)) {
1494 shared_int = false;
1495 } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
1496 netdev->name, netdev)) {
1497 *data = 1;
1498 return -1;
1499 }
1500 e_info(hw, "testing %s interrupt\n",
1501 (shared_int ? "shared" : "unshared"));
1502
1503 /* Disable all the interrupts */
1504 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1505 IXGBE_WRITE_FLUSH(&adapter->hw);
1506 usleep_range(10000, 20000);
1507
1508 /* Test each interrupt */
1509 for (; i < 10; i++) {
1510 /* Interrupt to test */
1511 mask = 1 << i;
1512
1513 if (!shared_int) {
1514 /*
1515 * Disable the interrupts to be reported in
1516 * the cause register and then force the same
1517 * interrupt and see if one gets posted. If
1518 * an interrupt was posted to the bus, the
1519 * test failed.
1520 */
1521 adapter->test_icr = 0;
1522 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1523 ~mask & 0x00007FFF);
1524 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1525 ~mask & 0x00007FFF);
1526 IXGBE_WRITE_FLUSH(&adapter->hw);
1527 usleep_range(10000, 20000);
1528
1529 if (adapter->test_icr & mask) {
1530 *data = 3;
1531 break;
1532 }
1533 }
1534
1535 /*
1536 * Enable the interrupt to be reported in the cause
1537 * register and then force the same interrupt and see
1538 * if one gets posted. If an interrupt was not posted
1539 * to the bus, the test failed.
1540 */
1541 adapter->test_icr = 0;
1542 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1543 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1544 IXGBE_WRITE_FLUSH(&adapter->hw);
1545 usleep_range(10000, 20000);
1546
1547 if (!(adapter->test_icr & mask)) {
1548 *data = 4;
1549 break;
1550 }
1551
1552 if (!shared_int) {
1553 /*
1554 * Disable the other interrupts to be reported in
1555 * the cause register and then force the other
1556 * interrupts and see if any get posted. If
1557 * an interrupt was posted to the bus, the
1558 * test failed.
1559 */
1560 adapter->test_icr = 0;
1561 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1562 ~mask & 0x00007FFF);
1563 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1564 ~mask & 0x00007FFF);
1565 IXGBE_WRITE_FLUSH(&adapter->hw);
1566 usleep_range(10000, 20000);
1567
1568 if (adapter->test_icr) {
1569 *data = 5;
1570 break;
1571 }
1572 }
1573 }
1574
1575 /* Disable all the interrupts */
1576 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1577 IXGBE_WRITE_FLUSH(&adapter->hw);
1578 usleep_range(10000, 20000);
1579
1580 /* Unhook test interrupt handler */
1581 free_irq(irq, netdev);
1582
1583 return *data;
1584 }
1585
1586
1587
1588 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1589 {
1590 struct ixgbe_hw *hw = &adapter->hw;
1591 u32 reg_data;
1592
1593 /* X540 needs to set the MACC.FLU bit to force link up */
1594 if (adapter->hw.mac.type == ixgbe_mac_X540) {
1595 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1596 reg_data |= IXGBE_MACC_FLU;
1597 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1598 }
1599
1600 /* right now we only support MAC loopback in the driver */
1601 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1602 /* Setup MAC loopback */
1603 reg_data |= IXGBE_HLREG0_LPBK;
1604 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1605
1606 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1607 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1608 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1609
1610 reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1611 reg_data &= ~IXGBE_AUTOC_LMS_MASK;
1612 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
1613 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1614 IXGBE_WRITE_FLUSH(hw);
1615 usleep_range(10000, 20000);
1616
1617 /* Disable Atlas Tx lanes; re-enabled in reset path */
1618 if (hw->mac.type == ixgbe_mac_82598EB) {
1619 u8 atlas;
1620
1621 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1622 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1623 ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1624
1625 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1626 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1627 ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1628
1629 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1630 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1631 ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1632
1633 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1634 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1635 ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1636 }
1637
1638 return 0;
1639 }
1640
1641 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1642 {
1643 u32 reg_data;
1644
1645 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1646 reg_data &= ~IXGBE_HLREG0_LPBK;
1647 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1648 }
1649
1655 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
1656 {
1657 /* Descriptor-ring setup and the actual frame loop are commented
1658 * out in this copy; only the MAC-loopback setup is exercised:
1659 * *data = ixgbe_setup_desc_rings(adapter);
1660 * if (*data)
1661 * goto out;
1662 */
1663 *data = ixgbe_setup_loopback_test(adapter);
1664 if (*data)
1665 goto err_loopback;
1666 /* *data = ixgbe_run_loopback_test(adapter); */
1667 ixgbe_loopback_cleanup(adapter);
1668
1669 err_loopback:
1670 /* ixgbe_free_desc_rings(adapter); */
1671 return *data;
1672 }
1673
1674 #ifndef HAVE_ETHTOOL_GET_SSET_COUNT
1675 static int ixgbe_diag_test_count(struct net_device *netdev)
1676 {
1677 return IXGBE_TEST_LEN;
1678 }
1679
1680 #endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
1681 static void ixgbe_diag_test(struct net_device *netdev,
1682 struct ethtool_test *eth_test, u64 *data)
1683 {
1684 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1685 bool if_running = netif_running(netdev);
1686
1687 set_bit(__IXGBE_TESTING, &adapter->state);
1688 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1689 /* Offline tests */
1690
1691 e_info(hw, "offline testing starting\n");
1692
1693 /* Link test performed before hardware reset so autoneg doesn't
1694 * interfere with test result */
1695 if (ixgbe_link_test(adapter, &data[4]))
1696 eth_test->flags |= ETH_TEST_FL_FAILED;
1697
1698 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
1699 int i;
1700 for (i = 0; i < adapter->num_vfs; i++) {
1701 if (adapter->vfinfo[i].clear_to_send) {
1702 e_warn(drv, "Please take active VFs "
1703 "offline and restart the "
1704 "adapter before running NIC "
1705 "diagnostics\n");
1706 data[0] = 1;
1707 data[1] = 1;
1708 data[2] = 1;
1709 data[3] = 1;
1710 eth_test->flags |= ETH_TEST_FL_FAILED;
1711 clear_bit(__IXGBE_TESTING,
1712 &adapter->state);
1713 goto skip_ol_tests;
1714 }
1715 }
1716 }
1717
1718 if (if_running)
1719 /* indicate we're in test mode */
1720 dev_close(netdev);
1721 else
1722 ixgbe_reset(adapter);
1723
1724 e_info(hw, "register testing starting\n");
1725 if (ixgbe_reg_test(adapter, &data[0]))
1726 eth_test->flags |= ETH_TEST_FL_FAILED;
1727
1728 ixgbe_reset(adapter);
1729 e_info(hw, "eeprom testing starting\n");
1730 if (ixgbe_eeprom_test(adapter, &data[1]))
1731 eth_test->flags |= ETH_TEST_FL_FAILED;
1732
1733 ixgbe_reset(adapter);
1734 e_info(hw, "interrupt testing starting\n");
1735 if (ixgbe_intr_test(adapter, &data[2]))
1736 eth_test->flags |= ETH_TEST_FL_FAILED;
1737
1738 /* If SRIOV or VMDq is enabled then skip MAC
1739 * loopback diagnostic. */
1740 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
1741 IXGBE_FLAG_VMDQ_ENABLED)) {
1742 e_info(hw, "skip MAC loopback diagnostic in VT mode\n");
1743 data[3] = 0;
1744 goto skip_loopback;
1745 }
1746
1747 ixgbe_reset(adapter);
1748 e_info(hw, "loopback testing starting\n");
1749 if (ixgbe_loopback_test(adapter, &data[3]))
1750 eth_test->flags |= ETH_TEST_FL_FAILED;
1751
1752 skip_loopback:
1753 ixgbe_reset(adapter);
1754
1755 clear_bit(__IXGBE_TESTING, &adapter->state);
1756 if (if_running)
1757 dev_open(netdev);
1758 } else {
1759 e_info(hw, "online testing starting\n");
1760 /* Online tests */
1761 if (ixgbe_link_test(adapter, &data[4]))
1762 eth_test->flags |= ETH_TEST_FL_FAILED;
1763
1764 /* Online tests aren't run; pass by default */
1765 data[0] = 0;
1766 data[1] = 0;
1767 data[2] = 0;
1768 data[3] = 0;
1769
1770 clear_bit(__IXGBE_TESTING, &adapter->state);
1771 }
1772 skip_ol_tests:
1773 msleep_interruptible(4 * 1000);
1774 }
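/*
 * The five u64 results handed back to ethtool map as: data[0]
 * register test, data[1] eeprom, data[2] interrupt, data[3] loopback,
 * data[4] link.  Typical invocations (device name illustrative):
 *
 *	ethtool -t eth0 offline   runs all five and resets the NIC
 *	ethtool -t eth0 online    link test only, slots 0-3 forced to 0
 */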
1775
1776 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1777 struct ethtool_wolinfo *wol)
1778 {
1779 struct ixgbe_hw *hw = &adapter->hw;
1780 int retval = 1;
1781 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
1782
1783 /* WOL not supported except for the following */
1784 switch (hw->device_id) {
1785 case IXGBE_DEV_ID_82599_SFP:
1786 /* Only these subdevices support WOL */
1787 switch (hw->subsystem_device_id) {
1788 case IXGBE_SUBDEV_ID_82599_560FLR:
1789 /* only support first port */
1790 if (hw->bus.func != 0) {
1791 wol->supported = 0;
1792 break;
1793 }
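/* fall through: port 0 of the 560FLR supports WOL like the SFP */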
1794 case IXGBE_SUBDEV_ID_82599_SFP:
1795 retval = 0;
1796 break;
1797 default:
1798 wol->supported = 0;
1799 break;
1800 }
1801 break;
1802 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
1803 /* All except this subdevice support WOL */
1804 if (hw->subsystem_device_id ==
1805 IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
1806 wol->supported = 0;
1807 break;
1808 }
1809 retval = 0;
1810 break;
1811 case IXGBE_DEV_ID_82599_KX4:
1812 retval = 0;
1813 break;
1814 case IXGBE_DEV_ID_X540T:
1815 /* check the EEPROM to see if WOL is enabled */
1816 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1817 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1818 (hw->bus.func == 0))) {
1819 retval = 0;
1820 break;
1821 }
1822
1823 /* All others not supported */
1824 wol->supported = 0;
1825 break;
1826 default:
1827 wol->supported = 0;
1828 }
1829 return retval;
1830 }
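/*
 * Return convention: nonzero means WOL is not supported on this
 * device/port combination; wol->supported may also have been zeroed
 * so the caller reports no capabilities at all to ethtool.
 */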
1831
1832 static void ixgbe_get_wol(struct net_device *netdev,
1833 struct ethtool_wolinfo *wol)
1834 {
1835 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1836
1837 wol->supported = WAKE_UCAST | WAKE_MCAST |
1838 WAKE_BCAST | WAKE_MAGIC;
1839 wol->wolopts = 0;
1840
1841 if (ixgbe_wol_exclusion(adapter, wol) ||
1842 !device_can_wakeup(&adapter->pdev->dev))
1843 return;
1844
1845 if (adapter->wol & IXGBE_WUFC_EX)
1846 wol->wolopts |= WAKE_UCAST;
1847 if (adapter->wol & IXGBE_WUFC_MC)
1848 wol->wolopts |= WAKE_MCAST;
1849 if (adapter->wol & IXGBE_WUFC_BC)
1850 wol->wolopts |= WAKE_BCAST;
1851 if (adapter->wol & IXGBE_WUFC_MAG)
1852 wol->wolopts |= WAKE_MAGIC;
1853 }
1854
1855 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1856 {
1857 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1858
1859 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1860 return -EOPNOTSUPP;
1861
1862 if (ixgbe_wol_exclusion(adapter, wol))
1863 return wol->wolopts ? -EOPNOTSUPP : 0;
1864
1865 adapter->wol = 0;
1866
1867 if (wol->wolopts & WAKE_UCAST)
1868 adapter->wol |= IXGBE_WUFC_EX;
1869 if (wol->wolopts & WAKE_MCAST)
1870 adapter->wol |= IXGBE_WUFC_MC;
1871 if (wol->wolopts & WAKE_BCAST)
1872 adapter->wol |= IXGBE_WUFC_BC;
1873 if (wol->wolopts & WAKE_MAGIC)
1874 adapter->wol |= IXGBE_WUFC_MAG;
1875
1876 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1877
1878 return 0;
1879 }
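/*
 * The WAKE_* bits accepted above map 1:1 onto Wake Up Filter Control
 * bits: WAKE_UCAST->WUFC_EX, WAKE_MCAST->WUFC_MC, WAKE_BCAST->WUFC_BC,
 * WAKE_MAGIC->WUFC_MAG.  Example (device name illustrative):
 *
 *	ethtool -s eth0 wol g      magic packet only
 *	ethtool -s eth0 wol umbg   all four filters
 */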
1880
1881 static int ixgbe_nway_reset(struct net_device *netdev)
1882 {
1883 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1884
1885 if (netif_running(netdev))
1886 ixgbe_reinit_locked(adapter);
1887
1888 return 0;
1889 }
1890
1891 #ifdef HAVE_ETHTOOL_SET_PHYS_ID
1892 static int ixgbe_set_phys_id(struct net_device *netdev,
1893 enum ethtool_phys_id_state state)
1894 {
1895 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1896 struct ixgbe_hw *hw = &adapter->hw;
1897
1898 switch (state) {
1899 case ETHTOOL_ID_ACTIVE:
1900 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1901 return 2;
1902
1903 case ETHTOOL_ID_ON:
1904 hw->mac.ops.led_on(hw, IXGBE_LED_ON);
1905 break;
1906
1907 case ETHTOOL_ID_OFF:
1908 hw->mac.ops.led_off(hw, IXGBE_LED_ON);
1909 break;
1910
1911 case ETHTOOL_ID_INACTIVE:
1912 /* Restore LED settings */
1913 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
1914 break;
1915 }
1916
1917 return 0;
1918 }
1919 #else
1920 static int ixgbe_phys_id(struct net_device *netdev, u32 data)
1921 {
1922 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1923 struct ixgbe_hw *hw = &adapter->hw;
1924 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1925 u32 i;
1926
1927 if (!data || data > 300)
1928 data = 300;
1929
1930 for (i = 0; i < (data * 1000); i += 400) {
1931 ixgbe_led_on(hw, IXGBE_LED_ON);
1932 msleep_interruptible(200);
1933 ixgbe_led_off(hw, IXGBE_LED_ON);
1934 msleep_interruptible(200);
1935 }
1936
1937 /* Restore LED settings */
1938 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1939
1940 return 0;
1941 }
1942 #endif /* HAVE_ETHTOOL_SET_PHYS_ID */
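/*
 * Two LED-identify ABI generations are covered above.  With
 * HAVE_ETHTOOL_SET_PHYS_ID the core times the blinking: returning 2
 * from ETHTOOL_ID_ACTIVE requests two on/off cycles per second via
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF callbacks.  Without it the driver
 * blinks synchronously for "data" seconds (capped at 300).  Either
 * way, "ethtool -p eth0 5" (illustrative) blinks the LED for 5 s.
 */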
1943
1944 static int ixgbe_get_coalesce(struct net_device *netdev,
1945 struct ethtool_coalesce *ec)
1946 {
1947 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1948
1949 ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
1950 #ifndef CONFIG_IXGBE_NAPI
1951 ec->rx_max_coalesced_frames_irq = adapter->rx_work_limit;
1952 #endif /* CONFIG_IXGBE_NAPI */
1953 /* only valid if in constant ITR mode */
1954 if (adapter->rx_itr_setting <= 1)
1955 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
1956 else
1957 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
1958
1959 /* if in mixed tx/rx queues per vector mode, report only rx settings */
1960 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
1961 return 0;
1962
1963 /* only valid if in constant ITR mode */
1964 if (adapter->tx_itr_setting <= 1)
1965 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
1966 else
1967 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
1968
1969 return 0;
1970 }
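/*
 * ITR encoding used by both coalesce handlers: *_itr_setting of 0
 * leaves interrupt moderation off and 1 selects dynamic ITR (both
 * reported verbatim); larger values store microseconds shifted left
 * by two, hence the ">> 2" when reporting and "<< 2" when storing.
 */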
1971
1972 /*
1973 * this function must be called before setting the new value of
1974 * rx_itr_setting
1975 */
1976 #ifdef NO_VNIC
1977 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
1978 {
1979 struct net_device *netdev = adapter->netdev;
1980
1981 /* nothing to do if LRO or RSC are not enabled */
1982 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
1983 !(netdev->features & NETIF_F_LRO))
1984 return false;
1985
1986 /* check the feature flag value and enable RSC if necessary */
1987 if (adapter->rx_itr_setting == 1 ||
1988 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
1989 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
1990 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
1991 e_info(probe, "rx-usecs value high enough "
1992 "to re-enable RSC\n");
1993 return true;
1994 }
1995 /* if interrupt rate is too high then disable RSC */
1996 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
1997 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
1998 #ifdef IXGBE_NO_LRO
1999 e_info(probe, "rx-usecs set too low, disabling RSC\n");
2000 #else
2001 e_info(probe, "rx-usecs set too low, "
2002 "falling back to software LRO\n");
2003 #endif
2004 return true;
2005 }
2006 return false;
2007 }
2008 #endif
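/*
 * RSC only tolerates moderate interrupt rates: it stays enabled while
 * rx_itr_setting is dynamic (1) or above IXGBE_MIN_RSC_ITR and is
 * dropped otherwise, which is what ixgbe_update_rsc() re-evaluates
 * before a new rx-usecs value is committed.
 */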
2009
2010 static int ixgbe_set_coalesce(struct net_device *netdev,
2011 struct ethtool_coalesce *ec)
2012 {
2013 #ifdef NO_VNIC
2014 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2015 struct ixgbe_q_vector *q_vector;
2016 int i;
2017 int num_vectors;
2018 u16 tx_itr_param, rx_itr_param;
2019 bool need_reset = false;
2020
2021 /* don't accept tx specific changes if we've got mixed RxTx vectors */
2022 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
2023 && ec->tx_coalesce_usecs)
2024 return -EINVAL;
2025
2026 if (ec->tx_max_coalesced_frames_irq)
2027 adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
2028
2029 #ifndef CONFIG_IXGBE_NAPI
2030 if (ec->rx_max_coalesced_frames_irq)
2031 adapter->rx_work_limit = ec->rx_max_coalesced_frames_irq;
2032
2033 #endif
2034 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2035 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
2036 return -EINVAL;
2037
2038 if (ec->rx_coalesce_usecs > 1)
2039 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2040 else
2041 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2042
2043 if (adapter->rx_itr_setting == 1)
2044 rx_itr_param = IXGBE_20K_ITR;
2045 else
2046 rx_itr_param = adapter->rx_itr_setting;
2047
2048 if (ec->tx_coalesce_usecs > 1)
2049 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2050 else
2051 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2052
2053 if (adapter->tx_itr_setting == 1)
2054 tx_itr_param = IXGBE_10K_ITR;
2055 else
2056 tx_itr_param = adapter->tx_itr_setting;
2057
2058 /* check the old value and enable RSC if necessary */
2059 need_reset = ixgbe_update_rsc(adapter);
2060
2061 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2062 num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2063 else
2064 num_vectors = 1;
2065
2066 for (i = 0; i < num_vectors; i++) {
2067 q_vector = adapter->q_vector[i];
2068 q_vector->tx.work_limit = adapter->tx_work_limit;
2069 q_vector->rx.work_limit = adapter->rx_work_limit;
2070 if (q_vector->tx.count && !q_vector->rx.count)
2071 /* tx only */
2072 q_vector->itr = tx_itr_param;
2073 else
2074 /* rx only or mixed */
2075 q_vector->itr = rx_itr_param;
2076 ixgbe_write_eitr(q_vector);
2077 }
2078
2079 /*
2080 * do reset here at the end to make sure EITR==0 case is handled
2081 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2082 * also locks in RSC enable/disable which requires reset
2083 */
2084 if (need_reset)
2085 ixgbe_do_reset(netdev);
2086 #endif
2087 return 0;
2088 }
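/*
 * Worked example for the NO_VNIC path above (values illustrative):
 * "ethtool -C eth0 rx-usecs 50" stores 50 << 2 = 200 in
 * rx_itr_setting, each queue vector gets itr = 200, and
 * ixgbe_write_eitr() pushes it into the per-vector EITR register.
 * "rx-usecs 1" instead programs the fixed IXGBE_20K_ITR default.
 */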
2089
2090 #ifndef HAVE_NDO_SET_FEATURES
2091 static u32 ixgbe_get_rx_csum(struct net_device *netdev)
2092 {
2093 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2094 struct ixgbe_ring *ring = adapter->rx_ring[0];
2095 return test_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
2096 }
2097
2098 static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
2099 {
2100 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2101 int i;
2102
2103 for (i = 0; i < adapter->num_rx_queues; i++) {
2104 struct ixgbe_ring *ring = adapter->rx_ring[i];
2105 if (data)
2106 set_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
2107 else
2108 clear_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
2109 }
2110
2111 /* LRO and RSC both depend on RX checksum to function */
2112 if (!data && (netdev->features & NETIF_F_LRO)) {
2113 netdev->features &= ~NETIF_F_LRO;
2114
2115 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2116 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2117 ixgbe_do_reset(netdev);
2118 }
2119 }
2120
2121 return 0;
2122 }
2123
2124 static u32 ixgbe_get_tx_csum(struct net_device *netdev)
2125 {
2126 return (netdev->features & NETIF_F_IP_CSUM) != 0;
2127 }
2128
2129 static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
2130 {
2131 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2132 u32 feature_list;
2133
2134 #ifdef NETIF_F_IPV6_CSUM
2135 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2136 #else
2137 feature_list = NETIF_F_IP_CSUM;
2138 #endif
2139 switch (adapter->hw.mac.type) {
2140 case ixgbe_mac_82599EB:
2141 case ixgbe_mac_X540:
2142 feature_list |= NETIF_F_SCTP_CSUM;
2143 break;
2144 default:
2145 break;
2146 }
2147 if (data)
2148 netdev->features |= feature_list;
2149 else
2150 netdev->features &= ~feature_list;
2151
2152 return 0;
2153 }
2154
2155 #ifdef NETIF_F_TSO
2156 static int ixgbe_set_tso(struct net_device *netdev, u32 data)
2157 {
2158 if (data) {
2159 netdev->features |= NETIF_F_TSO;
2160 #ifdef NETIF_F_TSO6
2161 netdev->features |= NETIF_F_TSO6;
2162 #endif
2163 } else {
2164 #ifndef HAVE_NETDEV_VLAN_FEATURES
2165 #ifdef NETIF_F_HW_VLAN_TX
2166 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2167 /* disable TSO on all VLANs if they're present */
2168 if (adapter->vlgrp) {
2169 int i;
2170 struct net_device *v_netdev;
2171 for (i = 0; i < VLAN_N_VID; i++) {
2172 v_netdev =
2173 vlan_group_get_device(adapter->vlgrp, i);
2174 if (v_netdev) {
2175 v_netdev->features &= ~NETIF_F_TSO;
2176 #ifdef NETIF_F_TSO6
2177 v_netdev->features &= ~NETIF_F_TSO6;
2178 #endif
2179 vlan_group_set_device(adapter->vlgrp, i,
2180 v_netdev);
2181 }
2182 }
2183 }
2184 #endif
2185 #endif /* HAVE_NETDEV_VLAN_FEATURES */
2186 netdev->features &= ~NETIF_F_TSO;
2187 #ifdef NETIF_F_TSO6
2188 netdev->features &= ~NETIF_F_TSO6;
2189 #endif
2190 }
2191 return 0;
2192 }
2193
2194 #endif /* NETIF_F_TSO */
2195 #ifdef ETHTOOL_GFLAGS
2196 static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2197 {
2198 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2199 u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
2200 u32 changed = netdev->features ^ data;
2201 bool need_reset = false;
2202 int rc;
2203
2204 #ifndef HAVE_VLAN_RX_REGISTER
2205 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
2206 !(data & ETH_FLAG_RXVLAN))
2207 return -EINVAL;
2208
2209 #endif
2210 #ifdef NETIF_F_RXHASH
2211 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
2212 supported_flags |= ETH_FLAG_RXHASH;
2213 #endif
2214 #ifdef IXGBE_NO_LRO
2215 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
2216 #endif
2217 supported_flags |= ETH_FLAG_LRO;
2218
2219 #ifdef ETHTOOL_GRXRINGS
2220 switch (adapter->hw.mac.type) {
2221 case ixgbe_mac_X540:
2222 case ixgbe_mac_82599EB:
2223 supported_flags |= ETH_FLAG_NTUPLE;
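/* fall through */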
2224 default:
2225 break;
2226 }
2227
2228 #endif
2229 rc = ethtool_op_set_flags(netdev, data, supported_flags);
2230 if (rc)
2231 return rc;
2232
2233 #ifndef HAVE_VLAN_RX_REGISTER
2234 if (changed & ETH_FLAG_RXVLAN)
2235 ixgbe_vlan_mode(netdev, netdev->features);
2236
2237 #endif
2238 /* if state changes we need to update adapter->flags and reset */
2239 if (!(netdev->features & NETIF_F_LRO)) {
2240 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
2241 need_reset = true;
2242 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2243 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
2244 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2245 if (adapter->rx_itr_setting == 1 ||
2246 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2247 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2248 need_reset = true;
2249 } else if (changed & ETH_FLAG_LRO) {
2250 #ifdef IXGBE_NO_LRO
2251 e_info(probe, "rx-usecs set too low, "
2252 "disabling RSC\n");
2253 #else
2254 e_info(probe, "rx-usecs set too low, "
2255 "falling back to software LRO\n");
2256 #endif
2257 }
2258 }
2259
2260 #ifdef ETHTOOL_GRXRINGS
2261 /*
2262 * Check if Flow Director n-tuple support was enabled or disabled. If
2263 * the state changed, we need to reset.
2264 */
2265 if (!(netdev->features & NETIF_F_NTUPLE)) {
2266 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
2267 /* turn off Flow Director, set ATR and reset */
2268 if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
2269 !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
2270 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
2271 need_reset = true;
2272 }
2273 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
2274 } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
2275 /* turn off ATR, enable perfect filters and reset */
2276 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
2277 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
2278 need_reset = true;
2279 }
2280
2281 #endif /* ETHTOOL_GRXRINGS */
2282 if (need_reset)
2283 ixgbe_do_reset(netdev);
2284
2285 return 0;
2286 }
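/*
 * Flow Director has two mutually exclusive personalities toggled by
 * ETH_FLAG_NTUPLE above: ATR (automatic hash-based flow steering) and
 * perfect filters (explicit ethtool rules).  Flipping either way sets
 * need_reset, since the hardware filter tables must be rebuilt.
 */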
2287
2288 #endif /* ETHTOOL_GFLAGS */
2289 #endif /* HAVE_NDO_SET_FEATURES */
2290 #ifdef ETHTOOL_GRXRINGS
2291 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2292 struct ethtool_rxnfc *cmd)
2293 {
2294 union ixgbe_atr_input *mask = &adapter->fdir_mask;
2295 struct ethtool_rx_flow_spec *fsp =
2296 (struct ethtool_rx_flow_spec *)&cmd->fs;
2297 struct hlist_node *node, *node2;
2298 struct ixgbe_fdir_filter *rule = NULL;
2299
2300 /* report total rule count */
2301 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2302
2303 hlist_for_each_entry_safe(rule, node, node2,
2304 &adapter->fdir_filter_list, fdir_node) {
2305 if (fsp->location <= rule->sw_idx)
2306 break;
2307 }
2308
2309 if (!rule || fsp->location != rule->sw_idx)
2310 return -EINVAL;
2311
2312 /* fill out the flow spec entry */
2313
2314 /* set flow type field */
2315 switch (rule->filter.formatted.flow_type) {
2316 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2317 fsp->flow_type = TCP_V4_FLOW;
2318 break;
2319 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2320 fsp->flow_type = UDP_V4_FLOW;
2321 break;
2322 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2323 fsp->flow_type = SCTP_V4_FLOW;
2324 break;
2325 case IXGBE_ATR_FLOW_TYPE_IPV4:
2326 fsp->flow_type = IP_USER_FLOW;
2327 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2328 fsp->h_u.usr_ip4_spec.proto = 0;
2329 fsp->m_u.usr_ip4_spec.proto = 0;
2330 break;
2331 default:
2332 return -EINVAL;
2333 }
2334
2335 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2336 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2337 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2338 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2339 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2340 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2341 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2342 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2343 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2344 fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2345 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2346 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2347 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2348 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2349 fsp->flow_type |= FLOW_EXT;
2350
2351 /* record action */
2352 if (rule->action == IXGBE_FDIR_DROP_QUEUE)
2353 fsp->ring_cookie = RX_CLS_FLOW_DISC;
2354 else
2355 fsp->ring_cookie = rule->action;
2356
2357 return 0;
2358 }
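/*
 * This is the ETHTOOL_GRXCLSRULE backend, i.e. what answers
 * "ethtool -n eth0 rule 5" (illustrative).  Note the reuse of the
 * ext fields: vlan_etype carries the 2-byte flexible match and
 * data[1] the VM pool, mirroring how the "set" path below packs them.
 */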
2359
2360 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2361 struct ethtool_rxnfc *cmd,
2362 u32 *rule_locs)
2363 {
2364 struct hlist_node *node, *node2;
2365 struct ixgbe_fdir_filter *rule;
2366 int cnt = 0;
2367
2368 /* report total rule count */
2369 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2370
2371 hlist_for_each_entry_safe(rule, node, node2,
2372 &adapter->fdir_filter_list, fdir_node) {
2373 if (cnt == cmd->rule_cnt)
2374 return -EMSGSIZE;
2375 rule_locs[cnt] = rule->sw_idx;
2376 cnt++;
2377 }
2378
2379 cmd->rule_cnt = cnt;
2380
2381 return 0;
2382 }
2383
2384 static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2385 struct ethtool_rxnfc *cmd)
2386 {
2387 cmd->data = 0;
2388
2389 /* if RSS is disabled then report no hashing */
2390 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
2391 return 0;
2392
2393 /* Report default options for RSS on ixgbe */
2394 switch (cmd->flow_type) {
2395 case TCP_V4_FLOW:
2396 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
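/* fall through */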
2397 case UDP_V4_FLOW:
2398 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2399 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
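/* fall through */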
2400 case SCTP_V4_FLOW:
2401 case AH_ESP_V4_FLOW:
2402 case AH_V4_FLOW:
2403 case ESP_V4_FLOW:
2404 case IPV4_FLOW:
2405 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2406 break;
2407 case TCP_V6_FLOW:
2408 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
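/* fall through */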
2409 case UDP_V6_FLOW:
2410 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2411 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
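/* fall through */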
2412 case SCTP_V6_FLOW:
2413 case AH_ESP_V6_FLOW:
2414 case AH_V6_FLOW:
2415 case ESP_V6_FLOW:
2416 case IPV6_FLOW:
2417 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2418 break;
2419 default:
2420 return -EINVAL;
2421 }
2422
2423 return 0;
2424 }
2425
2426 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2427 #ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
2428 void *rule_locs)
2429 #else
2430 u32 *rule_locs)
2431 #endif
2432 {
2433 struct ixgbe_adapter *adapter = netdev_priv(dev);
2434 int ret = -EOPNOTSUPP;
2435
2436 switch (cmd->cmd) {
2437 case ETHTOOL_GRXRINGS:
2438 cmd->data = adapter->num_rx_queues;
2439 ret = 0;
2440 break;
2441 case ETHTOOL_GRXCLSRLCNT:
2442 cmd->rule_cnt = adapter->fdir_filter_count;
2443 ret = 0;
2444 break;
2445 case ETHTOOL_GRXCLSRULE:
2446 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2447 break;
2448 case ETHTOOL_GRXCLSRLALL:
2449 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd,
2450 (u32 *)rule_locs);
2451 break;
2452 case ETHTOOL_GRXFH:
2453 ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2454 break;
2455 default:
2456 break;
2457 }
2458
2459 return ret;
2460 }
2461
2462 static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2463 struct ixgbe_fdir_filter *input,
2464 u16 sw_idx)
2465 {
2466 struct ixgbe_hw *hw = &adapter->hw;
2467 struct hlist_node *node, *node2, *parent;
2468 struct ixgbe_fdir_filter *rule;
2469 int err = -EINVAL;
2470
2471 parent = NULL;
2472 rule = NULL;
2473
2474 hlist_for_each_entry_safe(rule, node, node2,
2475 &adapter->fdir_filter_list, fdir_node) {
2476 /* hash found, or no matching entry */
2477 if (rule->sw_idx >= sw_idx)
2478 break;
2479 parent = node;
2480 }
2481
2482 /* if there is an old rule occupying our place, remove it */
2483 if (rule && (rule->sw_idx == sw_idx)) {
2484 if (!input || (rule->filter.formatted.bkt_hash !=
2485 input->filter.formatted.bkt_hash)) {
2486 err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2487 &rule->filter,
2488 sw_idx);
2489 }
2490
2491 hlist_del(&rule->fdir_node);
2492 kfree(rule);
2493 adapter->fdir_filter_count--;
2494 }
2495
2496 /*
2497 * If no input was given this was a delete request: err is 0 when a
2498 * rule was found and removed from the list, else -EINVAL.
2499 */
2500 if (!input)
2501 return err;
2502
2503 /* initialize node and set software index */
2504 INIT_HLIST_NODE(&input->fdir_node);
2505
2506 /* add filter to the list */
2507 if (parent)
2508 hlist_add_after(parent, &input->fdir_node);
2509 else
2510 hlist_add_head(&input->fdir_node,
2511 &adapter->fdir_filter_list);
2512
2513 /* update counts */
2514 adapter->fdir_filter_count++;
2515
2516 return 0;
2517 }
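/*
 * fdir_filter_list is kept sorted by sw_idx: the walk above finds the
 * insertion parent, an existing rule occupying the slot is evicted,
 * and a NULL "input" turns the call into a pure delete (hence the
 * -EINVAL default when nothing matched).
 */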
2518
2519 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2520 u8 *flow_type)
2521 {
2522 switch (fsp->flow_type & ~FLOW_EXT) {
2523 case TCP_V4_FLOW:
2524 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2525 break;
2526 case UDP_V4_FLOW:
2527 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2528 break;
2529 case SCTP_V4_FLOW:
2530 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2531 break;
2532 case IP_USER_FLOW:
2533 switch (fsp->h_u.usr_ip4_spec.proto) {
2534 case IPPROTO_TCP:
2535 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2536 break;
2537 case IPPROTO_UDP:
2538 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2539 break;
2540 case IPPROTO_SCTP:
2541 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2542 break;
2543 case 0:
2544 if (!fsp->m_u.usr_ip4_spec.proto) {
2545 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2546 break;
2547 }
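/* fall through */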
2548 default:
2549 return 0;
2550 }
2551 break;
2552 default:
2553 return 0;
2554 }
2555
2556 return 1;
2557 }
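/*
 * Note the inverted return convention: 1 on success, 0 on failure,
 * so callers test with "!".  Raw IP_USER_FLOW rules are accepted only
 * with TCP/UDP/SCTP protocol values or a fully wildcarded proto
 * (value 0, mask 0).
 */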
2558
2559 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2560 struct ethtool_rxnfc *cmd)
2561 {
2562 struct ethtool_rx_flow_spec *fsp =
2563 (struct ethtool_rx_flow_spec *)&cmd->fs;
2564 struct ixgbe_hw *hw = &adapter->hw;
2565 struct ixgbe_fdir_filter *input;
2566 union ixgbe_atr_input mask;
2567 int err;
2568
2569 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2570 return -EOPNOTSUPP;
2571
2572 /*
2573 * Don't allow programming if the action targets a queue index
2574 * outside the range of online Rx queues.
2575 */
2576 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
2577 (fsp->ring_cookie >= adapter->num_rx_queues))
2578 return -EINVAL;
2579
2580 /* Don't allow indexes to exist outside of available space */
2581 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2582 e_err(drv, "Location out of range\n");
2583 return -EINVAL;
2584 }
2585
2586 input = kzalloc(sizeof(*input), GFP_ATOMIC);
2587 if (!input)
2588 return -ENOMEM;
2589
2590 memset(&mask, 0, sizeof(union ixgbe_atr_input));
2591
2592 /* set SW index */
2593 input->sw_idx = fsp->location;
2594
2595 /* record flow type */
2596 if (!ixgbe_flowspec_to_flow_type(fsp,
2597 &input->filter.formatted.flow_type)) {
2598 e_err(drv, "Unrecognized flow type\n");
2599 goto err_out;
2600 }
2601
2602 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2603 IXGBE_ATR_L4TYPE_MASK;
2604
2605 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2606 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2607
2608 /* Copy input into formatted structures */
2609 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2610 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2611 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2612 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2613 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2614 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2615 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2616 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2617
2618 if (fsp->flow_type & FLOW_EXT) {
2619 input->filter.formatted.vm_pool =
2620 (unsigned char)ntohl(fsp->h_ext.data[1]);
2621 mask.formatted.vm_pool =
2622 (unsigned char)ntohl(fsp->m_ext.data[1]);
2623 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2624 mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2625 input->filter.formatted.flex_bytes =
2626 fsp->h_ext.vlan_etype;
2627 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2628 }
2629
2630 /* determine if we need to drop or route the packet */
2631 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2632 input->action = IXGBE_FDIR_DROP_QUEUE;
2633 else
2634 input->action = fsp->ring_cookie;
2635
2636 spin_lock(&adapter->fdir_perfect_lock);
2637
2638 if (hlist_empty(&adapter->fdir_filter_list)) {
2639 /* save mask and program input mask into HW */
2640 memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2641 err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2642 if (err) {
2643 e_err(drv, "Error writing mask\n");
2644 goto err_out_w_lock;
2645 }
2646 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2647 e_err(drv, "Only one mask supported per port\n");
2648 goto err_out_w_lock;
2649 }
2650
2651 /* apply mask and compute/store hash */
2652 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2653
2654 /* program filters to filter memory */
2655 err = ixgbe_fdir_write_perfect_filter_82599(hw,
2656 &input->filter, input->sw_idx,
2657 (input->action == IXGBE_FDIR_DROP_QUEUE) ?
2658 IXGBE_FDIR_DROP_QUEUE :
2659 adapter->rx_ring[input->action]->reg_idx);
2660 if (err)
2661 goto err_out_w_lock;
2662
2663 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2664
2665 spin_unlock(&adapter->fdir_perfect_lock);
2666
2667 /* input is now linked into fdir_filter_list; it must not be freed */
2668 return err;
2669 err_out_w_lock:
2670 spin_unlock(&adapter->fdir_perfect_lock);
2671 err_out:
2672 kfree(input);
2673 return -EINVAL;
2674 }
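/*
 * Rule insertion arrives here via ETHTOOL_SRXCLSRLINS, e.g.
 * (addresses, queue and location illustrative):
 *
 *	ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.1 \
 *		dst-port 80 action 2 loc 1
 *
 * Two hard limits are enforced above: "loc" must fit in the FDIR
 * table ((1024 << fdir_pballoc) - 2 slots) and only one field mask
 * can be active per port: the first rule programs it and later
 * rules must match it exactly.
 */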
2675
2676 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2677 struct ethtool_rxnfc *cmd)
2678 {
2679 struct ethtool_rx_flow_spec *fsp =
2680 (struct ethtool_rx_flow_spec *)&cmd->fs;
2681 int err;
2682
2683 spin_lock(&adapter->fdir_perfect_lock);
2684 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, (u16)(fsp->location));
2685 spin_unlock(&adapter->fdir_perfect_lock);
2686
2687 return err;
2688 }
2689
2690 #ifdef ETHTOOL_SRXNTUPLE
2691 /*
2692 * We need to keep this around for kernels 2.6.33 - 2.6.39 in order
2693 * to avoid a NULL pointer dereference: those kernels assumed this
2694 * function was present whenever the NETIF_F_NTUPLE flag was defined.
2695 */
2696 static int ixgbe_set_rx_ntuple(struct net_device *dev,
2697 struct ethtool_rx_ntuple *cmd)
2698 {
2699 return -EOPNOTSUPP;
2700 }
2701
2702 #endif
2703 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2704 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2705 static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2706 struct ethtool_rxnfc *nfc)
2707 {
2708 u32 flags2 = adapter->flags2;
2709
2710 /*
2711 * RSS does not support anything other than hashing
2712 * to queues on src and dst IPs and ports
2713 */
2714 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2715 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2716 return -EINVAL;
2717
2718 switch (nfc->flow_type) {
2719 case TCP_V4_FLOW:
2720 case TCP_V6_FLOW:
2721 if (!(nfc->data & RXH_IP_SRC) ||
2722 !(nfc->data & RXH_IP_DST) ||
2723 !(nfc->data & RXH_L4_B_0_1) ||
2724 !(nfc->data & RXH_L4_B_2_3))
2725 return -EINVAL;
2726 break;
2727 case UDP_V4_FLOW:
2728 if (!(nfc->data & RXH_IP_SRC) ||
2729 !(nfc->data & RXH_IP_DST))
2730 return -EINVAL;
2731 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2732 case 0:
2733 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2734 break;
2735 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2736 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2737 break;
2738 default:
2739 return -EINVAL;
2740 }
2741 break;
2742 case UDP_V6_FLOW:
2743 if (!(nfc->data & RXH_IP_SRC) ||
2744 !(nfc->data & RXH_IP_DST))
2745 return -EINVAL;
2746 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2747 case 0:
2748 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2749 break;
2750 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2751 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2752 break;
2753 default:
2754 return -EINVAL;
2755 }
2756 break;
2757 case AH_ESP_V4_FLOW:
2758 case AH_V4_FLOW:
2759 case ESP_V4_FLOW:
2760 case SCTP_V4_FLOW:
2761 case AH_ESP_V6_FLOW:
2762 case AH_V6_FLOW:
2763 case ESP_V6_FLOW:
2764 case SCTP_V6_FLOW:
2765 if (!(nfc->data & RXH_IP_SRC) ||
2766 !(nfc->data & RXH_IP_DST) ||
2767 (nfc->data & RXH_L4_B_0_1) ||
2768 (nfc->data & RXH_L4_B_2_3))
2769 return -EINVAL;
2770 break;
2771 default:
2772 return -EINVAL;
2773 }
2774
2775 /* if we changed something we need to update flags */
2776 if (flags2 != adapter->flags2) {
2777 struct ixgbe_hw *hw = &adapter->hw;
2778 u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2779
2780 if ((flags2 & UDP_RSS_FLAGS) &&
2781 !(adapter->flags2 & UDP_RSS_FLAGS))
2782 e_warn(drv, "enabling UDP RSS: fragmented packets"
2783 " may arrive out of order to the stack above\n");
2784
2785 adapter->flags2 = flags2;
2786
2787 /* Perform hash on these packet types */
2788 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2789 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2790 | IXGBE_MRQC_RSS_FIELD_IPV6
2791 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2792
2793 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2794 IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
2795
2796 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2797 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2798
2799 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2800 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2801
2802 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2803 }
2804
2805 return 0;
2806 }
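/*
 * Only the UDP 4-tuple bits are actually tunable here; TCP must stay
 * at src/dst IP plus both port halves, and the IPsec/SCTP flow types
 * at src/dst IP only.  Example (device name illustrative):
 *
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn
 *
 * sets IXGBE_FLAG2_RSS_FIELD_IPV4_UDP and rewrites MRQC, at the cost
 * of possible reordering of fragmented UDP flows.
 */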
2807
2808 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2809 {
2810 struct ixgbe_adapter *adapter = netdev_priv(dev);
2811 int ret = -EOPNOTSUPP;
2812
2813 switch (cmd->cmd) {
2814 case ETHTOOL_SRXCLSRLINS:
2815 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
2816 break;
2817 case ETHTOOL_SRXCLSRLDEL:
2818 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
2819 break;
2820 case ETHTOOL_SRXFH:
2821 ret = ixgbe_set_rss_hash_opt(adapter, cmd);
2822 break;
2823 default:
2824 break;
2825 }
2826
2827 return ret;
2828 }
2829
2830 #endif /* ETHTOOL_GRXRINGS */
2831 /* static */
2832 struct ethtool_ops ixgbe_ethtool_ops = {
2833 .get_settings = ixgbe_get_settings,
2834 .set_settings = ixgbe_set_settings,
2835 .get_drvinfo = ixgbe_get_drvinfo,
2836 .get_regs_len = ixgbe_get_regs_len,
2837 .get_regs = ixgbe_get_regs,
2838 .get_wol = ixgbe_get_wol,
2839 .set_wol = ixgbe_set_wol,
2840 .nway_reset = ixgbe_nway_reset,
2841 .get_link = ethtool_op_get_link,
2842 .get_eeprom_len = ixgbe_get_eeprom_len,
2843 .get_eeprom = ixgbe_get_eeprom,
2844 .set_eeprom = ixgbe_set_eeprom,
2845 .get_ringparam = ixgbe_get_ringparam,
2846 .set_ringparam = ixgbe_set_ringparam,
2847 .get_pauseparam = ixgbe_get_pauseparam,
2848 .set_pauseparam = ixgbe_set_pauseparam,
2849 .get_msglevel = ixgbe_get_msglevel,
2850 .set_msglevel = ixgbe_set_msglevel,
2851 #ifndef HAVE_ETHTOOL_GET_SSET_COUNT
2852 .self_test_count = ixgbe_diag_test_count,
2853 #endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
2854 .self_test = ixgbe_diag_test,
2855 .get_strings = ixgbe_get_strings,
2856 #ifdef HAVE_ETHTOOL_SET_PHYS_ID
2857 .set_phys_id = ixgbe_set_phys_id,
2858 #else
2859 .phys_id = ixgbe_phys_id,
2860 #endif /* HAVE_ETHTOOL_SET_PHYS_ID */
2861 #ifndef HAVE_ETHTOOL_GET_SSET_COUNT
2862 .get_stats_count = ixgbe_get_stats_count,
2863 #else /* HAVE_ETHTOOL_GET_SSET_COUNT */
2864 .get_sset_count = ixgbe_get_sset_count,
2865 #endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
2866 .get_ethtool_stats = ixgbe_get_ethtool_stats,
2867 #ifdef HAVE_ETHTOOL_GET_PERM_ADDR
2868 .get_perm_addr = ethtool_op_get_perm_addr,
2869 #endif
2870 .get_coalesce = ixgbe_get_coalesce,
2871 .set_coalesce = ixgbe_set_coalesce,
2872 #ifndef HAVE_NDO_SET_FEATURES
2873 .get_rx_csum = ixgbe_get_rx_csum,
2874 .set_rx_csum = ixgbe_set_rx_csum,
2875 .get_tx_csum = ixgbe_get_tx_csum,
2876 .set_tx_csum = ixgbe_set_tx_csum,
2877 .get_sg = ethtool_op_get_sg,
2878 .set_sg = ethtool_op_set_sg,
2879 #ifdef NETIF_F_TSO
2880 .get_tso = ethtool_op_get_tso,
2881 .set_tso = ixgbe_set_tso,
2882 #endif
2883 #ifdef ETHTOOL_GFLAGS
2884 .get_flags = ethtool_op_get_flags,
2885 .set_flags = ixgbe_set_flags,
2886 #endif
2887 #endif /* HAVE_NDO_SET_FEATURES */
2888 #ifdef ETHTOOL_GRXRINGS
2889 .get_rxnfc = ixgbe_get_rxnfc,
2890 .set_rxnfc = ixgbe_set_rxnfc,
2891 #ifdef ETHTOOL_SRXNTUPLE
2892 .set_rx_ntuple = ixgbe_set_rx_ntuple,
2893 #endif
2894 #endif
2895 };
2896
2897 void ixgbe_set_ethtool_ops(struct net_device *netdev)
2898 {
2899 SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
2900 }
2901 #endif /* SIOCETHTOOL */