/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "4.4.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2016 Intel Corporation.";

static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598]		= &ixgbe_82598_info,
	[board_82599]		= &ixgbe_82599_info,
	[board_X540]		= &ixgbe_X540_info,
	[board_X550]		= &ixgbe_X550_info,
	[board_X550EM_x]	= &ixgbe_X550EM_x_info,
	[board_x550em_a]	= &ixgbe_x550em_a_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */
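/* Usage note (hedged, not from the original file): the deprecated module
 * parameter path is e.g. "modprobe ixgbe max_vfs=4"; on current kernels the
 * standard sysfs interface is preferred, e.g.
 * "echo 4 > /sys/class/net/<iface>/device/sriov_numvfs".
 */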
static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);

static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	if (!pci_is_pcie(parent_dev))
		return -1;

	pcie_capability_read_word(parent_dev, reg, value);
	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
		return -1;
	return 0;
}
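/* The bare offset 18 passed to the helper below is PCI_EXP_LNKSTA, the Link
 * Status register inside the PCIe capability; pcie_capability_read_word()
 * resolves it relative to the capability base.
 */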
static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space of the
	 * parent, as this device is behind a switch
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}

/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from the
 * device. Used to ensure that various locations all have the correct device ID
 * checks.
 */
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		return true;
	default:
		return false;
	}
}
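/* Bandwidth math used below: usable throughput is per-lane GT/s scaled by
 * encoding overhead, times the link width.  2.5GT/s and 5.0GT/s links use
 * 8b/10b encoding (80% efficient, so 2 and 4 GT/s of data per lane), while
 * 8.0GT/s links use 128b/130b (~98%, rounded to 8 GT/s per lane).
 */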
static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
				     int expected_gts)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_gts = 0;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	struct pci_dev *pdev;

	/* Some devices are not connected over PCIe and thus do not negotiate
	 * speed. These devices do not have valid bus info, and thus any report
	 * we generate may not be correct.
	 */
	if (hw->bus.type == ixgbe_bus_type_internal)
		return;

	/* determine whether to use the parent device */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		pdev = adapter->pdev->bus->parent->self;
	else
		pdev = adapter->pdev;

	if (pcie_get_minimum_link(pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 2 * width;
		break;
	case PCIE_SPEED_5_0GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 4 * width;
		break;
	case PCIE_SPEED_8_0GT:
		/* 128b/130b encoding reduces throughput by less than 2% */
		max_gts = 8 * width;
		break;
	default:
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	e_dev_info("PCI Express bandwidth of %dGT/s available\n",
		   max_gts);
	e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
		   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
		    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
		    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
		    "Unknown"),
		   width,
		   (speed == PCIE_SPEED_2_5GT ? "20%" :
		    speed == PCIE_SPEED_5_0GT ? "20%" :
		    speed == PCIE_SPEED_8_0GT ? "<2%" :
		    "Unknown"));

	if (max_gts < expected_gts) {
		e_dev_warn("This is not sufficient for optimal performance of this card.\n");
		e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
			expected_gts);
		e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
	}
}
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbe_wq, &adapter->service_task);
}
static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	e_dev_err("Adapter removed\n");
	if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		ixgbe_service_event_schedule(adapter);
}
static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_STATUS) {
		ixgbe_remove_adapter(hw);
		return;
	}
	value = ixgbe_read_reg(hw, IXGBE_STATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbe_remove_adapter(hw);
}
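/* Note on the busy loop in the read path below: on parts where the
 * management interface runs at 10/100M (see nw_mng_if_sel), posted MMIO
 * writes can still be draining when a read is issued, so the read first
 * polls the SGMII busy register until pending writes have completed.
 */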
/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. It checks for device
 * removal by confirming any read that returns all ones by checking the
 * status register value for all ones. This function avoids reading from
 * the hardware if a removal was previously detected in which case it
 * returns IXGBE_FAILED_READ_REG (all ones).
 */
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (ixgbe_removed(reg_addr))
		return IXGBE_FAILED_READ_REG;
	if (unlikely(hw->phy.nw_mng_if_sel &
		     IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
		struct ixgbe_adapter *adapter;
		int i;

		for (i = 0; i < 200; ++i) {
			value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
			if (likely(!value))
				goto writes_completed;
			if (value == IXGBE_FAILED_READ_REG) {
				ixgbe_remove_adapter(hw);
				return IXGBE_FAILED_READ_REG;
			}
			udelay(5);
		}

		adapter = hw->back;
		e_warn(hw, "register writes incomplete %08x\n", value);
	}

writes_completed:
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbe_check_remove(hw, reg);
	return value;
}
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
	u16 value;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD) {
		ixgbe_remove_adapter(hw);
		return true;
	}
	return false;
}
u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u16 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_WORD;
	pci_read_config_word(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_WORD;
	return value;
}
#ifdef CONFIG_PCI_IOV
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_DWORD;
	pci_read_config_dword(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_DWORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_DWORD;
	return value;
}
#endif /* CONFIG_PCI_IOV */
void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (ixgbe_removed(hw->hw_addr))
		return;
	pci_write_config_word(adapter->pdev, reg, value);
}
static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}
struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{ .name = NULL }
};
/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}
}
/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			dev_trans_start(netdev),
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info(" %s     %s              %s        %s\n",
		"Queue [NTU] [NTC] [bi(ntc)->dma  ]",
		"leng", "ntw", "timestamp");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(tx_buffer, dma),
			dma_unmap_len(tx_buffer, len),
			tx_buffer->next_to_watch,
			(u64)tx_buffer->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * 82598 Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | POPTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN  |
	 *   +--------------------------------------------------------------+
	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
	 *
	 * 82598 Advanced Transmit Descriptor (Write-Back Format)
	 *   +--------------------------------------------------------------+
	 *   |                          RSV [63:0]                          |
	 *   +--------------------------------------------------------------+
	 * 8 |            RSV           |  STA  |          NXTSEQ           |
	 *   +--------------------------------------------------------------+
	 *   63                       36 35   32 31                         0
	 *
	 * 82599+ Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |PAYLEN  |POPTS|CC|IDX  |STA  |DCMD  |DTYP |MAC  |RSV  |DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63     46 45 40 39 38 36 35 32 31  24 23 20 19 18 17 16 15     0
	 *
	 * 82599+ Advanced Transmit Descriptor (Write-Back Format)
	 *   +--------------------------------------------------------------+
	 *   |                          RSV [63:0]                          |
	 *   +--------------------------------------------------------------+
	 * 8 |            RSV           |  STA  |           RSV             |
	 *   +--------------------------------------------------------------+
	 *   63                       36 35   32 31                         0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s    %s              %s        %s          %s\n",
			"T [desc]     [address 63:0  ] ",
			"[PlPOIdStDDt Ln] [bi->dma       ] ",
			"leng", "ntw", "timestamp", "bi->skb");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC(tx_ring, i);
			tx_buffer = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (dma_unmap_len(tx_buffer, len) > 0) {
				pr_info("T [0x%03X]    %016llX %016llX %016llX %08X %p %016llX %p",
					i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)dma_unmap_addr(tx_buffer, dma),
					dma_unmap_len(tx_buffer, len),
					tx_buffer->next_to_watch,
					(u64)tx_buffer->time_stamp,
					tx_buffer->skb);
				if (i == tx_ring->next_to_use &&
					i == tx_ring->next_to_clean)
					pr_cont(" NTC/U\n");
				else if (i == tx_ring->next_to_use)
					pr_cont(" NTU\n");
				else if (i == tx_ring->next_to_clean)
					pr_cont(" NTC\n");
				else
					pr_cont("\n");

				if (netif_msg_pktdata(adapter) &&
				    tx_buffer->skb)
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						tx_buffer->skb->data,
						dma_unmap_len(tx_buffer, len),
						true);
			}
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Receive Descriptor Formats
	 *
	 * 82598 Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 * 82598 Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 |       RSS Hash /  |SPH| HDR_LEN  | RSV |Packet|  RSS |
	 *   | Packet   | IP     |   |          |     | Type | Type |
	 *   | Checksum | Ident  |   |          |     |      |      |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 *
	 * 82599+ Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 * 82599+ Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 |RSS / Frag Checksum|SPH| HDR_LEN  |RSC- |Packet|  RSS |
	 *   |/ RTT / PCoE_PARAM |   |          | CNT | Type | Type |
	 *   |/ Flow Dir Flt ID  |   |          |     |      |      |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31          20 19                 0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s%s",
			"R  [desc]      [ PktBuf     A0] ",
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb       ] ",
			"<-- Adv Rx Read format\n");
		pr_info("%s%s%s",
			"RWB[desc]      [PcsmIpSHl PtRs] ",
			"[vl er S cks ln] ---------------- [bi->skb       ] ",
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter) &&
				    rx_buffer_info->dma) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   page_address(rx_buffer_info->page) +
						    rx_buffer_info->page_offset,
					   ixgbe_rx_bufsz(rx_ring), true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");
		}
	}
}
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
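/* IVAR layout example (82599 and later): each 32-bit IVAR register carries
 * four 8-bit entries covering two queues, ordered Rx(2n), Tx(2n), Rx(2n+1),
 * Tx(2n+1).  Mapping Tx queue 5 to MSI-X vector 3 therefore selects IVAR(2)
 * and writes 0x83 (vector | IXGBE_IVAR_ALLOC_VAL) at bit offset
 * 16 * (5 & 1) + 8 * 1 = 24.
 */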
/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
		}
		break;
	default:
		break;
	}
}
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
				      struct ixgbe_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	int i;
	u32 data;

	if ((hw->fc.current_mode != ixgbe_fc_full) &&
	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
		return;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		break;
	default:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	}
	hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;

	for (i = 0; i < adapter->num_tx_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->tx_ring[i]->state);
}
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 xoff[8] = {0};
	u8 tc;
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
		ixgbe_update_xoff_rx_lfc(adapter);
		return;
	}

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		u32 pxoffrxc;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += pxoffrxc;
		/* Get the TC for given UP */
		tc = netdev_get_prio_tc_map(adapter->netdev, i);
		xoff[tc] += pxoffrxc;
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

		tc = tx_ring->dcb_tc;
		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}
static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->stats.packets;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	u32 head, tail;

	if (ring->l2_accel_priv)
		adapter = ring->l2_accel_priv->real_adapter;
	else
		adapter = netdev_priv(ring->netdev);

	hw = &adapter->hw;
	head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (tx_done_old == tx_done && tx_pending)
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
					&tx_ring->state);
	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;
	/* reset the countdown */
	clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);

	return false;
}
/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{

	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
		e_warn(drv, "initiating reset due to tx timeout\n");
		ixgbe_service_event_schedule(adapter);
	}
}
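/* The RTTBCNRC rate factor programmed below is the ratio of link speed to
 * the requested rate in 10.14 fixed point: link Mbps shifted left by
 * IXGBE_RTTBCNRC_RF_INT_SHIFT (14) and divided by maxrate.  For example,
 * capping a queue at 1000 Mbps on a 10G link yields (10000 << 14) / 1000,
 * i.e. a factor of 10.0.
 */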
/**
 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
 **/
static int ixgbe_tx_maxrate(struct net_device *netdev,
			    int queue_index, u32 maxrate)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 bcnrc_val = ixgbe_link_mbps(adapter);

	if (!maxrate)
		return 0;

	/* Calculate the rate factor values to set */
	bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
	bcnrc_val /= maxrate;

	/* clear everything but the rate factor */
	bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
	IXGBE_RTTBCNRC_RF_DEC_MASK;

	/* enable the rate scheduler */
	bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;

	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);

	return 0;
}
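/* Ring-index trick used in the cleanup loop below: next_to_clean is biased
 * by -count so the hot loop can detect wrap-around with a cheap
 * "if (unlikely(!i))" test instead of comparing against the ring size; the
 * bias is removed again before the index is written back.
 */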
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring, int napi_budget)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;
		e_err(drv, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, i,
			tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index)
		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
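/* Direct Cache Access (DCA) support: when enabled, descriptor (and
 * optionally payload) DMA writes are tagged so the chipset steers them
 * toward the cache of the CPU servicing the queue, avoiding a memory round
 * trip.  The per-ring tag must be rewritten whenever the servicing CPU
 * changes, which is what the helpers below do.
 */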
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl = 0;
	u16 reg_offset;

	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		txctrl = dca3_get_tag(tx_ring->dev, cpu);

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		/* for unknown hardware do not write register */
		return;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl = 0;
	u8 reg_idx = rx_ring->reg_idx;

	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		rxctrl = dca3_get_tag(rx_ring->dev, cpu);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		break;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_RXCTRL_DATA_DCA_EN |
		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	ixgbe_for_each_ring(ring, q_vector->tx)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	ixgbe_for_each_ring(ring, q_vector->rx)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	/* always use CB2 mode, difference is masked in the CB driver */
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_MODE_CB2);
	else
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_DISABLE);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
					IXGBE_DCA_CTRL_DCA_MODE_CB2);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
					IXGBE_DCA_CTRL_DCA_DISABLE);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
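/* Only the RSS type codes below hash over the L4 ports in addition to the IP
 * addresses, so only they may be reported to the stack as PKT_HASH_TYPE_L4;
 * every other type degrades to PKT_HASH_TYPE_L3.
 */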
#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
				 union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
				    union ixgbe_adv_rx_desc *rx_desc)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}

#endif /* IXGBE_FCOE */
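/* Checksum offload below relies on three descriptor status/error bit pairs:
 * IPCS/IPE for the IP header, L4CS/TCPE for the TCP or UDP checksum, and
 * OUTERIPCS/OUTERIPER for the outer header of VXLAN/Geneve encapsulated
 * frames.
 */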
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	bool encap_pkt = false;

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check for VXLAN and Geneve packets */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
		encap_pkt = true;
		skb->encapsulation = 1;
	}

	/* if IP and error */
	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
			return;

		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (encap_pkt) {
		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
			return;

		if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}
		/* If we checked the outer header let the stack know */
		skb->csum_level = 1;
	}
}
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				    struct ixgbe_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);

	/*
	 * if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
			break;

		/*
		 * Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.upper.status_error = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
				   struct sk_buff *skb)
{
	u16 hdr_len = skb_headlen(skb);

	/* set gso_size to avoid messing up TCP MSS */
	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
						 IXGBE_CB(skb)->append_cnt);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}

static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
				   struct sk_buff *skb)
{
	/* if append_cnt is 0 then frame is not RSC */
	if (!IXGBE_CB(skb)->append_cnt)
		return;

	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
	rx_ring->rx_stats.rsc_flush++;

	ixgbe_set_rsc_gso_size(rx_ring, skb);

	/* gso_size is computed using append_cnt so always clear it last */
	IXGBE_CB(skb)->append_cnt = 0;
}
/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	u32 flags = rx_ring->q_vector->adapter->flags;

	ixgbe_update_rsc_stats(rx_ring, skb);

	ixgbe_rx_hash(rx_ring, rx_desc, skb);

	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

	if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
		ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, dev);
}
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
			 struct sk_buff *skb)
{
	skb_mark_napi_id(skb, &q_vector->napi);
	if (ixgbe_qv_busy_polling(q_vector))
		netif_receive_skb(skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}
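/* With RSC (hardware receive-side coalescing) the descriptors of a frame are
 * not necessarily consecutive: each write-back carries a NEXTP index pointing
 * at the next buffer in the chain, which is why ntc may be overridden below
 * instead of simply advancing by one.
 */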
/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
			     union ixgbe_adv_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBE_RX_DESC(rx_ring, ntc));

	/* update RSC append count if present */
	if (ring_is_rsc_enabled(rx_ring)) {
		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);

		if (unlikely(rsc_enabled)) {
			u32 rsc_cnt = le32_to_cpu(rsc_enabled);

			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;

			/* update ntc based on RSC value */
			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
			ntc &= IXGBE_RXDADV_NEXTP_MASK;
			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
		}
	}

	/* if we are the last buffer then there is nothing else to do */
	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buffer_info[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}
/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
			    struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/*
	 * it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lomem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/*
	 * we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}
/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */
static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
				struct sk_buff *skb)
{
	/* if the page was released unmap it, else just sync our portion */
	if (unlikely(IXGBE_CB(skb)->page_released)) {
		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
		IXGBE_CB(skb)->page_released = false;
	} else {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];

		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      frag->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);
	}
	IXGBE_CB(skb)->dma = 0;
}
/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	struct net_device *netdev = rx_ring->netdev;

	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbe_test_staterr(rx_desc,
					IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
	    !(netdev->features & NETIF_F_RXALL))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		ixgbe_pull_tail(rx_ring, skb);

#ifdef IXGBE_FCOE
	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
		return false;

#endif
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *old_buff)
{
	struct ixgbe_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 ixgbe_rx_bufsz(rx_ring),
					 DMA_FROM_DEVICE);
}

static inline bool ixgbe_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
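/* Page recycling scheme: with 4K pages the receive buffer is half a page, so
 * XOR-flipping page_offset with the buffer size alternates between the two
 * halves, while the extra page reference taken in ixgbe_add_rx_frag() keeps
 * the half handed to the stack alive.  With larger pages the offset simply
 * walks forward until the page is exhausted.
 */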
/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
			      struct ixgbe_rx_buffer *rx_buffer,
			      union ixgbe_adv_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
	unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
				   ixgbe_rx_bufsz(rx_ring);
#endif

	if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!ixgbe_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(ixgbe_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= truesize;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > last_offset)
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	page_ref_inc(page);

	return true;
}
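/* Illustration of the page-flip reuse above (assuming 4K pages and a 2K
 * receive buffer): truesize is 2048, so page_offset toggles between 0x000
 * and 0x800 on every use, letting the two halves of a single page
 * alternate between hardware and stack ownership.
 */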
static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
					     union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_rx_buffer *rx_buffer;
	struct sk_buff *skb;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	skb = rx_buffer->skb;

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     IXGBE_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/*
		 * we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);

		/*
		 * Delay unmapping of the first packet. It carries the
		 * header information, HW may still access the header
		 * after the writeback.  Only unmap it when EOP is
		 * reached
		 */
		if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
			goto dma_sync;

		IXGBE_CB(skb)->dma = rx_buffer->dma;
	} else {
		if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
			ixgbe_dma_sync_frag(rx_ring, skb);

dma_sync:
		/* we are reusing so sync this buffer for CPU use */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_buffer->dma,
					      rx_buffer->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		rx_buffer->skb = NULL;
	}

	/* pull page into skb */
	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
		/* the page has been released from the ring */
		IXGBE_CB(skb)->page_released = true;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       ixgbe_rx_pg_size(rx_ring),
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;

	return skb;
}
/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
			      struct ixgbe_ring *rx_ring,
			      const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int ddp_bytes;
	unsigned int mss = 0;
#endif /* IXGBE_FCOE */
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct sk_buff *skb;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!rx_desc->wb.upper.status_error)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		/* retrieve a buffer from the ring */
		skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* place incomplete frames back on ring for completion */
		if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* verify the packet layout is correct */
		if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
			continue;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, timestamp, VLAN, and protocol */
		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);

#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			/* include DDPed FCoE data */
			if (ddp_bytes > 0) {
				if (!mss) {
					mss = rx_ring->netdev->mtu -
						sizeof(struct fcoe_hdr) -
						sizeof(struct fc_frame_header) -
						sizeof(struct fcoe_crc_eof);
					if (mss > 512)
						mss &= ~511;
				}
				total_rx_bytes += ddp_bytes;
				total_rx_packets += DIV_ROUND_UP(ddp_bytes,
								 mss);
			}
			if (!ddp_bytes) {
				dev_kfree_skb_any(skb);
				continue;
			}
		}

#endif /* IXGBE_FCOE */
		ixgbe_rx_skb(q_vector, skb);

		/* update budget accounting */
		total_rx_packets++;
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
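/* Note on the FCoE accounting above: bytes placed directly by DDP never
 * pass through the skb, so they are credited here and converted into an
 * approximate packet count by dividing by the estimated FC payload size
 * (mss), which is why total_rx_packets can grow by more than one per skb.
 */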
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbe_low_latency_recv(struct napi_struct *napi)
{
	struct ixgbe_q_vector *q_vector =
			container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int found = 0;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbe_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbe_for_each_ring(ring, q_vector->rx) {
		found = ixgbe_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbe_qv_unlock_poll(q_vector);

	return found;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int v_idx;
	u32 mask;

	/* Populate MSIX to EITR Select */
	if (adapter->num_vfs > 32) {
		u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
	}

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct ixgbe_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbe_for_each_ring(ring, q_vector->rx)
			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbe_for_each_ring(ring, q_vector->tx)
			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		ixgbe_write_eitr(q_vector);
	}

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
			       v_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER |
		  IXGBE_EIMS_MAILBOX |
		  IXGBE_EIMS_LSC);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
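/* Note: the EITR write above throttles the "other causes" vector (index
 * v_idx, one past the queue vectors).  The ITR value here is kept in units
 * of roughly 0.25 usec, so 1950 corresponds to an interval of about
 * 488 usec, i.e. on the order of 2000 interrupts/sec for link, mailbox
 * and similar events.
 */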
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see ixgbe_param.c)
 **/
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
			     struct ixgbe_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (12000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	if (timepassed_us == 0)
		return;

	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
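/* Worked example for the heuristic above: at IXGBE_20K_ITR the vector
 * interval is 200, so timepassed_us = 200 >> 2 = 50.  Receiving 1500
 * bytes in that window gives bytes_perint = 30, which is > 20, so the
 * ring container is promoted from low_latency to bulk_latency.
 */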
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}
static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbe_update_itr(q_vector, &q_vector->tx);
	ixgbe_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IXGBE_12K_ITR;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbe_write_eitr(q_vector);
	}
}
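/* Worked example of the smoothing above: stepping from q_vector->itr = 336
 * (12K ints/s) toward a target of IXGBE_20K_ITR (200) gives
 * (10 * 200 * 336) / (9 * 200 + 336) = 314, so the written interval
 * converges over several updates instead of jumping straight to the new
 * value.
 */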
/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/
static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = adapter->interrupt_event;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
	    !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_T3_LOM:
		/*
		 * Since the warning interrupt is for both ports
		 * we don't have to check if:
		 *  - This interrupt wasn't for our port.
		 *  - We may have missed the interrupt so always have to
		 *    check if we got a LSC
		 */
		if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
		    !(eicr & IXGBE_EICR_LSC))
			return;

		if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
			u32 speed;
			bool link_up = false;

			hw->mac.ops.check_link(hw, &speed, &link_up, false);

			if (link_up)
				return;
		}

		/* Check if this is not due to overtemp */
		if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
			return;

		break;
	default:
		if (adapter->hw.mac.type >= ixgbe_mac_X540)
			return;
		if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
			return;
		break;
	}
	e_crit(drv, "%s\n", ixgbe_overheat_msg);

	adapter->interrupt_event = 0;
}
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
		e_crit(probe, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
	}
}
static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
		return;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		/*
		 * Need to check link state so complete overtemp check
		 * on service task
		 */
		if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
		     (eicr & IXGBE_EICR_LSC)) &&
		    (!test_bit(__IXGBE_DOWN, &adapter->state))) {
			adapter->interrupt_event = eicr;
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
			ixgbe_service_event_schedule(adapter);
			return;
		}
		return;
	case ixgbe_mac_X540:
		if (!(eicr & IXGBE_EICR_TS))
			return;
		break;
	default:
		return;
	}

	e_crit(drv, "%s\n", ixgbe_overheat_msg);
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->phy.type == ixgbe_phy_nl)
			return true;
		return false;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return true;
		default:
			return false;
		}
	default:
		return false;
	}
}
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);

	if (!ixgbe_is_sfp(hw))
		return;

	/* Later MAC's use different SDP */
	if (hw->mac.type >= ixgbe_mac_X540)
		eicr_mask = IXGBE_EICR_GPI_SDP0_X540;

	if (eicr & eicr_mask) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
			adapter->sfp_poll_time = 0;
			ixgbe_service_event_schedule(adapter);
		}
	}

	if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
	    (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
			ixgbe_service_event_schedule(adapter);
		}
	}
}
static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
		ixgbe_service_event_schedule(adapter);
	}
}
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
					   u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		mask = (qmask & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
		break;
	default:
		break;
	}
	/* skip the flush */
}
static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
					    u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		mask = (qmask & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
		break;
	default:
		break;
	}
	/* skip the flush */
}
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
				    bool flush)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* don't reenable LSC while waiting for link */
	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
		mask &= ~IXGBE_EIMS_LSC;

	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			mask |= IXGBE_EIMS_GPI_SDP0(hw);
			break;
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			mask |= IXGBE_EIMS_TS;
			break;
		default:
			break;
		}
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP1(hw);
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_GPI_SDP1(hw);
		mask |= IXGBE_EIMS_GPI_SDP2(hw);
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
			mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
		if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_MAILBOX;
		break;
	default:
		break;
	}

	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
	    !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	if (queues)
		ixgbe_irq_enable_queues(adapter, ~0);
	if (flush)
		IXGBE_WRITE_FLUSH(&adapter->hw);
}
static irqreturn_t ixgbe_msix_other(int irq, void *data)
{
	struct ixgbe_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/*
	 * Workaround for Silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which later be done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);

	/* The lower 16bits of the EICR register are for the queue interrupts
	 * which should be masked here in order to not accidentally clear them if
	 * the bits are high when ixgbe_msix_other is called. There is a race
	 * condition otherwise which results in possible performance loss
	 * especially if the ixgbe_msix_other interrupt is triggering
	 * consistently (as it would when PPS is turned on for the X540 device)
	 */
	eicr &= 0xFFFF0000;

	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (eicr & IXGBE_EICR_MAILBOX)
		ixgbe_msg_task(adapter);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
		    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
			adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
			ixgbe_service_event_schedule(adapter);
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
					IXGBE_EICR_GPI_SDP0_X540);
		}
		if (eicr & IXGBE_EICR_ECC) {
			e_info(link, "Received ECC Err, initiating reset\n");
			set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
			ixgbe_service_event_schedule(adapter);
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}
		/* Handle Flow Director Full threshold interrupt */
		if (eicr & IXGBE_EICR_FLOW_DIR) {
			int reinit_count = 0;
			int i;
			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *ring = adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
						       &ring->state))
					reinit_count++;
			}
			if (reinit_count) {
				/* no more flow director interrupts until after init */
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
				adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
				ixgbe_service_event_schedule(adapter);
			}
		}
		ixgbe_check_sfp_event(adapter, eicr);
		ixgbe_check_overtemp_event(adapter, eicr);
		break;
	default:
		break;
	}

	ixgbe_check_fan_failure(adapter, eicr);

	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
		ixgbe_ptp_check_pps_event(adapter);

	/* re-enable the original interrupt state, no lsc, no queues */
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter, false, false);

	return IRQ_HANDLED;
}
static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */

	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/
int ixgbe_poll(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
				container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_dca(q_vector);
#endif

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	/* Exit if we are called by netpoll or busy polling is active */
	if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbe_for_each_ring(ring, q_vector->rx) {
		int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
						 per_ring_budget);

		work_done += cleaned;
		if (cleaned >= per_ring_budget)
			clean_complete = false;
	}

	ixgbe_qv_unlock_napi(q_vector);
	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* all work done, exit the polling mode */
	napi_complete_done(napi, work_done);
	if (adapter->rx_itr_setting & 1)
		ixgbe_set_itr(q_vector);
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));

	return min(work_done, budget - 1);
}
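/* Note: returning min(work_done, budget - 1) guarantees a value strictly
 * less than budget, which tells the NAPI core that this vector finished
 * its work even when work_done happens to equal budget.
 */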
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			e_err(probe, "request_irq failed for MSIX interrupt "
			      "Error: %d\n", err);
			goto free_queue_irqs;
		}
		/* If Flow Director is enabled, set interrupt affinity */
		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
			/* assign the mask for this irq */
			irq_set_affinity_hint(entry->vector,
					      &q_vector->affinity_mask);
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  ixgbe_msix_other, 0, netdev->name, adapter);
	if (err) {
		e_err(probe, "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_set_affinity_hint(adapter->msix_entries[vector].vector,
				      NULL);
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}
/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct ixgbe_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u32 eicr;

	/*
	 * Workaround for silicon errata #26 on 82598.  Mask the interrupt
	 * before the read of EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
		/*
		 * shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM
		 * finish the workaround of silicon errata on 82598.  Unmask
		 * the interrupt that we masked before the EICR read.
		 */
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable(adapter, true, true);
		return IRQ_NONE;	/* Not our interrupt */
	}

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		ixgbe_check_sfp_event(adapter, eicr);
		/* Fall through */
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (eicr & IXGBE_EICR_ECC) {
			e_info(link, "Received ECC Err, initiating reset\n");
			set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
			ixgbe_service_event_schedule(adapter);
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}
		ixgbe_check_overtemp_event(adapter, eicr);
		break;
	default:
		break;
	}

	ixgbe_check_fan_failure(adapter, eicr);
	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
		ixgbe_ptp_check_pps_event(adapter);

	/* would disable interrupts here but EIAM disabled it */
	napi_schedule_irqoff(&q_vector->napi);

	/*
	 * re-enable link(maybe) and non-queue interrupts, no flush.
	 * ixgbe_poll will re-enable the queue interrupts
	 */
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter, false, false);

	return IRQ_HANDLED;
}
/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		err = ixgbe_request_msix_irqs(adapter);
	else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
				  netdev->name, adapter);
	else
		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
				  netdev->name, adapter);

	if (err)
		e_err(probe, "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	int vector;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		free_irq(adapter->pdev->irq, adapter);
		return;
	}

	if (!adapter->msix_entries)
		return;

	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		/* free only the irqs that were actually requested */
		if (!q_vector->rx.ring && !q_vector->tx.ring)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		free_irq(entry->vector, q_vector);
	}

	free_irq(adapter->msix_entries[vector].vector, adapter);
}
/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
		break;
	default:
		break;
	}
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int vector;

		for (vector = 0; vector < adapter->num_q_vectors; vector++)
			synchronize_irq(adapter->msix_entries[vector].vector);

		synchronize_irq(adapter->msix_entries[vector++].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 *
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];

	ixgbe_write_eitr(q_vector);

	ixgbe_set_ivar(adapter, 0, 0, 0);
	ixgbe_set_ivar(adapter, 1, 0, 0);

	e_info(hw, "Legacy interrupt IVAR setup done\n");
}
/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
			(tdba & DMA_BIT_MASK(32)));
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));
	IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);

	/*
	 * set WTHRESH to encourage burst writeback, it should not be set
	 * higher than 1 when:
	 * - ITR is 0 as it could cause false TX hangs
	 * - ITR is set to > 100k int/sec and BQL is enabled
	 *
	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
		txdctl |= 1u << 16;	/* WTHRESH = 1 */
	else
		txdctl |= 8u << 16;	/* WTHRESH = 8 */

	/*
	 * Setting PTHRESH to 32 both improves performance
	 * and avoids a TX hang with DFP enabled
	 */
	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */

	/* reinitialize flowdirector state */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		ring->atr_sample_rate = adapter->atr_sample_rate;
		ring->atr_count = 0;
		set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
	} else {
		ring->atr_sample_rate = 0;
	}

	/* initialize XPS */
	if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
		struct ixgbe_q_vector *q_vector = ring->q_vector;

		if (q_vector)
			netif_set_xps_queue(ring->netdev,
					    &q_vector->affinity_mask,
					    ring->queue_index);
	}

	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);

	/* enable queue */
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);

	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
}
static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rttdcs, mtqc;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* disable the arbiter while setting MTQC */
	rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	rttdcs |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);

	/* set transmit pool layout */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		mtqc = IXGBE_MTQC_VT_ENA;
		if (tcs > 4)
			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
		else if (tcs > 1)
			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		else if (adapter->ring_feature[RING_F_VMDQ].mask ==
			 IXGBE_82599_VMDQ_4Q_MASK)
			mtqc |= IXGBE_MTQC_32VF;
		else
			mtqc |= IXGBE_MTQC_64VF;
	} else {
		if (tcs > 4)
			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
		else if (tcs > 1)
			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		else
			mtqc = IXGBE_MTQC_64Q_1PB;
	}

	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);

	/* Enable Security TX Buffer IFG for multiple pb */
	if (tcs) {
		u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
		sectx |= IXGBE_SECTX_DCB;
		IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
	}

	/* re-enable the arbiter */
	rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
}
/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 dmatxctl;
	u32 i;

	ixgbe_setup_mtqc(adapter);

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* DMATXCTL.EN must be before Tx queues are enabled */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
	}

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
				 struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 reg_idx = ring->reg_idx;
	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));

	srrctl |= IXGBE_SRRCTL_DROP_EN;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}

static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 reg_idx = ring->reg_idx;
	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));

	srrctl &= ~IXGBE_SRRCTL_DROP_EN;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#else
static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#endif
{
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	/*
	 * We should set the drop enable bit if:
	 *  SR-IOV is enabled
	 *   or
	 *  Number of Rx queues > 1 and flow control is disabled
	 *
	 *  This allows us to avoid head of line blocking for security
	 *  and performance reasons.
	 */
	if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
	    !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++)
			ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
	}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;
	u8 reg_idx = rx_ring->reg_idx;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		u16 mask = adapter->ring_feature[RING_F_RSS].mask;

		/*
		 * if VMDq is not active we must program one srrctl register
		 * per RSS queue since we have enabled RDRXCTL.MVMEN
		 */
		reg_idx &= mask;
	}

	/* configure header buffer length, needed for RSC */
	srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;

	/* configure the packet buffer length */
	srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* configure descriptor type */
	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
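/* SRRCTL packing example (assuming the common 2K receive buffer): the
 * header size lands in the BSIZEHDRSIZE field via the shift above, while
 * 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT encodes the packet buffer size as 2,
 * the field being expressed in 1KB units.
 */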
/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 *  - 82598/82599/X540:     128
 *  - X550(non-SRIOV mode): 512
 *  - X550(SRIOV mode):     64
 */
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type < ixgbe_mac_X550)
		return 128;
	else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return 64;
	else
		return 512;
}
/**
 * ixgbe_store_reta - Write the RETA table to HW
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 */
void ixgbe_store_reta(struct ixgbe_adapter *adapter)
{
	u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0;
	u32 indices_multi;
	u8 *indir_tbl = adapter->rss_indir_tbl;

	/* Fill out the redirection table as follows:
	 *  - 82598:      8 bit wide entries containing pair of 4 bit RSS
	 *    indices.
	 *  - 82599/X540: 8 bit wide entries containing 4 bit RSS index
	 *  - X550:       8 bit wide entries containing 6 bit RSS index
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		indices_multi = 0x11;
	else
		indices_multi = 0x1;

	/* Write redirection table to HW */
	for (i = 0; i < reta_entries; i++) {
		reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
						reta);
			reta = 0;
		}
	}
}
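/* Packing example for the loop above: four 8-bit table entries fit in each
 * 32-bit RETA register.  On 82598, indices_multi = 0x11 duplicates a 4-bit
 * RSS index into both nibbles of its byte, e.g. entry 0x3 is stored as
 * 0x33.
 */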
/**
 * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 */
static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
{
	u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vfreta = 0;
	unsigned int pf_pool = adapter->num_vfs;

	/* Write redirection table to HW */
	for (i = 0; i < reta_entries; i++) {
		vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
					vfreta);
			vfreta = 0;
		}
	}
}
static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

	/* Program table for at least 4 queues w/ SR-IOV so that VFs can
	 * make full use of any rings they may have.  We will use the
	 * PSRTYPE register to control how many rings we use within the PF.
	 */
	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
		rss_i = 4;

	/* Fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);

	/* Fill out redirection table */
	memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));

	for (i = 0, j = 0; i < reta_entries; i++, j++) {
		if (j == rss_i)
			j = 0;

		adapter->rss_indir_tbl[i] = j;
	}

	ixgbe_store_reta(adapter);
}
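/* Fill example: with rss_i = 4 the loop above produces the repeating
 * pattern 0, 1, 2, 3, 0, 1, ... across all reta_entries, spreading flows
 * evenly over the active RSS queues.
 */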
static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
	unsigned int pf_pool = adapter->num_vfs;
	int i, j;

	/* Fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
				adapter->rss_key[i]);

	/* Fill out the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == rss_i)
			j = 0;

		adapter->rss_indir_tbl[i] = j;
	}

	ixgbe_store_vfreta(adapter);
}
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
	u32 rxcsum;

	/* Disable indicating checksum in descriptor, enables RSS hash */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		if (adapter->ring_feature[RING_F_RSS].mask)
			mrqc = IXGBE_MRQC_RSSEN;
	} else {
		u8 tcs = netdev_get_num_tc(adapter->netdev);

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			if (tcs > 4)
				mrqc = IXGBE_MRQC_VMDQRT8TCEN;	/* 8 TCs */
			else if (tcs > 1)
				mrqc = IXGBE_MRQC_VMDQRT4TCEN;	/* 4 TCs */
			else if (adapter->ring_feature[RING_F_VMDQ].mask ==
				 IXGBE_82599_VMDQ_4Q_MASK)
				mrqc = IXGBE_MRQC_VMDQRSS32EN;
			else
				mrqc = IXGBE_MRQC_VMDQRSS64EN;
		} else {
			if (tcs > 4)
				mrqc = IXGBE_MRQC_RTRSS8TCEN;
			else if (tcs > 1)
				mrqc = IXGBE_MRQC_RTRSS4TCEN;
			else
				mrqc = IXGBE_MRQC_RSSEN;
		}
	}

	/* Perform hash on these packet types */
	rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
		     IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
		     IXGBE_MRQC_RSS_FIELD_IPV6 |
		     IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
		rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
		rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

	netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
	if ((hw->mac.type >= ixgbe_mac_X550) &&
	    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		unsigned int pf_pool = adapter->num_vfs;

		/* Enable VF RSS mode */
		mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

		/* Setup RSS through the VF registers */
		ixgbe_setup_vfreta(adapter);
		vfmrqc = IXGBE_MRQC_RSSEN;
		vfmrqc |= rss_field;
		IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
	} else {
		ixgbe_setup_reta(adapter);
		mrqc |= rss_field;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}
}
/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter:    address of board private structure
 * @index:      index of ring to set
 **/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rscctrl;
	u8 reg_idx = ring->reg_idx;

	if (!ring_is_rsc_enabled(ring))
		return;

	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65536
	 */
	rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}
#define IXGBE_MAX_RX_DESC_POLL 10
static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (ixgbe_removed(hw->hw_addr))
		return;
	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
		      "the polling period\n", reg_idx);
}
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (ixgbe_removed(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
		      "the polling period\n", reg_idx);
}
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	ixgbe_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));
	/* Force flushing of IXGBE_RDLEN to prevent MDD */
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);

	ixgbe_configure_srrctl(adapter, ring);
	ixgbe_configure_rscctl(adapter, ring);

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * enable cache line friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache),
		 * this also removes ugly rx_no_buffer_count increment
		 * HTHRESH=4 descriptors (to minimize latency on fetch)
		 * WTHRESH=8 burst writeback up to two cache lines
		 */
		rxdctl &= ~0x3FFFFF;
		rxdctl |=  0x080420;
	}

	/* enable receive descriptor ring */
	rxdctl |= IXGBE_RXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

	ixgbe_rx_desc_queue_enable(adapter, ring);
	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
}
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int rss_i = adapter->ring_feature[RING_F_RSS].indices;
	u16 pool;

	/* PSRTYPE must be initialized in non 82598 adapters */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
		      IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR |
		      IXGBE_PSRTYPE_L2HDR |
		      IXGBE_PSRTYPE_IPV6HDR;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (rss_i > 3)
		psrtype |= 2u << 29;
	else if (rss_i > 1)
		psrtype |= 1u << 29;

	for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_offset, vf_shift;
	u32 gcr_ext, vmdctl;
	int i;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return;

	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
	vmdctl |= IXGBE_VT_CTL_REPLEN;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

	vf_shift = VMDQ_P(0) % 32;
	reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;

	/* Enable only the PF's pool for Tx/Rx */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
	if (adapter->bridge_mode == BRIDGE_MODE_VEB)
		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
	hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));

	/* clear VLAN promisc flag so VFTA will be updated if necessary */
	adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;

	/*
	 * Set up VF register offsets for selected VT Mode,
	 * i.e. 32 or 64 VFs for SR-IOV
	 */
	switch (adapter->ring_feature[RING_F_VMDQ].mask) {
	case IXGBE_82599_VMDQ_8Q_MASK:
		gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
		break;
	case IXGBE_82599_VMDQ_4Q_MASK:
		gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
		break;
	default:
		gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);

	for (i = 0; i < adapter->num_vfs; i++) {
		/* configure spoof checking */
		ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
					  adapter->vfinfo[i].spoofchk_enabled);

		/* Enable/Disable RSS query feature  */
		ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
					  adapter->vfinfo[i].rss_query_enabled);
	}
}
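/* Pool-enable illustration for the VFRE/VFTE writes above (hypothetical
 * VMDQ_P(0) = 8): vf_shift = 8 and reg_offset = 0, so VFRE(0)/VFTE(0) get
 * GENMASK(31, 8) - pools 8 through 31 - while VFRE(1)/VFTE(1) get
 * reg_offset - 1 = 0xffffffff for pools 32-63.  Only the VF pools below
 * the PF (0-7 here) are left disabled.
 */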
static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	struct ixgbe_ring *rx_ring;
	int i;
	u32 mhadd, hlreg0;

#ifdef IXGBE_FCOE
	/* adjust max frame to be able to do baby jumbo for FCoE */
	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
	    (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;

#endif /* IXGBE_FCOE */

	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;

		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
	hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_ring = adapter->rx_ring[i];
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
			set_ring_rsc_enabled(rx_ring);
		else
			clear_ring_rsc_enabled(rx_ring);
	}
}
static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't mention it clearly but DCA hints
		 * will only use queue 0's tags unless this bit is set.  Side
		 * effects of setting this bit are only that SRRCTL must be
		 * fully programmed [0..15]
		 */
		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (adapter->num_vfs)
			rdrxctl |= IXGBE_RDRXCTL_PSP;
		/* fall through for older HW */
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* Disable RSC for ACK packets */
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
		   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
		/* hardware requires some bits to be set by default */
		rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
		break;
	default:
		/* We should do nothing since we don't know this hardware */
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 rxctrl, rfctl;

	/* disable receives while setting up the descriptors */
	hw->mac.ops.disable_rx(hw);

	ixgbe_setup_psrtype(adapter);
	ixgbe_setup_rdrxctl(adapter);

	/* RSC Setup */
	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	rfctl &= ~IXGBE_RFCTL_RSC_DIS;
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
		rfctl |= IXGBE_RFCTL_RSC_DIS;

	/* disable NFS filtering */
	rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);

	/* Program registers for the distribution of queues */
	ixgbe_setup_mrqc(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbe_set_rx_buffer_len(adapter);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	/* disable drop enable for 82598 parts */
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;

	/* enable all receives */
	rxctrl |= IXGBE_RXCTRL_RXEN;
	hw->mac.ops.enable_rx_dma(hw, rxctrl);
}
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
		hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);

	set_bit(vid, adapter->active_vlans);

	return 0;
}
static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
{
	u32 vlvf;
	int idx;

	/* short cut the special case */
	if (!vlan)
		return 0;

	/* Search for the vlan id in the VLVF entries */
	for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
		vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
		if ((vlvf & VLAN_VID_MASK) == vlan)
			break;
	}

	return idx;
}
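/* Note: the search above counts down and deliberately never tests entry 0,
 * which doubles as the "not found" return value, so VLVF(0) stays reserved
 * for the default pool.
 */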
void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 bits, word;
	int idx;

	idx = ixgbe_find_vlvf_entry(hw, vid);
	if (!idx)
		return;

	/* See if any other pools are set for this VLAN filter
	 * entry other than the PF.
	 */
	word = idx * 2 + (VMDQ_P(0) / 32);
	bits = ~BIT(VMDQ_P(0) % 32);
	bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));

	/* Disable the filter so this falls into the default pool. */
	if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
		if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
			IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
	}
}
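/* VLVFB layout example for the arithmetic above: each VLVF entry owns two
 * 32-bit pool-enable words, so the word holding the PF bit for entry idx
 * is idx * 2 + VMDQ_P(0) / 32, e.g. idx = 5 with VMDQ_P(0) = 40 selects
 * word 11, bit 8.
 */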
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* remove VID from filter table */
	if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
		hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}
/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */
static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;
	int i, j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		vlnctrl &= ~IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct ixgbe_ring *ring = adapter->rx_ring[i];

			if (ring->l2_accel_priv)
				continue;
			j = ring->reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
		break;
	default:
		break;
	}
}
/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */
static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;
	int i, j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		vlnctrl |= IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct ixgbe_ring *ring = adapter->rx_ring[i];

			if (ring->l2_accel_priv)
				continue;
			j = ring->reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
		break;
	default:
		break;
	}
}
static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl, i;

	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

	if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
		/* For VMDq and SR-IOV we must leave VLAN filtering enabled */
		vlnctrl |= IXGBE_VLNCTRL_VFE;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
	} else {
		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		return;
	}

	/* Nothing to do for 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* We are already in VLAN promisc, nothing to do */
	if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
		return;

	/* Set flag so we don't redo unnecessary work */
	adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;

	/* Add PF to all active pools */
	for (i = IXGBE_VLVF_ENTRIES; --i;) {
		u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
		u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);

		vlvfb |= BIT(VMDQ_P(0) % 32);
		IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
	}

	/* Set all bits in the VLAN filter table array */
	for (i = hw->mac.vft_size; i--;)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
}
#define VFTA_BLOCK_SIZE 8
static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
	u32 vid_start = vfta_offset * 32;
	u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
	u32 i, vid, word, bits;

	for (i = IXGBE_VLVF_ENTRIES; --i;) {
		u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));

		/* pull VLAN ID from VLVF */
		vid = vlvf & VLAN_VID_MASK;

		/* only concern ourselves with a certain range */
		if (vid < vid_start || vid >= vid_end)
			continue;

		if (vlvf) {
			/* record VLAN ID in VFTA */
			vfta[(vid - vid_start) / 32] |= BIT(vid % 32);

			/* if PF is part of this then continue */
			if (test_bit(vid, adapter->active_vlans))
				continue;
		}

		/* remove PF from the pool */
		word = i * 2 + VMDQ_P(0) / 32;
		bits = ~BIT(VMDQ_P(0) % 32);
		bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
	}

	/* extract values from active_vlans and write back to VFTA */
	for (i = VFTA_BLOCK_SIZE; i--;) {
		vid = (vfta_offset + i) * 32;
		word = vid / BITS_PER_LONG;
		bits = vid % BITS_PER_LONG;

		vfta[i] |= adapter->active_vlans[word] >> bits;

		IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
	}
}
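/* Sizing note: each VFTA register covers 32 VLAN IDs, so a block of
 * VFTA_BLOCK_SIZE (8) registers spans 256 IDs per ixgbe_scrub_vfta()
 * call; e.g. vfta_offset 8 covers VLAN IDs 256-511.
 */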
static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl, i;

	/* Set VLAN filtering to enabled */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl |= IXGBE_VLNCTRL_VFE;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
	    hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* We are not in VLAN promisc, nothing to do */
	if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
		return;

	/* Set flag so we don't redo unnecessary work */
	adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;

	for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
		ixgbe_scrub_vfta(adapter, i);
}
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	u16 vid = 1;

	ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
/**
 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *           0 on no addresses written
 *           X on writing X addresses to MTA
 **/
static int ixgbe_write_mc_addr_list(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!netif_running(netdev))
		return 0;

	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);
	else
		return -ENOMEM;

#ifdef CONFIG_PCI_IOV
	ixgbe_restore_vf_multicasts(adapter);
#endif

	return netdev_mc_count(netdev);
}
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
{
	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
		mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;

		if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
			hw->mac.ops.set_rar(hw, i,
					    mac_table->addr,
					    mac_table->pool,
					    IXGBE_RAH_AV);
		else
			hw->mac.ops.clear_rar(hw, i);
	}
}
#endif
static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
{
	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
		if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
			continue;

		mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;

		if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
			hw->mac.ops.set_rar(hw, i,
					    mac_table->addr,
					    mac_table->pool,
					    IXGBE_RAH_AV);
		else
			hw->mac.ops.clear_rar(hw, i);
	}
}
static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
{
	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
		mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
		mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
	}

	ixgbe_sync_mac_table(adapter);
}
static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
{
	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
	struct ixgbe_hw *hw = &adapter->hw;
	int i, count = 0;

	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
		/* do not count default RAR as available */
		if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
			continue;

		/* only count unused and addresses that belong to us */
		if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
			if (mac_table->pool != pool)
				continue;
		}

		count++;
	}

	return count;
}
/* this function destroys the first RAR entry */
static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
{
	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
	struct ixgbe_hw *hw = &adapter->hw;

	memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
	mac_table->pool = VMDQ_P(0);

	mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;

	hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
			    IXGBE_RAH_AV);
}
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 pool)
{
	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
		if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
			continue;

		ether_addr_copy(mac_table->addr, addr);
		mac_table->pool = pool;

		mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
				    IXGBE_MAC_STATE_IN_USE;

		ixgbe_sync_mac_table(adapter);

		return i;
	}

	return -ENOMEM;
}
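/* On success the index of the claimed RAR entry (>= 0) is returned;
 * -ENOMEM means every RAR was already in use.  Callers that only care
 * about failure clamp the positive result, as ixgbe_uc_sync() does
 * further below.
 */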
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 pool)
{
	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* search table for addr, if found clear IN_USE flag and sync */
	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
		/* we can only delete an entry if it is in use */
		if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
			continue;
		/* we only care about entries that belong to the given pool */
		if (mac_table->pool != pool)
			continue;
		/* we only care about a specific MAC address */
		if (!ether_addr_equal(addr, mac_table->addr))
			continue;

		mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
		mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;

		ixgbe_sync_mac_table(adapter);

		return 0;
	}

	return -ENOMEM;
}
/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *           0 on no addresses written
 *           X on writing X addresses to the RAR table
 **/
static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
		return -ENOMEM;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			ixgbe_del_mac_filter(adapter, ha->addr, vfn);
			ixgbe_add_mac_filter(adapter, ha->addr, vfn);
			count++;
		}
	}
	return count;
}
static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int ret;

	ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));

	return min_t(int, ret, 0);
}
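/* min_t() folds the non-negative RAR-index success value from
 * ixgbe_add_mac_filter() down to 0, since __dev_uc_sync() expects
 * 0 on success and a negative errno on failure.
 */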
static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));

	return 0;
}
/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
void ixgbe_set_rx_mode(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
	netdev_features_t features = netdev->features;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	fctrl |= IXGBE_FCTRL_PMCF;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (netdev->flags & IFF_PROMISC) {
		hw->addr_ctrl.user_set_promisc = true;
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vmolr |= IXGBE_VMOLR_MPE;
		features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			vmolr |= IXGBE_VMOLR_MPE;
		}
		hw->addr_ctrl.user_set_promisc = false;
	}

	/*
	 * Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
		fctrl |= IXGBE_FCTRL_UPE;
		vmolr |= IXGBE_VMOLR_ROPE;
	}

	/* Write addresses to the MTA, if the attempt fails
	 * then we should just turn on promiscuous mode so
	 * that we can at least receive multicast traffic
	 */
	count = ixgbe_write_mc_addr_list(netdev);
	if (count < 0) {
		fctrl |= IXGBE_FCTRL_MPE;
		vmolr |= IXGBE_VMOLR_MPE;
	} else if (count) {
		vmolr |= IXGBE_VMOLR_ROMPE;
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
	}

	/* This is useful for sniffing bad packets. */
	if (features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode */
		fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
			  IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
			  IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */

		fctrl &= ~(IXGBE_FCTRL_DPF);
		/* NOTE:  VLAN filtering is disabled by setting PROMISC */
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		ixgbe_vlan_strip_enable(adapter);
	else
		ixgbe_vlan_strip_disable(adapter);

	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		ixgbe_vlan_promisc_disable(adapter);
	else
		ixgbe_vlan_promisc_enable(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;

	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
		ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
		napi_enable(&adapter->q_vector[q_idx]->napi);
	}
}
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;

	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
		napi_disable(&adapter->q_vector[q_idx]->napi);
		while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
	}
}
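/* The ixgbe_qv_disable() retry loop above exists for busy polling: a
 * q_vector still locked by a busy-poll user cannot be marked disabled
 * yet, so the driver logs the vector and backs off until the lock is
 * released.
 */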
static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vxlanctrl;

	if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
				IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
		return;

	vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);

	if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
		adapter->vxlan_port = 0;

	if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
		adapter->geneve_port = 0;
}
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * This is called by the driver on open to configure the DCB hardware.
 * This is also called by the gennetlink interface when reconfiguring
 * the DCB state.
 */
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
		if (hw->mac.type == ixgbe_mac_82598EB)
			netif_set_gso_max_size(adapter->netdev, 65536);
		return;
	}

	if (hw->mac.type == ixgbe_mac_82598EB)
		netif_set_gso_max_size(adapter->netdev, 32768);

#ifdef IXGBE_FCOE
	if (adapter->netdev->features & NETIF_F_FCOE_MTU)
		max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif

	/* reconfigure the hardware */
	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
					       DCB_TX_CONFIG);
		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
					       DCB_RX_CONFIG);
		ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
	} else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
		ixgbe_dcb_hw_ets(&adapter->hw,
				 adapter->ixgbe_ieee_ets,
				 max_frame);
		ixgbe_dcb_hw_pfc_config(&adapter->hw,
					adapter->ixgbe_ieee_pfc->pfc_en,
					adapter->ixgbe_ieee_ets->prio_tc);
	}

	/* Enable RSS Hash per TC */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 msb = 0;
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;

		while (rss_i) {
			msb++;
			rss_i >>= 1;
		}

		/* write msb to all 8 TCs in one write */
		IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
	}
}
#endif
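/* Worked example for the RQTC write above: with 16 RSS indices, rss_i
 * starts at 15 (0xF), the loop yields msb = 4, and 4 * 0x11111111
 * programs a value of 4 into each of the eight 4-bit per-TC fields in
 * a single register write.
 */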
/* Additional bittime to account for IXGBE framing */
#define IXGBE_ETH_FRAMING 20

/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *dev = adapter->netdev;
	int link, tc, kb, marker;
	u32 dv_id, rx_pba;

	/* Calculate max LAN frame size */
	tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;

#ifdef IXGBE_FCOE
	/* FCoE traffic class uses FCOE jumbo frames */
	if ((dev->features & NETIF_F_FCOE_MTU) &&
	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
	    (pb == ixgbe_fcoe_get_tc(adapter)))
		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif

	/* Calculate delay value for device */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		dv_id = IXGBE_DV_X540(link, tc);
		break;
	default:
		dv_id = IXGBE_DV(link, tc);
		break;
	}

	/* Loopback switch introduces additional latency */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		dv_id += IXGBE_B2BT(tc);

	/* Delay value is calculated in bit times convert to KB */
	kb = IXGBE_BT2KB(dv_id);
	rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;

	marker = rx_pba - kb;

	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case throw an error
	 * to the user and do the best we can.
	 */
	if (marker < 0) {
		e_warn(drv, "Packet Buffer(%i) can not provide enough "
			    "headroom to support flow control. "
			    "Decrease MTU or number of traffic classes\n", pb);
		marker = tc + 1;
	}

	return marker;
}
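/* In short: the high watermark is the packet buffer size in KB minus
 * the worst-case delay allowance IXGBE_BT2KB(dv_id); the "tc + 1"
 * fallback merely keeps the returned threshold positive when the
 * buffer cannot provide that headroom.
 */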
/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *dev = adapter->netdev;
	int tc;
	u32 dv_id;

	/* Calculate max LAN frame size */
	tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

#ifdef IXGBE_FCOE
	/* FCoE traffic class uses FCOE jumbo frames */
	if ((dev->features & NETIF_F_FCOE_MTU) &&
	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
	    (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif

	/* Calculate delay value for device */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		dv_id = IXGBE_LOW_DV_X540(tc);
		break;
	default:
		dv_id = IXGBE_LOW_DV(tc);
		break;
	}

	/* Delay value is calculated in bit times convert to KB */
	return IXGBE_BT2KB(dv_id);
}
/*
 * ixgbe_pbthresh_setup - calculate and setup high low water marks
 */
static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int num_tc = netdev_get_num_tc(adapter->netdev);
	int i;

	if (!num_tc)
		num_tc = 1;

	for (i = 0; i < num_tc; i++) {
		hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
		hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);

		/* Low water marks must not be larger than high water marks */
		if (hw->fc.low_water[i] > hw->fc.high_water[i])
			hw->fc.low_water[i] = 0;
	}

	for (; i < MAX_TRAFFIC_CLASS; i++)
		hw->fc.high_water[i] = 0;
}
static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int hdrm;
	u8 tc = netdev_get_num_tc(adapter->netdev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		hdrm = 32 << adapter->fdir_pballoc;
	else
		hdrm = 0;

	hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
	ixgbe_pbthresh_setup(adapter);
}
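/* The "32 << adapter->fdir_pballoc" term reserves Flow Director space
 * as packet-buffer headroom, scaling with the 64K/128K/256K filter
 * allocation selected in adapter->fdir_pballoc.
 */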
static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *filter;

	spin_lock(&adapter->fdir_perfect_lock);

	if (!hlist_empty(&adapter->fdir_filter_list))
		ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);

	hlist_for_each_entry_safe(filter, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		ixgbe_fdir_write_perfect_filter_82599(hw,
				&filter->filter,
				filter->sw_idx,
				(filter->action == IXGBE_FDIR_DROP_QUEUE) ?
				IXGBE_FDIR_DROP_QUEUE :
				adapter->rx_ring[filter->action]->reg_idx);
	}

	spin_unlock(&adapter->fdir_perfect_lock);
}
static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
				      struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vmolr;

	/* No unicast promiscuous support for VMDQ devices. */
	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
	vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);

	/* clear the affected bit */
	vmolr &= ~IXGBE_VMOLR_MPE;

	if (dev->flags & IFF_ALLMULTI) {
		vmolr |= IXGBE_VMOLR_MPE;
	} else {
		vmolr |= IXGBE_VMOLR_ROMPE;
		hw->mac.ops.update_mc_addr_list(hw, dev);
	}
	ixgbe_write_uc_addr_list(adapter->netdev, pool);
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}
static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
{
	struct ixgbe_adapter *adapter = vadapter->real_adapter;
	int rss_i = adapter->num_rx_queues_per_pool;
	struct ixgbe_hw *hw = &adapter->hw;
	u16 pool = vadapter->pool;
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
		      IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR |
		      IXGBE_PSRTYPE_L2HDR |
		      IXGBE_PSRTYPE_IPV6HDR;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (rss_i > 3)
		psrtype |= 2u << 29;
	else if (rss_i > 1)
		psrtype |= 1u << 29;

	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
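/* The upper bits of PSRTYPE in effect encode log2 of the per-pool RSS
 * queue count: 2u << 29 advertises four queues (rss_i > 3), 1u << 29
 * two queues, and 0 a single queue.
 */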
/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];

		if (rx_buffer->skb) {
			struct sk_buff *skb = rx_buffer->skb;

			if (IXGBE_CB(skb)->page_released)
				dma_unmap_page(dev,
					       IXGBE_CB(skb)->dma,
					       ixgbe_rx_bufsz(rx_ring),
					       DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			rx_buffer->skb = NULL;
		}

		if (!rx_buffer->page)
			continue;

		dma_unmap_page(dev, rx_buffer->dma,
			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
		__free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));

		rx_buffer->page = NULL;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
				   struct ixgbe_ring *rx_ring)
{
	struct ixgbe_adapter *adapter = vadapter->real_adapter;
	int index = rx_ring->queue_index + vadapter->rx_base_queue;

	/* shutdown specific queue receive and wait for dma to settle */
	ixgbe_disable_rx_queue(adapter, rx_ring);
	usleep_range(10000, 20000);
	ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
	ixgbe_clean_rx_ring(rx_ring);
	rx_ring->l2_accel_priv = NULL;
}
static int ixgbe_fwd_ring_down(struct net_device *vdev,
			       struct ixgbe_fwd_adapter *accel)
{
	struct ixgbe_adapter *adapter = accel->real_adapter;
	unsigned int rxbase = accel->rx_base_queue;
	unsigned int txbase = accel->tx_base_queue;
	int i;

	netif_tx_stop_all_queues(vdev);

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
		adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
		adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
		adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
	}

	return 0;
}
static int ixgbe_fwd_ring_up(struct net_device *vdev,
			     struct ixgbe_fwd_adapter *accel)
{
	struct ixgbe_adapter *adapter = accel->real_adapter;
	unsigned int rxbase, txbase, queues;
	int i, baseq, err = 0;

	if (!test_bit(accel->pool, &adapter->fwd_bitmask))
		return 0;

	baseq = accel->pool * adapter->num_rx_queues_per_pool;
	netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
		   accel->pool, adapter->num_rx_pools,
		   baseq, baseq + adapter->num_rx_queues_per_pool,
		   adapter->fwd_bitmask);

	accel->netdev = vdev;
	accel->rx_base_queue = rxbase = baseq;
	accel->tx_base_queue = txbase = baseq;

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
		adapter->rx_ring[rxbase + i]->netdev = vdev;
		adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
	}

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
		adapter->tx_ring[txbase + i]->netdev = vdev;
		adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
	}

	queues = min_t(unsigned int,
		       adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
	err = netif_set_real_num_tx_queues(vdev, queues);
	if (err)
		goto fwd_queue_err;

	err = netif_set_real_num_rx_queues(vdev, queues);
	if (err)
		goto fwd_queue_err;

	if (is_valid_ether_addr(vdev->dev_addr))
		ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);

	ixgbe_fwd_psrtype(accel);
	ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
	return err;
fwd_queue_err:
	ixgbe_fwd_ring_down(vdev, accel);
	return err;
}
static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
{
	if (netif_is_macvlan(upper)) {
		struct macvlan_dev *dfwd = netdev_priv(upper);
		struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;

		if (dfwd->fwd_priv)
			ixgbe_fwd_ring_up(upper, vadapter);
	}

	return 0;
}
static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
{
	netdev_walk_all_upper_dev_rcu(adapter->netdev,
				      ixgbe_upper_dev_walk, NULL);
}
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbe_configure_pb(adapter);
#ifdef CONFIG_IXGBE_DCB
	ixgbe_configure_dcb(adapter);
#endif
	/*
	 * We must restore virtualization before VLANs or else
	 * the VLVF registers will not be populated
	 */
	ixgbe_configure_virtualization(adapter);

	ixgbe_set_rx_mode(adapter->netdev);
	ixgbe_restore_vlan(adapter);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		hw->mac.ops.disable_rx_buff(hw);
		break;
	default:
		break;
	}

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		ixgbe_init_fdir_signature_82599(&adapter->hw,
						adapter->fdir_pballoc);
	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
		ixgbe_init_fdir_perfect_82599(&adapter->hw,
					      adapter->fdir_pballoc);
		ixgbe_fdir_filter_restore(adapter);
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		hw->mac.ops.enable_rx_buff(hw);
		break;
	default:
		break;
	}

#ifdef CONFIG_IXGBE_DCA
	/* configure DCA */
	if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
		ixgbe_setup_dca(adapter);
#endif /* CONFIG_IXGBE_DCA */

#ifdef IXGBE_FCOE
	/* configure FCoE L2 filters, redirection table, and Rx control */
	ixgbe_configure_fcoe(adapter);
#endif /* IXGBE_FCOE */
	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
	ixgbe_configure_dfwd(adapter);
}
/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
{
	/*
	 * We are assuming the worst case scenario here, and that
	 * is that an SFP was inserted/removed after the reset
	 * but before SFP detection was enabled.  As such the best
	 * solution is to just start searching as soon as we start
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;

	adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
	adapter->sfp_poll_time = 0;
}
/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
	u32 speed;
	bool autoneg, link_up = false;
	int ret = IXGBE_ERR_LINK_SETUP;

	if (hw->mac.ops.check_link)
		ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);

	if (ret)
		return ret;

	speed = hw->phy.autoneg_advertised;
	if ((!speed) && (hw->mac.ops.get_link_capabilities))
		ret = hw->mac.ops.get_link_capabilities(hw, &speed,
							&autoneg);
	if (ret)
		return ret;

	if (hw->mac.ops.setup_link)
		ret = hw->mac.ops.setup_link(hw, speed, link_up);

	return ret;
}
static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie = 0;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
		       IXGBE_GPIE_OCD;
		gpie |= IXGBE_GPIE_EIAME;
		/*
		 * use EIAM to auto-mask when MSI-X interrupt is asserted
		 * this saves a register write for every interrupt
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
		default:
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
			break;
		}
	} else {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	/* XXX: to interrupt immediately for EICS writes, enable this */
	/* gpie |= IXGBE_GPIE_EIMEN; */

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		gpie &= ~IXGBE_GPIE_VTMODE_MASK;

		switch (adapter->ring_feature[RING_F_VMDQ].mask) {
		case IXGBE_82599_VMDQ_8Q_MASK:
			gpie |= IXGBE_GPIE_VTMODE_16;
			break;
		case IXGBE_82599_VMDQ_4Q_MASK:
			gpie |= IXGBE_GPIE_VTMODE_32;
			break;
		default:
			gpie |= IXGBE_GPIE_VTMODE_64;
			break;
		}
	}

	/* Enable Thermal over heat sensor interrupt */
	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			gpie |= IXGBE_SDP0_GPIEN_8259X;
			break;
		default:
			break;
		}
	}

	/* Enable fan failure interrupt */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		gpie |= IXGBE_SDP1_GPIEN(hw);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}
static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;
	u32 ctrl_ext;

	ixgbe_get_hw_control(adapter);
	ixgbe_setup_gpie(adapter);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		ixgbe_configure_msix(adapter);
	else
		ixgbe_configure_msi_and_legacy(adapter);

	/* enable the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.enable_tx_laser)
		hw->mac.ops.enable_tx_laser(hw);

	if (hw->phy.ops.set_phy_power)
		hw->phy.ops.set_phy_power(hw, true);

	smp_mb__before_atomic();
	clear_bit(__IXGBE_DOWN, &adapter->state);
	ixgbe_napi_enable_all(adapter);

	if (ixgbe_is_sfp(hw)) {
		ixgbe_sfp_link_config(adapter);
	} else {
		err = ixgbe_non_sfp_link_config(hw);
		if (err)
			e_err(probe, "link_config FAILED %d\n", err);
	}

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_EICR);
	ixgbe_irq_enable(adapter, true, true);

	/*
	 * If this adapter has a fan, check to see if we had a failure
	 * before we enabled the interrupt.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(drv, "Fan has stopped, replace the adapter\n");
	}

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->service_timer, jiffies);

	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
}
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
	WARN_ON(in_interrupt());
	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(adapter->netdev);

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	ixgbe_down(adapter);
	/*
	 * If SR-IOV enabled then wait a bit before bringing the adapter
	 * back up to give the VFs time to respond to the reset. The
	 * two second wait is based upon the watchdog timer cycle in
	 * the VF driver.
	 */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		msleep(2000);
	ixgbe_up(adapter);
	clear_bit(__IXGBE_RESETTING, &adapter->state);
}
void ixgbe_up(struct ixgbe_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	ixgbe_configure(adapter);

	ixgbe_up_complete(adapter);
}
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int err;

	if (ixgbe_removed(hw->hw_addr))
		return;
	/* lock SFP init bit to prevent race conditions with the watchdog */
	while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		usleep_range(1000, 2000);

	/* clear all SFP and link config related flags while holding SFP_INIT */
	adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
			     IXGBE_FLAG2_SFP_NEEDS_RESET);
	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;

	err = hw->mac.ops.init_hw(hw);
	switch (err) {
	case 0:
	case IXGBE_ERR_SFP_NOT_PRESENT:
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		break;
	case IXGBE_ERR_MASTER_REQUESTS_PENDING:
		e_dev_err("master disable timed out\n");
		break;
	case IXGBE_ERR_EEPROM_VERSION:
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
		break;
	default:
		e_dev_err("Hardware Error: %d\n", err);
	}

	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);

	/* flush entries out of MAC table */
	ixgbe_flush_sw_mac_table(adapter);
	__dev_uc_unsync(netdev, NULL);

	/* do not flush user set addresses */
	ixgbe_mac_set_default_filter(adapter);

	/* update SAN MAC vmdq pool selection */
	if (hw->mac.san_mac_rar_index)
		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
		ixgbe_ptp_reset(adapter);

	if (hw->phy.ops.set_phy_power) {
		if (!netif_running(adapter->netdev) && !adapter->wol)
			hw->phy.ops.set_phy_power(hw, false);
		else
			hw->phy.ops.set_phy_power(hw, true);
	}
}
/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}
/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_clean_rx_ring(adapter->rx_ring[i]);
}
/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_clean_tx_ring(adapter->tx_ring[i]);
}
static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *filter;

	spin_lock(&adapter->fdir_perfect_lock);

	hlist_for_each_entry_safe(filter, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}
	adapter->fdir_filter_count = 0;

	spin_unlock(&adapter->fdir_perfect_lock);
}
static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
{
	if (netif_is_macvlan(upper)) {
		struct macvlan_dev *vlan = netdev_priv(upper);

		if (vlan->fwd_priv) {
			netif_tx_stop_all_queues(upper);
			netif_carrier_off(upper);
			netif_tx_disable(upper);
		}
	}

	return 0;
}
void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable receives */
	hw->mac.ops.disable_rx(hw);

	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		/* this call also flushes the previous write */
		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);

	usleep_range(10000, 20000);

	netif_tx_stop_all_queues(netdev);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	/* disable any upper devices */
	netdev_walk_all_upper_dev_rcu(adapter->netdev,
				      ixgbe_disable_macvlan, NULL);

	ixgbe_irq_disable(adapter);

	ixgbe_napi_disable_all(adapter);

	clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
	adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;

	del_timer_sync(&adapter->service_timer);

	if (adapter->num_vfs) {
		/* Clear EITR Select mapping */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);

		/* Mark all the VFs as inactive */
		for (i = 0 ; i < adapter->num_vfs; i++)
			adapter->vfinfo[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		ixgbe_ping_all_vfs(adapter);

		/* Disable all VFTE/VFRE TX/RX */
		ixgbe_disable_tx_rx(adapter);
	}

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	}

	/* Disable the Tx DMA engine on 82599 and later MAC */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
				 ~IXGBE_DMATXCTL_TE));
		break;
	default:
		break;
	}

	if (!pci_channel_offline(adapter->pdev))
		ixgbe_reset(adapter);

	/* power down the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);

	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);
}
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	ixgbe_tx_timeout_reset(adapter);
}
#ifdef CONFIG_IXGBE_DCB
static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct tc_configuration *tc;
	int j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
	case ixgbe_mac_82599EB:
		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
		adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
	default:
		adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
		adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
		break;
	}

	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &adapter->dcb_cfg.tc_config[0];
	tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;

	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.pfc_mode_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
		adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
	memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
	       sizeof(adapter->temp_dcb_cfg));
}
#endif
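/* The 12 + (j & 1) split gives the eight traffic classes alternating
 * 12% and 13% bandwidth group shares, which sums to exactly 100
 * (4 * 12 + 4 * 13).
 */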
/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 * @ii: pointer to ixgbe_info for device
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
			 const struct ixgbe_info *ii)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss, fdir;
	u32 fwsm;
	int i;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* get_invariants needs the device IDs */
	ii->get_invariants(hw);

	/* Set common capability flags and settings */
	rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
	adapter->ring_feature[RING_F_RSS].limit = rss;
	adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
	adapter->max_q_vectors = MAX_Q_VECTORS_82599;
	adapter->atr_sample_rate = 20;
	fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
	adapter->ring_feature[RING_F_FDIR].limit = fdir;
	adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef CONFIG_IXGBE_DCA
	adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
#endif
#ifdef CONFIG_IXGBE_DCB
	adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
#endif
#ifdef IXGBE_FCOE
	adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
#ifdef CONFIG_IXGBE_DCB
	/* Default traffic class to use for FCoE */
	adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */

	/* initialize static ixgbe jump table entries */
	adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
					  GFP_KERNEL);
	if (!adapter->jump_tables[0])
		return -ENOMEM;
	adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;

	for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
		adapter->jump_tables[i] = NULL;

	adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
				     hw->mac.num_rar_entries,
				     GFP_ATOMIC);
	if (!adapter->mac_table)
		return -ENOMEM;

	/* Set MAC specific capability flags and exceptions */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;

		if (hw->device_id == IXGBE_DEV_ID_82598AT)
			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;

		adapter->max_q_vectors = MAX_Q_VECTORS_82598;
		adapter->ring_feature[RING_F_FDIR].limit = 0;
		adapter->atr_sample_rate = 0;
		adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
		adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
#ifdef CONFIG_IXGBE_DCB
		adapter->fcoe.up = 0;
#endif /* IXGBE_DCB */
#endif /* IXGBE_FCOE */
		break;
	case ixgbe_mac_82599EB:
		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
		break;
	case ixgbe_mac_X540:
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
		break;
	case ixgbe_mac_x550em_a:
		adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
		/* fall through */
	case ixgbe_mac_X550EM_x:
#ifdef CONFIG_IXGBE_DCB
		adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
#endif
#ifdef IXGBE_FCOE
		adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
#ifdef CONFIG_IXGBE_DCB
		adapter->fcoe.up = 0;
#endif /* IXGBE_DCB */
#endif /* IXGBE_FCOE */
		/* fall through */
	case ixgbe_mac_X550:
#ifdef CONFIG_IXGBE_DCA
		adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
		adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
		break;
	default:
		break;
	}

#ifdef IXGBE_FCOE
	/* FCoE support exists, always init the FCoE lock */
	spin_lock_init(&adapter->fcoe.lock);
#endif

	/* n-tuple support exists, always init our spinlock */
	spin_lock_init(&adapter->fdir_perfect_lock);

#ifdef CONFIG_IXGBE_DCB
	ixgbe_init_dcb(adapter);
#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
	ixgbe_pbthresh_setup(adapter);
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);

#ifdef CONFIG_PCI_IOV
	if (max_vfs > 0)
		e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");

	/* assign number of SR-IOV VFs */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
			adapter->num_vfs = 0;
			e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
		} else {
			adapter->num_vfs = max_vfs;
		}
	}
#endif /* CONFIG_PCI_IOV */

	/* enable itr by default in dynamic mode */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* set default work limits */
	adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		e_dev_err("EEPROM initialization failed\n");
		return -EIO;
	}

	/* PF holds first pool slot */
	set_bit(0, &adapter->fwd_bitmask);
	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}
/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int orig_node = dev_to_node(dev);
	int ring_node = -1;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;

	if (tx_ring->q_vector)
		ring_node = tx_ring->q_vector->numa_node;

	tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	set_dev_node(dev, ring_node);
	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!tx_ring->desc)
		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
						   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;

		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ixgbe_free_tx_resources(adapter->tx_ring[i]);
	return err;
}
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int orig_node = dev_to_node(dev);
	int ring_node = -1;
	int size;

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;

	if (rx_ring->q_vector)
		ring_node = rx_ring->q_vector->numa_node;

	rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	set_dev_node(dev, ring_node);
	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!rx_ring->desc)
		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
						   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;

		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

#ifdef IXGBE_FCOE
	err = ixgbe_setup_fcoe_ddp_resources(adapter);
	if (!err)
#endif
		return 0;
err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ixgbe_free_rx_resources(adapter->rx_ring[i]);
	return err;
}
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
	ixgbe_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbe_free_tx_resources(adapter->tx_ring[i]);
}
/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
	ixgbe_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

#ifdef IXGBE_FCOE
	ixgbe_free_fcoe_ddp_resources(adapter);
#endif

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbe_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/*
	 * For 82599EB we cannot allow legacy VFs to enable their receive
	 * paths when MTU greater than 1500 is configured.  So display a
	 * warning that legacy VFs will be disabled.
	 */
	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
	    (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
	    (new_mtu > ETH_DATA_LEN))
		e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");

	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);

	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
6086 * ixgbe_open - Called when a network interface is made active
6087 * @netdev: network interface device structure
6089 * Returns 0 on success, negative value on failure
6091 * The open entry point is called when a network interface is made
6092 * active by the system (IFF_UP). At this point all resources needed
6093 * for transmit and receive operations are allocated, the interrupt
6094 * handler is registered with the OS, the watchdog timer is started,
6095 * and the stack is notified that the interface is ready.
6097 int ixgbe_open(struct net_device
*netdev
)
6099 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
6100 struct ixgbe_hw
*hw
= &adapter
->hw
;
6103 /* disallow open during test */
6104 if (test_bit(__IXGBE_TESTING
, &adapter
->state
))
6107 netif_carrier_off(netdev
);
6109 /* allocate transmit descriptors */
6110 err
= ixgbe_setup_all_tx_resources(adapter
);
6114 /* allocate receive descriptors */
6115 err
= ixgbe_setup_all_rx_resources(adapter
);
6119 ixgbe_configure(adapter
);
6121 err
= ixgbe_request_irq(adapter
);
6125 /* Notify the stack of the actual queue counts. */
6126 if (adapter
->num_rx_pools
> 1)
6127 queues
= adapter
->num_rx_queues_per_pool
;
6129 queues
= adapter
->num_tx_queues
;
6131 err
= netif_set_real_num_tx_queues(netdev
, queues
);
6133 goto err_set_queues
;
6135 if (adapter
->num_rx_pools
> 1 &&
6136 adapter
->num_rx_queues
> IXGBE_MAX_L2A_QUEUES
)
6137 queues
= IXGBE_MAX_L2A_QUEUES
;
6139 queues
= adapter
->num_rx_queues
;
6140 err
= netif_set_real_num_rx_queues(netdev
, queues
);
6142 goto err_set_queues
;
6144 ixgbe_ptp_init(adapter
);
6146 ixgbe_up_complete(adapter
);
6148 ixgbe_clear_udp_tunnel_port(adapter
, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK
);
6149 udp_tunnel_get_rx_info(netdev
);
6154 ixgbe_free_irq(adapter
);
6156 ixgbe_free_all_rx_resources(adapter
);
6157 if (hw
->phy
.ops
.set_phy_power
&& !adapter
->wol
)
6158 hw
->phy
.ops
.set_phy_power(&adapter
->hw
, false);
6160 ixgbe_free_all_tx_resources(adapter
);
6162 ixgbe_reset(adapter
);
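
/*
 * Note on the error paths above: the unwind labels run in the reverse
 * order of setup (queue counts -> IRQ -> Rx rings -> Tx rings), so a
 * failure at any step releases only what was already acquired before
 * finishing with a hardware reset.
 */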

static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
{
	ixgbe_ptp_suspend(adapter);

	if (adapter->hw.phy.ops.enter_lplu) {
		adapter->hw.phy.reset_disable = true;
		ixgbe_down(adapter);
		adapter->hw.phy.ops.enter_lplu(&adapter->hw);
		adapter->hw.phy.reset_disable = false;
	} else {
		ixgbe_down(adapter);
	}

	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_ptp_stop(adapter);

	ixgbe_close_suspend(adapter);

	ixgbe_fdir_filter_exit(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	u32 err;

	adapter->hw.hw_addr = adapter->io_addr;
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		e_dev_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	smp_mb__before_atomic();
	clear_bit(__IXGBE_DISABLED, &adapter->state);
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	ixgbe_reset(adapter);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	rtnl_lock();
	err = ixgbe_init_interrupt_scheme(adapter);
	if (!err && netif_running(netdev))
		err = ixgbe_open(netdev);

	rtnl_unlock();

	if (err)
		return err;

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, fctrl;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_close_suspend(adapter);

	ixgbe_clear_interrupt_scheme(adapter);
	rtnl_unlock();

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (hw->mac.ops.stop_link_on_d3)
		hw->mac.ops.stop_link_on_d3(hw);

	if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/* enable the optics for 82599 SFP+ fiber as we can WoL */
		if (hw->mac.ops.enable_tx_laser)
			hw->mac.ops.enable_tx_laser(hw);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_MPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}

		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		ctrl |= IXGBE_CTRL_GIO_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);

		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		pci_wake_from_d3(pdev, false);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		pci_wake_from_d3(pdev, !!wufc);
		break;
	default:
		break;
	}

	*enable_wake = !!wufc;
	if (hw->phy.ops.set_phy_power && !*enable_wake)
		hw->phy.ops.set_phy_power(hw, false);

	ixgbe_release_hw_control(adapter);

	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}
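
/*
 * Wake-up note: wufc caches the user's Wake-on-LAN filter selection.
 * When any filter is set, the code above leaves the receive path
 * configured (all-multi for wake-on-multicast) and arms WUFC; otherwise
 * both wake-up control registers are cleared so the part cannot wake
 * the system.
 */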

#ifdef CONFIG_PM
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
	u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;

	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		u64 rsc_count = 0;
		u64 rsc_flush = 0;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
		}
		adapter->rsc_total_count = rsc_count;
		adapter->rsc_total_flush = rsc_flush;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
		bytes += rx_ring->stats.bytes;
		packets += rx_ring->stats.packets;
	}
	adapter->non_eop_descs = non_eop_descs;
	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
	adapter->hw_csum_rx_error = hw_csum_rx_error;
	netdev->stats.rx_bytes = bytes;
	netdev->stats.rx_packets = packets;

	bytes = 0;
	packets = 0;
	/* gather some stats to the adapter struct that are per queue */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		bytes += tx_ring->stats.bytes;
		packets += tx_ring->stats.packets;
	}
	adapter->restart_queue = restart_queue;
	adapter->tx_busy = tx_busy;
	netdev->stats.tx_bytes = bytes;
	netdev->stats.tx_packets = packets;

	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);

	/* 8 register reads */
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		hwstats->mpc[i] += mpc;
		total_mpc += hwstats->mpc[i];
		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			break;
		default:
			break;
		}
	}

	/*16 register reads */
	for (i = 0; i < 16; i++) {
		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		if ((hw->mac.type == ixgbe_mac_82599EB) ||
		    (hw->mac.type == ixgbe_mac_X540) ||
		    (hw->mac.type == ixgbe_mac_X550) ||
		    (hw->mac.type == ixgbe_mac_X550EM_x) ||
		    (hw->mac.type == ixgbe_mac_x550em_a)) {
			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
		}
	}

	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	hwstats->gprc -= missed_rx;

	ixgbe_update_xoff_received(adapter);

	/* 82598 hardware only has a 32 bit counter in the high register */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		/* OS2BMC stats are X540 and later */
		hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
		hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
		hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
		hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
	case ixgbe_mac_82599EB:
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
					     IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
		hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
		hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
		/* Add up per cpu counters for total ddp alloc fail */
		if (adapter->fcoe.ddp_pool) {
			struct ixgbe_fcoe *fcoe = &adapter->fcoe;
			struct ixgbe_fcoe_ddp_pool *ddp_pool;
			unsigned int cpu;
			u64 noddp = 0, noddp_ext_buff = 0;
			for_each_possible_cpu(cpu) {
				ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
				noddp += ddp_pool->noddp;
				noddp_ext_buff += ddp_pool->noddp_ext_buff;
			}
			hwstats->fcoe_noddp = noddp;
			hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
		}
#endif /* IXGBE_FCOE */
		break;
	default:
		break;
	}

	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	hwstats->bprc += bprc;
	hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		hwstats->mprc -= bprc;
	hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	hwstats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	hwstats->lxofftxc += lxoff;
	hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	hwstats->gptc -= xon_off_tot;
	hwstats->mptc -= xon_off_tot;
	hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	hwstats->ptc64 -= xon_off_tot;
	hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = hwstats->mprc;

	/* Rx Errors */
	netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
	netdev->stats.rx_dropped = 0;
	netdev->stats.rx_length_errors = hwstats->rlec;
	netdev->stats.rx_crc_errors = hwstats->crcerrs;
	netdev->stats.rx_missed_errors = total_mpc;
}
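
/*
 * Counter note: most of the statistics registers read above are
 * clear-on-read, which is why every read is accumulated into hwstats
 * rather than stored.  The bare IXGBE_READ_REG() calls marked
 * "to clear" discard the high half of split 64-bit counters purely to
 * reset them.
 */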

/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	/* do nothing if we are not using signature filters */
	if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
		return;

	adapter->fdir_overflow++;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
				&(adapter->tx_ring[i]->state));
		/* re-enable flow director interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
	} else {
		e_err(probe, "failed to finish FDIR re-initialization, ignored adding FDIR ATR filters\n");
	}
}

/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 */
static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/* If we're down, removing or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
	}

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
			(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
	} else {
		/* get one bit for every active tx/rx interrupt vector */
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct ixgbe_q_vector *qv = adapter->q_vector[i];
			if (qv->rx.ring || qv->tx.ring)
				eics |= BIT_ULL(i);
		}
	}

	/* Cause software interrupt to ensure rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);
}
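
/*
 * EICS note: writing a vector's bit to EICS asks the hardware to fire
 * that interrupt in software.  If an interrupt was lost, this strobe
 * makes the ring get serviced anyway, and the per-ring
 * check_for_tx_hang bits set above let the cleanup path flag a
 * genuinely hung queue.
 */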

/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
		return;

	if (hw->mac.ops.check_link) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	} else {
		/* always assume link is up, if no check link function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
		hw->mac.ops.fc_enable(hw);
		ixgbe_set_rx_drop_en(adapter);
	}

	if (link_up ||
	    time_after(jiffies, (adapter->link_check_timeout +
				 IXGBE_TRY_LINK_TIMEOUT))) {
		adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}

static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
	struct net_device *netdev = adapter->netdev;
	struct dcb_app app = {
			      .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
			      .protocol = 0,
			     };
	u8 up = 0;

	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
		up = dcb_ieee_getapp_mask(netdev, &app);

	adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
#endif
}

static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
{
	if (netif_is_macvlan(upper)) {
		struct macvlan_dev *vlan = netdev_priv(upper);

		if (vlan->fwd_priv)
			netif_tx_wake_all_queues(upper);
	}

	return 0;
}

/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	const char *speed_str;
	bool flow_rx, flow_tx;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB: {
		u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
		flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
		flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
	}
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
	case ixgbe_mac_82599EB: {
		u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
		u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
		flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
		flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
	}
		break;
	default:
		flow_tx = false;
		flow_rx = false;
		break;
	}

	adapter->last_rx_ptp_check = jiffies;

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
		ixgbe_ptp_start_cyclecounter(adapter);

	switch (link_speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		speed_str = "10 Gbps";
		break;
	case IXGBE_LINK_SPEED_2_5GB_FULL:
		speed_str = "2.5 Gbps";
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		speed_str = "1 Gbps";
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		speed_str = "100 Mbps";
		break;
	default:
		speed_str = "unknown speed";
		break;
	}
	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
	       ((flow_rx && flow_tx) ? "RX/TX" :
	       (flow_rx ? "RX" :
	       (flow_tx ? "TX" : "None"))));

	netif_carrier_on(netdev);
	ixgbe_check_vf_rate_limit(adapter);

	/* enable transmits */
	netif_tx_wake_all_queues(adapter->netdev);

	/* enable any upper devices */
	rtnl_lock();
	netdev_walk_all_upper_dev_rcu(adapter->netdev,
				      ixgbe_enable_macvlan, NULL);
	rtnl_unlock();

	/* update the default user priority for VFs */
	ixgbe_update_default_up(adapter);

	/* ping all the active vfs to let them know link has changed */
	ixgbe_ping_all_vfs(adapter);
}

/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->link_up = false;
	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	/* poll for SFP+ cable when link is down */
	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
		ixgbe_ptp_start_cyclecounter(adapter);

	e_info(drv, "NIC Link is Down\n");
	netif_carrier_off(netdev);

	/* ping all the active vfs to let them know link has changed */
	ixgbe_ping_all_vfs(adapter);
}

static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

		if (tx_ring->next_to_use != tx_ring->next_to_clean)
			return true;
	}

	return false;
}

static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i, j;

	if (!adapter->num_vfs)
		return false;

	/* resetting the PF is only needed for MAC before X550 */
	if (hw->mac.type >= ixgbe_mac_X550)
		return false;

	for (i = 0; i < adapter->num_vfs; i++) {
		for (j = 0; j < q_per_pool; j++) {
			u32 h, t;

			h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
			t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));

			if (h != t)
				return true;
		}
	}

	return false;
}
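
/*
 * A VF queue is considered to have pending work when its head and tail
 * pointers differ: the PF reads each pool's descriptor head/tail pair
 * through the PVFTDHN/PVFTDTN windows, and any mismatch means
 * descriptors were queued that the hardware has not yet consumed.
 */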

/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
{
	if (!netif_carrier_ok(adapter->netdev)) {
		if (ixgbe_ring_tx_pending(adapter) ||
		    ixgbe_vf_tx_pending(adapter)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			e_warn(drv, "initiating reset to clear Tx work after link loss\n");
			set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
		}
	}
}

#ifdef CONFIG_PCI_IOV
static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
				      struct pci_dev *vfdev)
{
	if (!pci_wait_for_pending_transaction(vfdev))
		e_dev_warn("Issuing VFLR with pending transactions\n");

	e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
	pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);

	msleep(100);
}

static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int vf;
	u32 gpc;

	if (!(netif_carrier_ok(adapter->netdev)))
		return;

	gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
	if (gpc) /* If incrementing then no need for the check below */
		return;
	/* Check to see if a bad DMA write target from an errant or
	 * malicious VF has caused a PCIe error.  If so then we can
	 * issue a VFLR to the offending VF(s) and then resume without
	 * requesting a full slot reset.
	 */

	if (!pdev)
		return;

	/* check status reg for all VFs owned by this PF */
	for (vf = 0; vf < adapter->num_vfs; ++vf) {
		struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
		u16 status_reg;

		if (!vfdev)
			continue;
		pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
		if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
		    status_reg & PCI_STATUS_REC_MASTER_ABORT)
			ixgbe_issue_vf_flr(adapter, vfdev);
	}
}

static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
{
	u32 ssvpc;

	/* Do not perform spoof check for 82598 or if not in IOV mode */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		return;

	ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);

	/*
	 * ssvpc register is cleared on read, if zero then no
	 * spoofed packets in the last interval.
	 */
	if (!ssvpc)
		return;

	e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
}
#else
static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
{
}

static void
ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
{
}
#endif /* CONFIG_PCI_IOV */

/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
	/* if interface is down, removing or resetting, do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	ixgbe_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbe_watchdog_link_is_up(adapter);
	else
		ixgbe_watchdog_link_is_down(adapter);

	ixgbe_check_for_bad_vf(adapter);
	ixgbe_spoof_check(adapter);
	ixgbe_update_stats(adapter);

	ixgbe_watchdog_flush_tx(adapter);
}

/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	s32 err;

	/* not searching for SFP so there is nothing to do here */
	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		return;

	if (adapter->sfp_poll_time &&
	    time_after(adapter->sfp_poll_time, jiffies))
		return; /* If not yet time to poll for SFP */

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;

	adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto sfp_out;

	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* If no cable is present, then we need to reset
		 * the next time we find a good cable. */
		adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
	}

	/* exit on error */
	if (err)
		goto sfp_out;

	/* exit if reset not needed */
	if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		goto sfp_out;

	adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;

	/*
	 * A module may be identified correctly, but the EEPROM may not have
	 * support for that module.  setup_sfp() will fail in that case, so
	 * we should not allow that module to load.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto sfp_out;

	adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
	e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);

sfp_out:
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);

	if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
	    (adapter->netdev->reg_state == NETREG_REGISTERED)) {
		e_dev_err("failed to initialize because an unsupported SFP+ module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported module.\n");
		unregister_netdev(adapter->netdev);
	}
}

/**
 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
 * @adapter: the ixgbe adapter structure
 **/
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 speed;
	bool autoneg = false;

	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
		return;

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;

	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;

	speed = hw->phy.autoneg_advertised;
	if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
		hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);

		/* setup the highest link when no autoneg */
		if (!autoneg) {
			if (speed & IXGBE_LINK_SPEED_10GB_FULL)
				speed = IXGBE_LINK_SPEED_10GB_FULL;
		}
	}

	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, speed, true);

	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}

/**
 * ixgbe_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_service_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	unsigned long next_event_offset;

	/* poll faster when waiting for link */
	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
		next_event_offset = HZ / 10;
	else
		next_event_offset = HZ * 2;

	/* Reset the timer */
	mod_timer(&adapter->service_timer, next_event_offset + jiffies);

	ixgbe_service_event_schedule(adapter);
}

static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 status;

	if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;

	if (!hw->phy.ops.handle_lasi)
		return;

	status = hw->phy.ops.handle_lasi(&adapter->hw);
	if (status != IXGBE_ERR_OVERTEMP)
		return;

	e_crit(drv, "%s\n", ixgbe_overheat_msg);
}

static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
{
	if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
		return;

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	ixgbe_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	adapter->tx_timeout_count++;

	rtnl_lock();
	ixgbe_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_service_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     service_task);
	if (ixgbe_removed(adapter->hw.hw_addr)) {
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbe_down(adapter);
			rtnl_unlock();
		}
		ixgbe_service_event_complete(adapter);
		return;
	}
	if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
		rtnl_lock();
		adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
		udp_tunnel_get_rx_info(adapter->netdev);
		rtnl_unlock();
	}
	ixgbe_reset_subtask(adapter);
	ixgbe_phy_interrupt_subtask(adapter);
	ixgbe_sfp_detection_subtask(adapter);
	ixgbe_sfp_link_config_subtask(adapter);
	ixgbe_check_overtemp_subtask(adapter);
	ixgbe_watchdog_subtask(adapter);
	ixgbe_fdir_reinit_subtask(adapter);
	ixgbe_check_hang_subtask(adapter);

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
		ixgbe_ptp_overflow_check(adapter);
		ixgbe_ptp_rx_hang(adapter);
	}

	ixgbe_service_event_complete(adapter);
}

static int ixgbe_tso(struct ixgbe_ring *tx_ring,
		     struct ixgbe_tx_buffer *first,
		     u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 0 as index for TSO */
	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
			  mss_l4len_idx);

	return 1;
}
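
/*
 * Checksum note: the hardware regenerates the IP and TCP checksums for
 * each segment, so ixgbe_tso() only seeds them.  The IPv4 check is
 * pre-loaded with a sum over the bytes between the outer IP header and
 * the checksum start so those bytes cancel out, and
 * csum_replace_by_diff() backs the (per-segment, variable) payload
 * length out of the TCP pseudo-header sum.
 */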

static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
{
	unsigned int offset = 0;

	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);

	return offset == skb_checksum_start_offset(skb);
}

static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
			  struct ixgbe_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
					 IXGBE_TX_FLAGS_CC)))
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (((first->protocol == htons(ETH_P_IP)) &&
		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
		    ((first->protocol == htons(ETH_P_IPV6)) &&
		     ixgbe_ipv6_csum_is_sctp(skb))) {
			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		/* fall through */
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
}

#define IXGBE_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))
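
/*
 * IXGBE_SET_FLAG() translates a flag from its position in _input to the
 * position _result expects, without branching at run time.  For example,
 * with _flag = 0x2 and _result = 0x80, (_input & 0x2) * (0x80 / 0x2)
 * yields 0x80 exactly when the flag is set.  Since _flag and _result are
 * compile-time constants, the multiply or divide reduces to a shift.
 */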

static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
		       IXGBE_ADVTXD_DCMD_DEXT |
		       IXGBE_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
				   IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
				   IXGBE_ADVTXD_DCMD_TSE);

	/* set timestamp bit if present */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
				   IXGBE_ADVTXD_MAC_TSTAMP);

	/* insert frame checksum */
	cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				   u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* enable L4 checksum for TSO and TX checksum offload */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_CSUM,
					IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_IPV4,
					IXGBE_ADVTXD_POPTS_IXSM);

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_CC,
					IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbe_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
	if (likely(ixgbe_desc_unused(tx_ring) >= size))
		return 0;

	return __ixgbe_maybe_stop_tx(tx_ring, size);
}
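
/*
 * Queue-stop note: __ixgbe_maybe_stop_tx() stops the subqueue first and
 * only then re-checks the free descriptor count after the smp_mb().
 * This closes the race where the cleanup path frees descriptors between
 * the check and the stop; if room appeared, the queue is restarted
 * immediately and the event is counted in restart_queue.
 */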

#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
		       IXGBE_TXD_CMD_RS)

static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
			 struct ixgbe_tx_buffer *first,
			 const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBE_TX_DESC(tx_ring, i);

	ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

#ifdef IXGBE_FCOE
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		if (data_len < sizeof(struct fcoe_crc_eof)) {
			size -= sizeof(struct fcoe_crc_eof) - data_len;
			data_len = 0;
		} else {
			data_len -= sizeof(struct fcoe_crc_eof);
		}
	}

#endif
	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

#ifdef IXGBE_FCOE
		size = min_t(unsigned int, data_len, skb_frag_size(frag));
#else
		size = skb_frag_size(frag);
#endif
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IXGBE_TXD_CMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

static void ixgbe_atr(struct ixgbe_ring *ring,
		      struct ixgbe_tx_buffer *first)
{
	struct ixgbe_q_vector *q_vector = ring->q_vector;
	union ixgbe_atr_hash_dword input = { .dword = 0 };
	union ixgbe_atr_hash_dword common = { .dword = 0 };
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	struct sk_buff *skb;
	__be16 vlan_id;
	int l4_proto;

	/* if ring doesn't have a interrupt vector, cannot perform ATR */
	if (!q_vector)
		return;

	/* do nothing if sampling is disabled */
	if (!ring->atr_sample_rate)
		return;

	ring->atr_count++;

	/* currently only IPv4/IPv6 with TCP is supported */
	if ((first->protocol != htons(ETH_P_IP)) &&
	    (first->protocol != htons(ETH_P_IPV6)))
		return;

	/* snag network header to get L4 type and address */
	skb = first->skb;
	hdr.network = skb_network_header(skb);
	if (unlikely(hdr.network <= skb->data))
		return;
	if (skb->encapsulation &&
	    first->protocol == htons(ETH_P_IP) &&
	    hdr.ipv4->protocol == IPPROTO_UDP) {
		struct ixgbe_adapter *adapter = q_vector->adapter;

		if (unlikely(skb_tail_pointer(skb) < hdr.network +
			     VXLAN_HEADROOM))
			return;

		/* verify the port is recognized as VXLAN */
		if (adapter->vxlan_port &&
		    udp_hdr(skb)->dest == adapter->vxlan_port)
			hdr.network = skb_inner_network_header(skb);

		if (adapter->geneve_port &&
		    udp_hdr(skb)->dest == adapter->geneve_port)
			hdr.network = skb_inner_network_header(skb);
	}

	/* Make sure we have at least [minimum IPv4 header + TCP]
	 * or [IPv6 header] bytes
	 */
	if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
		return;

	/* Currently only IPv4/IPv6 with TCP is supported */
	switch (hdr.ipv4->version) {
	case IPVERSION:
		/* access ihl as u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;
		l4_proto = hdr.ipv4->protocol;
		break;
	case 6:
		hlen = hdr.network - skb->data;
		l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
		hlen -= hdr.network - skb->data;
		break;
	default:
		return;
	}

	if (l4_proto != IPPROTO_TCP)
		return;

	if (unlikely(skb_tail_pointer(skb) < hdr.network +
		     hlen + sizeof(struct tcphdr)))
		return;

	th = (struct tcphdr *)(hdr.network + hlen);

	/* skip this packet since the socket is closing */
	if (th->fin)
		return;

	/* sample on all syn packets or once every atr sample count */
	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
		return;

	/* reset sample count */
	ring->atr_count = 0;

	vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);

	/*
	 * src and dst are inverted, think how the receiver sees them
	 *
	 * The input is broken into two sections, a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
	 * is XORed together and stored in the compressed dword.
	 */
	input.formatted.vlan_id = vlan_id;

	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to source port portion of compressed dword
	 */
	if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
		common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
	else
		common.port.src ^= th->dest ^ first->protocol;
	common.port.dst ^= th->source;

	switch (hdr.ipv4->version) {
	case IPVERSION:
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
		break;
	case 6:
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
			     hdr.ipv6->saddr.s6_addr32[1] ^
			     hdr.ipv6->saddr.s6_addr32[2] ^
			     hdr.ipv6->saddr.s6_addr32[3] ^
			     hdr.ipv6->daddr.s6_addr32[0] ^
			     hdr.ipv6->daddr.s6_addr32[1] ^
			     hdr.ipv6->daddr.s6_addr32[2] ^
			     hdr.ipv6->daddr.s6_addr32[3];
		break;
	default:
		break;
	}

	if (hdr.network != skb_network_header(skb))
		input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
					      input, common, ring->queue_index);
}
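
/*
 * ATR hash note: the signature filter only has room for a compressed
 * flow key, so the addresses and ports are XOR-folded: the IPv6 source
 * and destination words collapse into one 32-bit value, and the source
 * port is XORed with the flex-bytes word (VLAN ethertype or L3 protocol)
 * that shares its position.  The receiver sees src/dst inverted, hence
 * the swapped port assignment above.
 */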

static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
			      void *accel_priv, select_queue_fallback_t fallback)
{
	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
#ifdef IXGBE_FCOE
	struct ixgbe_adapter *adapter;
	struct ixgbe_ring_feature *f;
	int txq;
#endif

	if (fwd_adapter)
		return skb->queue_mapping + fwd_adapter->tx_base_queue;

#ifdef IXGBE_FCOE

	/*
	 * only execute the code below if protocol is FCoE
	 * or FIP and we have FCoE enabled on the adapter
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_FCOE):
	case htons(ETH_P_FIP):
		adapter = netdev_priv(dev);

		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
			break;
		/* fall through */
	default:
		return fallback(dev, skb);
	}

	f = &adapter->ring_feature[RING_F_FCOE];

	txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
					   smp_processor_id();

	while (txq >= f->indices)
		txq -= f->indices;

	return txq + f->offset;
#else
	return fallback(dev, skb);
#endif
}

netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	unsigned short f;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = skb->protocol;
	u8 hdr_len = 0;

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* if we have a HW VLAN tag being added default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;
		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			goto out_drop;

		tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
				  IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
	}
	protocol = vlan_get_protocol(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    adapter->ptp_clock &&
	    !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
				   &adapter->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IXGBE_TX_FLAGS_TSTAMP;

		/* schedule check for Tx timestamp */
		adapter->ptp_tx_skb = skb_get(skb);
		adapter->ptp_tx_start = jiffies;
		schedule_work(&adapter->ptp_tx_work);
	}

	skb_tx_timestamp(skb);

#ifdef CONFIG_PCI_IOV
	/*
	 * Use the l2switch_enable flag - would be false if the DMA
	 * Tx switch had been disabled.
	 */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		tx_flags |= IXGBE_TX_FLAGS_CC;

#endif
	/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
	    ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
	     (skb->priority != TC_PRIO_CONTROL))) {
		tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
					IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;

			if (skb_cow_head(skb, 0))
				goto out_drop;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 IXGBE_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
		}
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

#ifdef IXGBE_FCOE
	/* setup tx offload for FCoE */
	if ((protocol == htons(ETH_P_FCOE)) &&
	    (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
		tso = ixgbe_fso(tx_ring, first, &hdr_len);
		if (tso < 0)
			goto out_drop;

		goto xmit_fcoe;
	}

#endif /* IXGBE_FCOE */
	tso = ixgbe_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbe_tx_csum(tx_ring, first);

	/* add the ATR filter if ATR is on */
	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
		ixgbe_atr(tx_ring, first);

#ifdef IXGBE_FCOE
xmit_fcoe:
#endif /* IXGBE_FCOE */
	ixgbe_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
				      struct net_device *netdev,
				      struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;

	/*
	 * The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb_put_padto(skb, 17))
		return NETDEV_TX_OK;

	tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];

	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}

static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	return __ixgbe_xmit_frame(skb, netdev, NULL);
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	ixgbe_mac_set_default_filter(adapter);

	return 0;
}

static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return ixgbe_ptp_set_ts_config(adapter, req);
	case SIOCGHWTSTAMP:
		return ixgbe_ptp_get_ts_config(adapter, req);
	default:
		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
	}
}

/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @netdev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (is_valid_ether_addr(hw->mac.san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();

		/* update SAN MAC vmdq pool selection */
		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @netdev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	/* loop through and schedule all active queues */
	for (i = 0; i < adapter->num_q_vectors; i++)
		ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
}

#endif
static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_irq(&ring->syncp);
				packets = ring->stats.packets;
				bytes   = ring->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
			stats->rx_packets += packets;
			stats->rx_bytes   += bytes;
		}
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_irq(&ring->syncp);
				packets = ring->stats.packets;
				bytes   = ring->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
			stats->tx_packets += packets;
			stats->tx_bytes   += bytes;
		}
	}
	rcu_read_unlock();

	/* following stats updated by ixgbe_watchdog_task() */
	stats->multicast	= netdev->stats.multicast;
	stats->rx_errors	= netdev->stats.rx_errors;
	stats->rx_length_errors	= netdev->stats.rx_length_errors;
	stats->rx_crc_errors	= netdev->stats.rx_crc_errors;
	stats->rx_missed_errors	= netdev->stats.rx_missed_errors;

	return stats;
}
8162 #ifdef CONFIG_IXGBE_DCB
8164 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
8165 * @adapter: pointer to ixgbe_adapter
8166 * @tc: number of traffic classes currently enabled
8168 * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
8169 * 802.1Q priority maps to a packet buffer that exists.
8171 static void ixgbe_validate_rtr(struct ixgbe_adapter
*adapter
, u8 tc
)
8173 struct ixgbe_hw
*hw
= &adapter
->hw
;
8177 /* 82598 have a static priority to TC mapping that can not
8178 * be changed so no validation is needed.
8180 if (hw
->mac
.type
== ixgbe_mac_82598EB
)
8183 reg
= IXGBE_READ_REG(hw
, IXGBE_RTRUP2TC
);
8186 for (i
= 0; i
< MAX_TRAFFIC_CLASS
; i
++) {
8187 u8 up2tc
= reg
>> (i
* IXGBE_RTRUP2TC_UP_SHIFT
);
8189 /* If up2tc is out of bounds default to zero */
8191 reg
&= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT
);
8195 IXGBE_WRITE_REG(hw
, IXGBE_RTRUP2TC
, reg
);
8201 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
8202 * @adapter: Pointer to adapter struct
8204 * Populate the netdev user priority to tc map
8206 static void ixgbe_set_prio_tc_map(struct ixgbe_adapter
*adapter
)
8208 struct net_device
*dev
= adapter
->netdev
;
8209 struct ixgbe_dcb_config
*dcb_cfg
= &adapter
->dcb_cfg
;
8210 struct ieee_ets
*ets
= adapter
->ixgbe_ieee_ets
;
8213 for (prio
= 0; prio
< MAX_USER_PRIORITY
; prio
++) {
8216 if (adapter
->dcbx_cap
& DCB_CAP_DCBX_VER_CEE
)
8217 tc
= ixgbe_dcb_get_tc_from_up(dcb_cfg
, 0, prio
);
8219 tc
= ets
->prio_tc
[prio
];
8221 netdev_set_prio_tc_map(dev
, prio
, tc
);
8225 #endif /* CONFIG_IXGBE_DCB */
/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 *
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 */
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	bool pools;

	/* Hardware supports up to 8 traffic classes */
	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
		return -EINVAL;

	if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
		return -EINVAL;

	pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
	if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
		return -EBUSY;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbe_close(dev);
	else
		ixgbe_reset(adapter);

	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_IXGBE_DCB
	if (tc) {
		netdev_set_num_tc(dev, tc);
		ixgbe_set_prio_tc_map(adapter);

		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;

		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
			adapter->hw.fc.requested_mode = ixgbe_fc_none;
		}
	} else {
		netdev_reset_tc(dev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	ixgbe_validate_rtr(adapter, tc);

#endif /* CONFIG_IXGBE_DCB */
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		return ixgbe_open(dev);

	return 0;
}
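/* ixgbe_setup_tc() is typically reached from user space through the mqprio
 * qdisc; a hypothetical invocation (interface name assumed):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 4 \
 *		map 0 1 2 3 0 1 2 3 hw 1
 *
 * With "hw 1", mqprio hands the TC count to .ndo_setup_tc, which lands in
 * __ixgbe_setup_tc() below and then here.
 */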
static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
			       struct tc_cls_u32_offload *cls)
{
	u32 hdl = cls->knode.handle;
	u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
	u32 loc = cls->knode.handle & 0xfffff;
	int err = 0, i, j;
	struct ixgbe_jump_table *jump = NULL;

	if (loc > IXGBE_MAX_HW_ENTRIES)
		return -EINVAL;

	if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
		return -EINVAL;

	/* Clear this filter in the link data it is associated with */
	if (uhtid != 0x800) {
		jump = adapter->jump_tables[uhtid];
		if (!jump)
			return -EINVAL;
		if (!test_bit(loc - 1, jump->child_loc_map))
			return -EINVAL;
		clear_bit(loc - 1, jump->child_loc_map);
	}

	/* Check if the filter being deleted is a link */
	for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
		jump = adapter->jump_tables[i];
		if (jump && jump->link_hdl == hdl) {
			/* Delete filters in the hardware in the child hash
			 * table associated with this link
			 */
			for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
				if (!test_bit(j, jump->child_loc_map))
					continue;
				spin_lock(&adapter->fdir_perfect_lock);
				err = ixgbe_update_ethtool_fdir_entry(adapter,
								      NULL,
								      j + 1);
				spin_unlock(&adapter->fdir_perfect_lock);
				clear_bit(j, jump->child_loc_map);
			}
			/* Remove resources for this link */
			kfree(jump->input);
			kfree(jump->mask);
			kfree(jump);
			adapter->jump_tables[i] = NULL;
			return err;
		}
	}

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
	spin_unlock(&adapter->fdir_perfect_lock);
	return err;
}
static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
					    __be16 protocol,
					    struct tc_cls_u32_offload *cls)
{
	u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);

	if (uhtid >= IXGBE_MAX_LINK_HANDLE)
		return -EINVAL;

	/* These ixgbe devices do not support hash tables at the moment
	 * so abort when given hash tables.
	 */
	if (cls->hnode.divisor > 0)
		return -EINVAL;

	set_bit(uhtid - 1, &adapter->tables);
	return 0;
}
static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
					    struct tc_cls_u32_offload *cls)
{
	u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);

	if (uhtid >= IXGBE_MAX_LINK_HANDLE)
		return -EINVAL;

	clear_bit(uhtid - 1, &adapter->tables);
	return 0;
}
#ifdef CONFIG_NET_CLS_ACT
struct upper_walk_data {
	struct ixgbe_adapter *adapter;
	u64 action;
	int ifindex;
	u8 queue;
};

static int get_macvlan_queue(struct net_device *upper, void *_data)
{
	if (netif_is_macvlan(upper)) {
		struct macvlan_dev *dfwd = netdev_priv(upper);
		struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
		struct upper_walk_data *data = _data;
		struct ixgbe_adapter *adapter = data->adapter;
		int ifindex = data->ifindex;

		if (vadapter && vadapter->netdev->ifindex == ifindex) {
			data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
			data->action = data->queue;
			return 1;
		}
	}

	return 0;
}
static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
				  u8 *queue, u64 *action)
{
	unsigned int num_vfs = adapter->num_vfs, vf;
	struct upper_walk_data data;
	struct net_device *upper;

	/* redirect to a SRIOV VF */
	for (vf = 0; vf < num_vfs; ++vf) {
		upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
		if (upper->ifindex == ifindex) {
			if (adapter->num_rx_pools > 1)
				*queue = vf * 2;
			else
				*queue = vf * adapter->num_rx_queues_per_pool;

			*action = vf + 1;
			*action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
			return 0;
		}
	}

	/* redirect to a offloaded macvlan netdev */
	data.adapter = adapter;
	data.ifindex = ifindex;
	data.action = 0;
	data.queue = 0;
	if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
					  get_macvlan_queue, &data)) {
		*action = data.action;
		*queue = data.queue;

		return 0;
	}

	return -EINVAL;
}
static int parse_tc_actions(struct ixgbe_adapter *adapter,
			    struct tcf_exts *exts, u64 *action, u8 *queue)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {

		/* Drop action */
		if (is_tcf_gact_shot(a)) {
			*action = IXGBE_FDIR_DROP_QUEUE;
			*queue = IXGBE_FDIR_DROP_QUEUE;
			return 0;
		}

		/* Redirect to a VF or a offloaded macvlan */
		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);

			err = handle_redirect_action(adapter, ifindex, queue,
						     action);
			if (err == 0)
				return err;
		}
	}

	return -EINVAL;
}
#else
static int parse_tc_actions(struct ixgbe_adapter *adapter,
			    struct tcf_exts *exts, u64 *action, u8 *queue)
{
	return -EINVAL;
}
#endif /* CONFIG_NET_CLS_ACT */
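/* parse_tc_actions() above recognizes exactly two offloadable actions:
 * gact drop and mirred egress redirect. Hypothetical examples (interface
 * names assumed) that exercise each path:
 *
 *	tc filter add dev eth0 parent ffff: protocol ip u32 \
 *		match ip dst 192.168.0.1/32 action drop
 *	tc filter add dev eth0 parent ffff: protocol ip u32 \
 *		match ip dst 192.168.0.2/32 \
 *		action mirred egress redirect dev macvlan0
 */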
static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
				    union ixgbe_atr_input *mask,
				    struct tc_cls_u32_offload *cls,
				    struct ixgbe_mat_field *field_ptr,
				    struct ixgbe_nexthdr *nexthdr)
{
	int i, j, off;
	__be32 val, m;
	bool found_entry = false, found_jump_field = false;

	for (i = 0; i < cls->knode.sel->nkeys; i++) {
		off = cls->knode.sel->keys[i].off;
		val = cls->knode.sel->keys[i].val;
		m = cls->knode.sel->keys[i].mask;

		for (j = 0; field_ptr[j].val; j++) {
			if (field_ptr[j].off == off) {
				field_ptr[j].val(input, mask, val, m);
				input->filter.formatted.flow_type |=
					field_ptr[j].type;
				found_entry = true;
				break;
			}
		}
		if (nexthdr) {
			if (nexthdr->off == cls->knode.sel->keys[i].off &&
			    nexthdr->val == cls->knode.sel->keys[i].val &&
			    nexthdr->mask == cls->knode.sel->keys[i].mask)
				found_jump_field = true;
			else
				continue;
		}
	}

	if (nexthdr && !found_jump_field)
		return -EINVAL;

	if (!found_entry)
		return 0;

	mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				    IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	return 0;
}
static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
				  __be16 protocol,
				  struct tc_cls_u32_offload *cls)
{
	u32 loc = cls->knode.handle & 0xfffff;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_mat_field *field_ptr;
	struct ixgbe_fdir_filter *input = NULL;
	union ixgbe_atr_input *mask = NULL;
	struct ixgbe_jump_table *jump = NULL;
	int i, err = -EINVAL;
	u8 queue;
	u32 uhtid, link_uhtid;

	uhtid = TC_U32_USERHTID(cls->knode.handle);
	link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);

	/* At the moment cls_u32 jumps to network layer and skips past
	 * L2 headers. The canonical method to match L2 frames is to use
	 * negative values. However this is error prone at best but really
	 * just broken because there is no way to "know" what sort of hdr
	 * is in front of the network layer. Fix cls_u32 to support L2
	 * headers when needed.
	 */
	if (protocol != htons(ETH_P_IP))
		return err;

	if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return err;
	}

	/* cls u32 is a graph starting at root node 0x800. The driver tracks
	 * links and also the fields used to advance the parser across each
	 * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map
	 * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h
	 * To add support for new nodes update ixgbe_model.h parse structures
	 * this function _should_ be generic try not to hardcode values here.
	 */
	if (uhtid == 0x800) {
		field_ptr = (adapter->jump_tables[0])->mat;
	} else {
		if (uhtid >= IXGBE_MAX_LINK_HANDLE)
			return err;
		if (!adapter->jump_tables[uhtid])
			return err;
		field_ptr = (adapter->jump_tables[uhtid])->mat;
	}

	if (!field_ptr)
		return err;

	/* At this point we know the field_ptr is valid and need to either
	 * build cls_u32 link or attach filter. Because adding a link to
	 * a handle that does not exist is invalid and the same for adding
	 * rules to handles that don't exist.
	 */
	if (link_uhtid) {
		struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;

		if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
			return err;

		if (!test_bit(link_uhtid - 1, &adapter->tables))
			return err;

		/* Multiple filters as links to the same hash table are not
		 * supported. To add a new filter with the same next header
		 * but different match/jump conditions, create a new hash table
		 * and link to it.
		 */
		if (adapter->jump_tables[link_uhtid] &&
		    (adapter->jump_tables[link_uhtid])->link_hdl) {
			e_err(drv, "Link filter exists for link: %x\n",
			      link_uhtid);
			return err;
		}

		for (i = 0; nexthdr[i].jump; i++) {
			if (nexthdr[i].o != cls->knode.sel->offoff ||
			    nexthdr[i].s != cls->knode.sel->offshift ||
			    nexthdr[i].m != cls->knode.sel->offmask)
				return err;

			jump = kzalloc(sizeof(*jump), GFP_KERNEL);
			if (!jump)
				return -ENOMEM;
			input = kzalloc(sizeof(*input), GFP_KERNEL);
			if (!input) {
				err = -ENOMEM;
				goto free_jump;
			}
			mask = kzalloc(sizeof(*mask), GFP_KERNEL);
			if (!mask) {
				err = -ENOMEM;
				goto free_input;
			}
			jump->input = input;
			jump->mask = mask;
			jump->link_hdl = cls->knode.handle;

			err = ixgbe_clsu32_build_input(input, mask, cls,
						       field_ptr, &nexthdr[i]);
			if (!err) {
				jump->mat = nexthdr[i].jump;
				adapter->jump_tables[link_uhtid] = jump;
				break;
			}
		}
		return 0;
	}

	input = kzalloc(sizeof(*input), GFP_KERNEL);
	if (!input)
		return -ENOMEM;
	mask = kzalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask) {
		err = -ENOMEM;
		goto free_input;
	}

	if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
		if ((adapter->jump_tables[uhtid])->input)
			memcpy(input, (adapter->jump_tables[uhtid])->input,
			       sizeof(*input));
		if ((adapter->jump_tables[uhtid])->mask)
			memcpy(mask, (adapter->jump_tables[uhtid])->mask,
			       sizeof(*mask));

		/* Lookup in all child hash tables if this location is already
		 * filled with a filter
		 */
		for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
			struct ixgbe_jump_table *link = adapter->jump_tables[i];

			if (link && (test_bit(loc - 1, link->child_loc_map))) {
				e_err(drv, "Filter exists in location: %x\n",
				      loc);
				err = -EINVAL;
				goto err_out;
			}
		}
	}
	err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
	if (err)
		goto err_out;

	err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
			       &queue);
	if (err < 0)
		goto err_out;

	input->sw_idx = loc;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, mask);
		if (err)
			goto err_out_w_lock;
	} else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
		err = -EINVAL;
		goto err_out_w_lock;
	}

	ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
	err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
						    input->sw_idx, queue);
	if (!err)
		ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
	spin_unlock(&adapter->fdir_perfect_lock);

	if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
		set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);

	kfree(mask);
	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(mask);
free_input:
	kfree(input);
free_jump:
	kfree(jump);
	return err;
}
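/* A hypothetical two-node u32 graph (interface name, handles and offsets
 * assumed) showing the link case handled above: the first two commands
 * create hash table 1 and a link into it from root 0x800 keyed on the
 * IPv4 protocol field, the third attaches a filter inside that table:
 *
 *	tc filter add dev eth0 parent ffff: protocol ip prio 99 \
 *		handle 1: u32 divisor 1
 *	tc filter add dev eth0 parent ffff: protocol ip prio 99 u32 \
 *		ht 800: match ip protocol 6 ff \
 *		link 1: offset at 0 mask 0f00 shift 6 plus 0 eat
 *	tc filter add dev eth0 parent ffff: protocol ip prio 99 u32 \
 *		ht 1: match tcp dst 80 ffff action drop
 */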
static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			    struct tc_to_netdev *tc)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
	    tc->type == TC_SETUP_CLSU32) {
		switch (tc->cls_u32->command) {
		case TC_CLSU32_NEW_KNODE:
		case TC_CLSU32_REPLACE_KNODE:
			return ixgbe_configure_clsu32(adapter,
						      proto, tc->cls_u32);
		case TC_CLSU32_DELETE_KNODE:
			return ixgbe_delete_clsu32(adapter, tc->cls_u32);
		case TC_CLSU32_NEW_HNODE:
		case TC_CLSU32_REPLACE_HNODE:
			return ixgbe_configure_clsu32_add_hnode(adapter, proto,
								tc->cls_u32);
		case TC_CLSU32_DELETE_HNODE:
			return ixgbe_configure_clsu32_del_hnode(adapter,
								tc->cls_u32);
		default:
			return -EINVAL;
		}
	}

	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return ixgbe_setup_tc(dev, tc->tc);
}
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
	rtnl_unlock();
}

#endif
void ixgbe_do_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);
	else
		ixgbe_reset(adapter);
}
static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	/* Turn off LRO if not RSC capable */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
		features &= ~NETIF_F_LRO;

	return features;
}
static int ixgbe_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;
	bool need_reset = false;

	/* Make sure RSC matches LRO, reset if change */
	if (!(features & NETIF_F_LRO)) {
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
			need_reset = true;
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
	} else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
		   !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
		if (adapter->rx_itr_setting == 1 ||
		    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			need_reset = true;
		} else if ((changed ^ features) & NETIF_F_LRO) {
			e_info(probe, "rx-usecs set too low, "
			       "disabling RSC\n");
		}
	}

	/*
	 * Check if Flow Director n-tuple support or hw_tc support was
	 * enabled or disabled. If the state changed, we need to reset.
	 */
	if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
		/* turn off ATR, enable perfect filters and reset */
		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			need_reset = true;

		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
	} else {
		/* turn off perfect filters, enable ATR and reset */
		if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
			need_reset = true;

		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;

		/* We cannot enable ATR if SR-IOV is enabled */
		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
		    /* We cannot enable ATR if we have 2 or more tcs */
		    (netdev_get_num_tc(netdev) > 1) ||
		    /* We cannot enable ATR if RSS is disabled */
		    (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
		    /* A sample rate of 0 indicates ATR disabled */
		    (!adapter->atr_sample_rate))
			; /* do nothing not supported */
		else /* otherwise supported and set the flag */
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

	if (changed & NETIF_F_RXALL)
		need_reset = true;

	netdev->features = features;

	if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
		if (features & NETIF_F_RXCSUM) {
			adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
		} else {
			u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;

			ixgbe_clear_udp_tunnel_port(adapter, port_mask);
		}
	}

	if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
		if (features & NETIF_F_RXCSUM) {
			adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
		} else {
			u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;

			ixgbe_clear_udp_tunnel_port(adapter, port_mask);
		}
	}

	if (need_reset)
		ixgbe_do_reset(netdev);
	else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER))
		ixgbe_set_rx_mode(netdev);

	return 0;
}
/**
 * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
 * @dev: The port's netdev
 * @ti: Tunnel endpoint information
 **/
static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
				      struct udp_tunnel_info *ti)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	__be16 port = ti->port;
	u32 port_shift = 0;
	u32 reg;

	if (ti->sa_family != AF_INET)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
			return;

		if (adapter->vxlan_port == port)
			return;

		if (adapter->vxlan_port) {
			netdev_info(dev,
				    "VXLAN port %d set, not adding port %d\n",
				    ntohs(adapter->vxlan_port),
				    ntohs(port));
			return;
		}

		adapter->vxlan_port = port;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
			return;

		if (adapter->geneve_port == port)
			return;

		if (adapter->geneve_port) {
			netdev_info(dev,
				    "GENEVE port %d set, not adding port %d\n",
				    ntohs(adapter->geneve_port),
				    ntohs(port));
			return;
		}

		port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
		adapter->geneve_port = port;
		break;
	default:
		return;
	}

	reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
}
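/* Illustrative only: VXLANCTRL holds the VXLAN UDP port in its low 16 bits
 * and the GENEVE port shifted by IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT,
 * which is why the single read-modify-write above covers both tunnel
 * types. A hypothetical user-space trigger (interface names assumed):
 *
 *	ip link add vxlan0 type vxlan id 42 dev eth0 dstport 4789
 *
 * Creating the tunnel causes the stack to call .ndo_udp_tunnel_add with
 * UDP_TUNNEL_TYPE_VXLAN and the chosen port.
 */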
/**
 * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
 * @dev: The port's netdev
 * @ti: Tunnel endpoint information
 **/
static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
				      struct udp_tunnel_info *ti)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	u32 port_mask;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
	    ti->type != UDP_TUNNEL_TYPE_GENEVE)
		return;

	if (ti->sa_family != AF_INET)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
			return;

		if (adapter->vxlan_port != ti->port) {
			netdev_info(dev, "VXLAN port %d not found\n",
				    ntohs(ti->port));
			return;
		}

		port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
			return;

		if (adapter->geneve_port != ti->port) {
			netdev_info(dev, "GENEVE port %d not found\n",
				    ntohs(ti->port));
			return;
		}

		port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
		break;
	default:
		return;
	}

	ixgbe_clear_udp_tunnel_port(adapter, port_mask);
	adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
}
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			     struct net_device *dev,
			     const unsigned char *addr, u16 vid,
			     u16 flags)
{
	/* guarantee we can provide a unique filter for the unicast address */
	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
		struct ixgbe_adapter *adapter = netdev_priv(dev);
		u16 pool = VMDQ_P(0);

		if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
			return -ENOMEM;
	}

	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
/**
 * ixgbe_configure_bridge_mode - set various bridge modes
 * @adapter: the private structure
 * @mode: requested bridge mode
 *
 * Configure some settings required for various bridge modes.
 **/
static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
				       __u16 mode)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int p, num_pools;
	u32 vmdctl;

	switch (mode) {
	case BRIDGE_MODE_VEPA:
		/* disable Tx loopback, rely on switch hairpin mode */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);

		/* must enable Rx switching replication to allow multicast
		 * packet reception on all VFs, and to enable source address
		 * pruning.
		 */
		vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
		vmdctl |= IXGBE_VT_CTL_REPLEN;
		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);

		/* enable Rx source address pruning. Note, this requires
		 * replication to be enabled or else it does nothing.
		 */
		num_pools = adapter->num_vfs + adapter->num_rx_pools;
		for (p = 0; p < num_pools; p++) {
			if (hw->mac.ops.set_source_address_pruning)
				hw->mac.ops.set_source_address_pruning(hw,
								       true,
								       p);
		}
		break;
	case BRIDGE_MODE_VEB:
		/* enable Tx loopback for internal VF/PF communication */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
				IXGBE_PFDTXGSWC_VT_LBEN);

		/* disable Rx switching replication unless we have SR-IOV
		 * virtual functions
		 */
		vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
		if (!adapter->num_vfs)
			vmdctl &= ~IXGBE_VT_CTL_REPLEN;
		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);

		/* disable Rx source address pruning, since we don't expect to
		 * be receiving external loopback of our transmitted frames.
		 */
		num_pools = adapter->num_vfs + adapter->num_rx_pools;
		for (p = 0; p < num_pools; p++) {
			if (hw->mac.ops.set_source_address_pruning)
				hw->mac.ops.set_source_address_pruning(hw,
								       false,
								       p);
		}
		break;
	default:
		return -EINVAL;
	}

	adapter->bridge_mode = mode;

	e_info(drv, "enabling bridge mode: %s\n",
	       mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return 0;
}
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
				    struct nlmsghdr *nlh, u16 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		int status;
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		status = ixgbe_configure_bridge_mode(adapter, mode);
		if (status)
			return status;

		break;
	}

	return 0;
}
static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				    struct net_device *dev,
				    u32 filter_mask, int nlflags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       adapter->bridge_mode, 0, 0, nlflags,
				       filter_mask, NULL);
}
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
	struct ixgbe_fwd_adapter *fwd_adapter = NULL;
	struct ixgbe_adapter *adapter = netdev_priv(pdev);
	int used_pools = adapter->num_vfs + adapter->num_rx_pools;
	unsigned int limit;
	int pool, err;

	/* Hardware has a limited number of available pools. Each VF, and the
	 * PF require a pool. Check to ensure we don't attempt to use more
	 * then the available number of pools.
	 */
	if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_RPS
	if (vdev->num_rx_queues != vdev->num_tx_queues) {
		netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
			    vdev->name);
		return ERR_PTR(-EINVAL);
	}
#endif
	/* Check for hardware restriction on number of rx/tx queues */
	if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
	    vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
		netdev_info(pdev,
			    "%s: Supports RX/TX Queue counts 1,2, and 4\n",
			    pdev->name);
		return ERR_PTR(-EINVAL);
	}

	if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
	      adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
	    (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
		return ERR_PTR(-EBUSY);

	fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
	if (!fwd_adapter)
		return ERR_PTR(-ENOMEM);

	pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
	adapter->num_rx_pools++;
	set_bit(pool, &adapter->fwd_bitmask);
	limit = find_last_bit(&adapter->fwd_bitmask, 32);

	/* Enable VMDq flag so device will be set in VM mode */
	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
	adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;

	/* Force reinit of ring allocation with VMDQ enabled */
	err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
	if (err)
		goto fwd_add_err;
	fwd_adapter->pool = pool;
	fwd_adapter->real_adapter = adapter;

	if (netif_running(pdev)) {
		err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
		if (err)
			goto fwd_add_err;
		netif_tx_start_all_queues(vdev);
	}

	return fwd_adapter;
fwd_add_err:
	/* unwind counter and free adapter struct */
	netdev_info(pdev,
		    "%s: dfwd hardware acceleration failed\n", vdev->name);
	clear_bit(pool, &adapter->fwd_bitmask);
	adapter->num_rx_pools--;
	kfree(fwd_adapter);
	return ERR_PTR(err);
}
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
	struct ixgbe_fwd_adapter *fwd_adapter = priv;
	struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
	unsigned int limit;

	clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
	adapter->num_rx_pools--;

	limit = find_last_bit(&adapter->fwd_bitmask, 32);
	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
	ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
	ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
	netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
		   fwd_adapter->pool, adapter->num_rx_pools,
		   fwd_adapter->rx_base_queue,
		   fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
		   adapter->fwd_bitmask);
	kfree(fwd_adapter);
}
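/* ixgbe_fwd_add()/ixgbe_fwd_del() implement the L2 forwarding offload
 * (dfwd) hooks; a hypothetical setup (device names assumed) that exercises
 * them when the lower device advertises NETIF_F_HW_L2FW_DOFFLOAD:
 *
 *	ip link add link eth0 macvlan0 type macvlan mode bridge
 *	ip link set macvlan0 up
 *
 * Bringing the macvlan up claims a hardware pool via
 * .ndo_dfwd_add_station; deleting the device releases it via
 * .ndo_dfwd_del_station.
 */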
#define IXGBE_MAX_MAC_HDR_LEN		127
#define IXGBE_MAX_NETWORK_HDR_LEN	511

static netdev_features_t
ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
		     netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_set_tx_maxrate	= ixgbe_tx_maxrate,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
	.ndo_set_vf_rate	= ixgbe_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk	= ixgbe_ndo_set_vf_spoofchk,
	.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
	.ndo_set_vf_trust	= ixgbe_ndo_set_vf_trust,
	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
	.ndo_get_stats64	= ixgbe_get_stats64,
	.ndo_setup_tc		= __ixgbe_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
	.ndo_fcoe_get_wwn	= ixgbe_fcoe_get_wwn,
	.ndo_fcoe_get_hbainfo	= ixgbe_fcoe_get_hbainfo,
#endif /* IXGBE_FCOE */
	.ndo_set_features	= ixgbe_set_features,
	.ndo_fix_features	= ixgbe_fix_features,
	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
	.ndo_bridge_setlink	= ixgbe_ndo_bridge_setlink,
	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
	.ndo_dfwd_add_station	= ixgbe_fwd_add,
	.ndo_dfwd_del_station	= ixgbe_fwd_del,
	.ndo_udp_tunnel_add	= ixgbe_add_udp_tunnel_port,
	.ndo_udp_tunnel_del	= ixgbe_del_udp_tunnel_port,
	.ndo_features_check	= ixgbe_features_check,
};
/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * This function enumerates the physical functions co-located on a single slot,
 * in order to determine how many ports a device has. This is most useful in
 * determining the required GT/s of PCIe bandwidth necessary for optimal
 * performance.
 **/
static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
{
	struct pci_dev *entry, *pdev = adapter->pdev;
	int physfns = 0;

	/* Some cards can not use the generic count PCIe functions method,
	 * because they are behind a parent switch, so we hardcode these with
	 * the correct number of functions.
	 */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		physfns = 4;

	list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
		/* don't count virtual functions */
		if (entry->is_virtfn)
			continue;

		/* When the devices on the bus don't all match our device ID,
		 * we can't reliably determine the correct number of
		 * functions. This can occur if a function has been direct
		 * attached to a virtual machine using VT-d, for example. In
		 * this case, simply return -1 to indicate this.
		 */
		if ((entry->vendor != pdev->vendor) ||
		    (entry->device != pdev->device))
			return -1;

		physfns++;
	}

	return physfns;
}
/**
 * ixgbe_wol_supported - Check whether device supports WoL
 * @adapter: the adapter private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * This function is used by probe and ethtool to determine
 * which devices have WoL support
 *
 **/
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			 u16 subdevice_id)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;

	/* WOL not supported on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return false;

	/* check eeprom to see if WOL is enabled for X540 and newer */
	if (hw->mac.type >= ixgbe_mac_X540) {
		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
		     (hw->bus.func == 0)))
			return true;
	}

	/* WOL is determined based on device IDs for 82599 MACs */
	switch (device_id) {
	case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices could support WOL */
		switch (subdevice_id) {
		case IXGBE_SUBDEV_ID_82599_560FLR:
		case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
		case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
		case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
			/* only support first port */
			if (hw->bus.func != 0)
				break;
		case IXGBE_SUBDEV_ID_82599_SP_560FLR:
		case IXGBE_SUBDEV_ID_82599_SFP:
		case IXGBE_SUBDEV_ID_82599_RNDC:
		case IXGBE_SUBDEV_ID_82599_ECNA_DP:
		case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
		case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
		case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
			return true;
		}
		break;
	case IXGBE_DEV_ID_82599EN_SFP:
		/* Only these subdevices support WOL */
		switch (subdevice_id) {
		case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
			return true;
		}
		break;
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WOL */
		if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
			return true;
		break;
	case IXGBE_DEV_ID_82599_KX4:
		return true;
	default:
		break;
	}

	return false;
}
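/* ixgbe_wol_supported() backs both probe and the ethtool get/set WoL
 * paths; hypothetical user-space usage (interface name assumed) on a port
 * where it returns true:
 *
 *	ethtool eth0           # reports "Supports Wake-on: g"
 *	ethtool -s eth0 wol g  # arm magic-packet wake
 */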
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	int i, err, pci_using_dac, expected_gts;
	unsigned int indices = MAX_TX_QUEUES;
	u8 part_str[IXGBE_PBANUM_LENGTH];
	bool disable_dev = false;
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 eec;

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_mem_regions(pdev, ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
		/* 8 TC w/ 4 queues per TC */
		indices = 4 * MAX_TRAFFIC_CLASS;
#else
		indices = IXGBE_MAX_RSS_INDICES;
#endif
	}

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	/* Setup hw api */
	hw->mac.ops   = *ii->mac_ops;
	hw->mac.type  = ii->mac;
	hw->mvals     = ii->mvals;
	if (ii->link_ops)
		hw->link.ops  = *ii->link_ops;

	/* EEPROM */
	hw->eeprom.ops = *ii->eeprom_ops;
	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	if (ixgbe_removed(hw->hw_addr)) {
		err = -EIO;
		goto err_ioremap;
	}
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & BIT(8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	hw->phy.ops = *ii->phy_ops;
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	/* setup the private structure */
	err = ixgbe_sw_init(adapter, ii);
	if (err)
		goto err_sw_init;

	/* Make sure the SWFW semaphore is in a valid state */
	if (hw->mac.ops.init_swfw_sync)
		hw->mac.ops.init_swfw_sync(hw);

	/* Make it possible the adapter to be woken up via WOL */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		break;
	default:
		break;
	}

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(probe, "Fan has stopped, replace the adapter\n");
	}

	if (allow_unsupported_sfp)
		hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* reset_hw fills in the perm_addr as well */
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported module.\n");
		goto err_sw_init;
	} else if (err) {
		e_dev_err("HW Init failed: %d\n", err);
		goto err_sw_init;
	}

#ifdef CONFIG_PCI_IOV
	/* SR-IOV not supported on the 82598 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		goto skip_sriov;
	/* Mailbox */
	ixgbe_init_mbx_params_pf(hw);
	hw->mbx.ops = ii->mbx_ops;
	pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
	ixgbe_enable_sriov(adapter);
skip_sriov:

#endif
	netdev->features = NETIF_F_SG |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM |
			   NETIF_F_HW_CSUM;

#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				    NETIF_F_GSO_GRE_CSUM | \
				    NETIF_F_GSO_IPXIP4 | \
				    NETIF_F_GSO_IPXIP6 | \
				    NETIF_F_GSO_UDP_TUNNEL | \
				    NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL |
			    IXGBE_GSO_PARTIAL_FEATURES;

	if (hw->mac.type >= ixgbe_mac_82599EB)
		netdev->features |= NETIF_F_SCTP_CRC;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features |
			       NETIF_F_HW_VLAN_CTAG_FILTER |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_RXALL |
			       NETIF_F_HW_L2FW_DOFFLOAD;

	if (hw->mac.type >= ixgbe_mac_82599EB)
		netdev->hw_features |= NETIF_F_NTUPLE |
				       NETIF_F_HW_TC;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->hw_enc_features |= netdev->vlan_features;
	netdev->mpls_features |= NETIF_F_HW_CSUM;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

	/* MTU range: 68 - 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

#ifdef CONFIG_IXGBE_DCB
	if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
		netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		unsigned int fcoe_l;

		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}

		fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
		adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;

		netdev->features |= NETIF_F_FSO |
				    NETIF_F_FCOE_CRC;

		netdev->vlan_features |= NETIF_F_FSO |
					 NETIF_F_FCOE_CRC |
					 NETIF_F_FCOE_MTU;
	}
#endif /* IXGBE_FCOE */

	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
		netdev->hw_features |= NETIF_F_LRO;
	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_sw_init;
	}

	eth_platform_get_mac_address(&adapter->pdev->dev,
				     adapter->hw.mac.perm_addr);

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		e_dev_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	/* Set hw->mac.addr to permanent MAC address */
	ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
	ixgbe_mac_set_default_filter(adapter);

	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
		    (unsigned long) adapter);

	if (ixgbe_removed(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}
	INIT_WORK(&adapter->service_task, ixgbe_service_task);
	set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* WOL not supported for all devices */
	adapter->wol = 0;
	hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
	hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
					      pdev->subsystem_device);
	if (hw->wol_enabled)
		adapter->wol = IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);

	/* pick up the PCI bus settings for reporting later */
	if (ixgbe_pcie_from_parent(hw))
		ixgbe_get_parent_bus_info(adapter);
	else
		hw->mac.ops.get_bus_info(hw);

	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure no warning is displayed if it can't be fixed.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
		break;
	default:
		expected_gts = ixgbe_enumerate_functions(adapter) * 10;
		break;
	}

	/* don't check link if we failed to enumerate functions */
	if (expected_gts > 0)
		ixgbe_check_minimum_link(adapter, expected_gts);

	err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
	if (err)
		strlcpy(part_str, "Unknown", sizeof(part_str));
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   part_str);
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, part_str);

	e_dev_info("%pM\n", netdev->dev_addr);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);
	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, adapter);

	/* power down the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* firmware requires driver version to be 0xFFFFFFFF
	 * since os does not support feature
	 */
	if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
					   0xFF);

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("%s\n", ixgbe_default_device_descr);

#ifdef CONFIG_IXGBE_HWMON
	if (ixgbe_sysfs_init(adapter))
		e_err(probe, "failed to allocate sysfs resources\n");
#endif /* CONFIG_IXGBE_HWMON */

	ixgbe_dbg_adapter_init(adapter);

	/* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
	if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw,
			IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
			true);

	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbe_disable_sriov(adapter);
	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
	iounmap(adapter->io_addr);
	kfree(adapter->jump_tables[0]);
	kfree(adapter->mac_table);
err_ioremap:
	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev;
	bool disable_dev;
	int i;

	/* if !adapter then we already cleaned up in probe */
	if (!adapter)
		return;

	netdev = adapter->netdev;
	ixgbe_dbg_adapter_exit(adapter);

	set_bit(__IXGBE_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_DISABLE);
	}

#endif
#ifdef CONFIG_IXGBE_HWMON
	ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

#ifdef CONFIG_PCI_IOV
	ixgbe_disable_sriov(adapter);
#endif
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

#ifdef CONFIG_DCB
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);

#endif
	iounmap(adapter->io_addr);
	pci_release_mem_regions(pdev);

	e_dev_info("complete\n");

	for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
		if (adapter->jump_tables[i]) {
			kfree(adapter->jump_tables[i]->input);
			kfree(adapter->jump_tables[i]->mask);
		}
		kfree(adapter->jump_tables[i]);
	}

	kfree(adapter->mac_table);
	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	if (disable_dev)
		pci_disable_device(pdev);
}
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

	bdev = pdev->bus->self;
	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
	dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
	dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
	dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
	if (ixgbe_removed(hw->hw_addr))
		goto skip_bad_vf_detection;

	req_id = dw1 >> 16;
	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;

	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		unsigned int device_id;

		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: %8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_82599_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_X540_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X550:
			device_id = IXGBE_DEV_ID_X550_VF;
			break;
		case ixgbe_mac_X550EM_x:
			device_id = IXGBE_DEV_ID_X550EM_X_VF;
			break;
		case ixgbe_mac_x550em_a:
			device_id = IXGBE_DEV_ID_X550EM_A_VF;
			break;
		default:
			device_id = 0;
			break;
		}

		/* Find the pci device of the offending VF */
		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					       device_id, vfdev);
		}
		/*
		 * There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR. Just clean up the AER in that case.
		 */
		if (vfdev) {
			ixgbe_issue_vf_flr(adapter, vfdev);
			/* Free device reference count */
			pci_dev_put(vfdev);
		}

		pci_cleanup_aer_uncorrect_error_status(pdev);
	}

	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
	if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbe_down(adapter);

	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		smp_mb__before_atomic();
		clear_bit(__IXGBE_DISABLED, &adapter->state);
		adapter->hw.hw_addr = adapter->io_addr;
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	if (adapter->vferr_refcount) {
		e_info(drv, "Resuming after VF err\n");
		adapter->vferr_refcount--;
		return;
	}

#endif
	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);
}
static const struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};
static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = ixgbe_remove,
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.sriov_configure = ixgbe_pci_sriov_configure,
	.err_handler = &ixgbe_err_handler
};
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

	ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
	if (!ixgbe_wq) {
		pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
		return -ENOMEM;
	}

	ixgbe_dbg_init();

	ret = pci_register_driver(&ixgbe_driver);
	if (ret) {
		destroy_workqueue(ixgbe_wq);
		ixgbe_dbg_exit();
		return ret;
	}

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	return 0;
}

module_init(ixgbe_init_module);
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);

	ixgbe_dbg_exit();

	destroy_workqueue(ixgbe_wq);
}
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */
module_exit(ixgbe_exit_module);
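/* Illustrative module usage (parameter values assumed); "debug" and
 * "allow_unsupported_sfp" are module parameters defined elsewhere in this
 * driver:
 *
 *	modprobe ixgbe debug=16 allow_unsupported_sfp=1
 */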