]>
Commit | Line | Data |
---|---|---|
9f95a23c TL |
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright(c) 2010-2016 Intel Corporation | |
7c673cae FG |
3 | */ |
4 | ||
5 | #include <sys/queue.h> | |
6 | #include <stdio.h> | |
7 | #include <errno.h> | |
8 | #include <stdint.h> | |
9 | #include <stdarg.h> | |
10 | ||
11 | #include <rte_common.h> | |
12 | #include <rte_interrupts.h> | |
13 | #include <rte_byteorder.h> | |
7c673cae FG |
14 | #include <rte_debug.h> |
15 | #include <rte_pci.h> | |
9f95a23c | 16 | #include <rte_bus_pci.h> |
7c673cae | 17 | #include <rte_ether.h> |
9f95a23c | 18 | #include <rte_ethdev_driver.h> |
11fdf7f2 | 19 | #include <rte_ethdev_pci.h> |
7c673cae | 20 | #include <rte_memory.h> |
7c673cae | 21 | #include <rte_eal.h> |
7c673cae FG |
22 | #include <rte_malloc.h> |
23 | #include <rte_dev.h> | |
24 | ||
25 | #include "e1000_logs.h" | |
26 | #include "base/e1000_api.h" | |
27 | #include "e1000_ethdev.h" | |
28 | ||
29 | #define EM_EIAC 0x000DC | |
30 | ||
31 | #define PMD_ROUNDUP(x,y) (((x) + (y) - 1)/(y) * (y)) | |
32 | ||
33 | ||
34 | static int eth_em_configure(struct rte_eth_dev *dev); | |
35 | static int eth_em_start(struct rte_eth_dev *dev); | |
36 | static void eth_em_stop(struct rte_eth_dev *dev); | |
37 | static void eth_em_close(struct rte_eth_dev *dev); | |
38 | static void eth_em_promiscuous_enable(struct rte_eth_dev *dev); | |
39 | static void eth_em_promiscuous_disable(struct rte_eth_dev *dev); | |
40 | static void eth_em_allmulticast_enable(struct rte_eth_dev *dev); | |
41 | static void eth_em_allmulticast_disable(struct rte_eth_dev *dev); | |
42 | static int eth_em_link_update(struct rte_eth_dev *dev, | |
43 | int wait_to_complete); | |
9f95a23c | 44 | static int eth_em_stats_get(struct rte_eth_dev *dev, |
7c673cae FG |
45 | struct rte_eth_stats *rte_stats); |
46 | static void eth_em_stats_reset(struct rte_eth_dev *dev); | |
47 | static void eth_em_infos_get(struct rte_eth_dev *dev, | |
48 | struct rte_eth_dev_info *dev_info); | |
49 | static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev, | |
50 | struct rte_eth_fc_conf *fc_conf); | |
51 | static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev, | |
52 | struct rte_eth_fc_conf *fc_conf); | |
53 | static int eth_em_interrupt_setup(struct rte_eth_dev *dev); | |
54 | static int eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev); | |
55 | static int eth_em_interrupt_get_status(struct rte_eth_dev *dev); | |
11fdf7f2 TL |
56 | static int eth_em_interrupt_action(struct rte_eth_dev *dev, |
57 | struct rte_intr_handle *handle); | |
58 | static void eth_em_interrupt_handler(void *param); | |
7c673cae FG |
59 | |
60 | static int em_hw_init(struct e1000_hw *hw); | |
61 | static int em_hardware_init(struct e1000_hw *hw); | |
62 | static void em_hw_control_acquire(struct e1000_hw *hw); | |
63 | static void em_hw_control_release(struct e1000_hw *hw); | |
64 | static void em_init_manageability(struct e1000_hw *hw); | |
65 | static void em_release_manageability(struct e1000_hw *hw); | |
66 | ||
67 | static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); | |
68 | ||
69 | static int eth_em_vlan_filter_set(struct rte_eth_dev *dev, | |
70 | uint16_t vlan_id, int on); | |
9f95a23c | 71 | static int eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask); |
7c673cae FG |
72 | static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev); |
73 | static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev); | |
74 | static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev); | |
75 | static void em_vlan_hw_strip_disable(struct rte_eth_dev *dev); | |
76 | ||
77 | /* | |
78 | static void eth_em_vlan_filter_set(struct rte_eth_dev *dev, | |
79 | uint16_t vlan_id, int on); | |
80 | */ | |
81 | ||
82 | static int eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); | |
83 | static int eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); | |
84 | static void em_lsc_intr_disable(struct e1000_hw *hw); | |
85 | static void em_rxq_intr_enable(struct e1000_hw *hw); | |
86 | static void em_rxq_intr_disable(struct e1000_hw *hw); | |
87 | ||
88 | static int eth_em_led_on(struct rte_eth_dev *dev); | |
89 | static int eth_em_led_off(struct rte_eth_dev *dev); | |
90 | ||
91 | static int em_get_rx_buffer_size(struct e1000_hw *hw); | |
11fdf7f2 TL |
92 | static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, |
93 | uint32_t index, uint32_t pool); | |
7c673cae | 94 | static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index); |
9f95a23c TL |
95 | static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev, |
96 | struct ether_addr *addr); | |
7c673cae FG |
97 | |
98 | static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev, | |
99 | struct ether_addr *mc_addr_set, | |
100 | uint32_t nb_mc_addr); | |
101 | ||
102 | #define EM_FC_PAUSE_TIME 0x0680 | |
103 | #define EM_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */ | |
104 | #define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */ | |
105 | ||
106 | static enum e1000_fc_mode em_fc_setting = e1000_fc_full; | |
107 | ||
/*
 * The set of PCI devices this driver supports.
 * Table is terminated by a zero vendor_id sentinel entry; matched by
 * the PCI bus against {vendor_id, device_id} at probe time.
 */
static const struct rte_pci_id pci_id_em_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82540EM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_DUAL) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_QUAD) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571PT_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER_LP) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82573L) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574L) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574LA) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82583V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH2_LV_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LBG_I219_LM3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM5) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V5) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM6) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V6) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM7) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V7) },
	{ .vendor_id = 0, /* sentinel */ },
};
159 | ||
/*
 * Dispatch table wiring the generic rte_ethdev API to this PMD's
 * implementations. Rx/Tx queue and info callbacks are implemented
 * in the companion rxtx file (declared in e1000_ethdev.h).
 */
static const struct eth_dev_ops eth_em_ops = {
	.dev_configure        = eth_em_configure,
	.dev_start            = eth_em_start,
	.dev_stop             = eth_em_stop,
	.dev_close            = eth_em_close,
	.promiscuous_enable   = eth_em_promiscuous_enable,
	.promiscuous_disable  = eth_em_promiscuous_disable,
	.allmulticast_enable  = eth_em_allmulticast_enable,
	.allmulticast_disable = eth_em_allmulticast_disable,
	.link_update          = eth_em_link_update,
	.stats_get            = eth_em_stats_get,
	.stats_reset          = eth_em_stats_reset,
	.dev_infos_get        = eth_em_infos_get,
	.mtu_set              = eth_em_mtu_set,
	.vlan_filter_set      = eth_em_vlan_filter_set,
	.vlan_offload_set     = eth_em_vlan_offload_set,
	.rx_queue_setup       = eth_em_rx_queue_setup,
	.rx_queue_release     = eth_em_rx_queue_release,
	.rx_queue_count       = eth_em_rx_queue_count,
	.rx_descriptor_done   = eth_em_rx_descriptor_done,
	.rx_descriptor_status = eth_em_rx_descriptor_status,
	.tx_descriptor_status = eth_em_tx_descriptor_status,
	.tx_queue_setup       = eth_em_tx_queue_setup,
	.tx_queue_release     = eth_em_tx_queue_release,
	.rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_em_rx_queue_intr_disable,
	.dev_led_on           = eth_em_led_on,
	.dev_led_off          = eth_em_led_off,
	.flow_ctrl_get        = eth_em_flow_ctrl_get,
	.flow_ctrl_set        = eth_em_flow_ctrl_set,
	.mac_addr_set         = eth_em_default_mac_addr_set,
	.mac_addr_add         = eth_em_rar_set,
	.mac_addr_remove      = eth_em_rar_clear,
	.set_mc_addr_list     = eth_em_set_mc_addr_list,
	.rxq_info_get         = em_rxq_info_get,
	.txq_info_get         = em_txq_info_get,
};
197 | ||
7c673cae FG |
198 | |
199 | /** | |
200 | * eth_em_dev_is_ich8 - Check for ICH8 device | |
201 | * @hw: pointer to the HW structure | |
202 | * | |
203 | * return TRUE for ICH8, otherwise FALSE | |
204 | **/ | |
205 | static bool | |
206 | eth_em_dev_is_ich8(struct e1000_hw *hw) | |
207 | { | |
208 | DEBUGFUNC("eth_em_dev_is_ich8"); | |
209 | ||
210 | switch (hw->device_id) { | |
9f95a23c | 211 | case E1000_DEV_ID_PCH2_LV_LM: |
7c673cae FG |
212 | case E1000_DEV_ID_PCH_LPT_I217_LM: |
213 | case E1000_DEV_ID_PCH_LPT_I217_V: | |
214 | case E1000_DEV_ID_PCH_LPTLP_I218_LM: | |
215 | case E1000_DEV_ID_PCH_LPTLP_I218_V: | |
216 | case E1000_DEV_ID_PCH_I218_V2: | |
217 | case E1000_DEV_ID_PCH_I218_LM2: | |
218 | case E1000_DEV_ID_PCH_I218_V3: | |
219 | case E1000_DEV_ID_PCH_I218_LM3: | |
11fdf7f2 TL |
220 | case E1000_DEV_ID_PCH_SPT_I219_LM: |
221 | case E1000_DEV_ID_PCH_SPT_I219_V: | |
222 | case E1000_DEV_ID_PCH_SPT_I219_LM2: | |
223 | case E1000_DEV_ID_PCH_SPT_I219_V2: | |
224 | case E1000_DEV_ID_PCH_LBG_I219_LM3: | |
225 | case E1000_DEV_ID_PCH_SPT_I219_LM4: | |
226 | case E1000_DEV_ID_PCH_SPT_I219_V4: | |
227 | case E1000_DEV_ID_PCH_SPT_I219_LM5: | |
228 | case E1000_DEV_ID_PCH_SPT_I219_V5: | |
229 | case E1000_DEV_ID_PCH_CNP_I219_LM6: | |
230 | case E1000_DEV_ID_PCH_CNP_I219_V6: | |
231 | case E1000_DEV_ID_PCH_CNP_I219_LM7: | |
232 | case E1000_DEV_ID_PCH_CNP_I219_V7: | |
7c673cae FG |
233 | return 1; |
234 | default: | |
235 | return 0; | |
236 | } | |
237 | } | |
238 | ||
/*
 * Per-device init callback, invoked once per matched PCI device.
 * Sets up burst function pointers, maps BARs into the e1000_hw state,
 * initializes the hardware and allocates the MAC address table.
 * Returns 0 on success, negative errno on failure.
 */
static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta * shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);

	eth_dev->dev_ops = &eth_em_ops;
	eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
	eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
	eth_dev->tx_pkt_prepare = (eth_tx_prep_t)&eth_em_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst =
				(eth_rx_burst_t)&eth_em_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* BAR 0 holds the register space */
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;
	adapter->stopped = 0;

	/* For ICH8 support we'll need to map the flash memory BAR */
	if (eth_em_dev_is_ich8(hw))
		hw->flash_address = (void *)pci_dev->mem_resource[1].addr;

	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
			em_hw_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
			"failed to init HW",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
		return -ENODEV;
	}

	/* Allocate memory for storing MAC addresses (one slot per RAR entry) */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
			hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			"store MAC addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.addr,
		eth_dev->data->mac_addrs);

	/* initialize the vfta (VLAN filter table shadow copy) */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   eth_em_interrupt_handler, eth_dev);

	return 0;
}
311 | ||
/*
 * Per-device uninit callback: stop the device if still running, clear
 * burst/ops pointers and unregister the interrupt callback.
 * Only the primary process may tear the device down (-EPERM otherwise).
 */
static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	/* eth_em_close() also sets adapter->stopped */
	if (adapter->stopped == 0)
		eth_em_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     eth_em_interrupt_handler, eth_dev);

	return 0;
}
339 | ||
11fdf7f2 TL |
340 | static int eth_em_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, |
341 | struct rte_pci_device *pci_dev) | |
342 | { | |
343 | return rte_eth_dev_pci_generic_probe(pci_dev, | |
344 | sizeof(struct e1000_adapter), eth_em_dev_init); | |
345 | } | |
346 | ||
/* PCI remove callback: tear down the ethdev via eth_em_dev_uninit(). */
static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_em_dev_uninit);
}
351 | ||
/*
 * PCI driver descriptor for the em PMD: device match table, driver
 * flags (BAR mapping required, link-status interrupt capable, IOVA
 * as VA supported) and probe/remove entry points.
 */
static struct rte_pci_driver rte_em_pmd = {
	.id_table = pci_id_em_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_em_pci_probe,
	.remove = eth_em_pci_remove,
};
359 | ||
/*
 * One-time hardware bring-up: initialize MAC/NVM/PHY function parameters,
 * reset the chip, validate the EEPROM, read the permanent MAC address and
 * run em_hardware_init(). Returns 0 on success, a negative e1000 error
 * code on failure (HW control is released on the error path).
 */
static int
em_hw_init(struct e1000_hw *hw)
{
	int diag;

	diag = hw->mac.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "MAC Initialization Error");
		return diag;
	}
	diag = hw->nvm.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "NVM Initialization Error");
		return diag;
	}
	diag = hw->phy.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "PHY Initialization Error");
		return diag;
	}
	(void) e1000_get_bus_info(hw);

	/* Default to autonegotiation of all speed/duplex combinations */
	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	e1000_init_script_state_82541(hw, TRUE);
	e1000_set_tbi_compatibility_82543(hw, TRUE);

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	e1000_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		diag = e1000_validate_nvm_checksum(hw);
		if (diag < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			goto error;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	diag = e1000_read_mac_addr(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		goto error;
	}

	/* Now initialize the hardware */
	diag = em_hardware_init(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		goto error;
	}

	hw->mac.get_link_status = 1;

	/* Indicate SOL/IDER usage (non-fatal, log only) */
	diag = e1000_check_reset_block(hw);
	if (diag < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
			     "SOL/IDER session");
	}
	return 0;

error:
	em_hw_control_release(hw);
	return diag;
}
444 | ||
445 | static int | |
446 | eth_em_configure(struct rte_eth_dev *dev) | |
447 | { | |
448 | struct e1000_interrupt *intr = | |
449 | E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
450 | ||
451 | PMD_INIT_FUNC_TRACE(); | |
452 | intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; | |
9f95a23c | 453 | |
7c673cae FG |
454 | PMD_INIT_FUNC_TRACE(); |
455 | ||
456 | return 0; | |
457 | } | |
458 | ||
/*
 * Program the Packet Buffer Allocation register with a per-MAC-type
 * Rx/Tx split. Writing PBA sets the receive portion; the remainder of
 * the on-chip packet buffer is used for transmit.
 */
static void
em_set_pba(struct e1000_hw *hw)
{
	uint32_t pba;

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 * After the 82547 the buffer was reduced to 40K.
	 */
	switch (hw->mac.type) {
		case e1000_82547:
		case e1000_82547_rev_2:
		/* 82547: Total Packet Buffer is 40K */
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
			break;
		case e1000_82571:
		case e1000_82572:
		case e1000_80003es2lan:
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
			break;
		case e1000_82573: /* 82573: Total Packet Buffer is 32K */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
			break;
		case e1000_82574:
		case e1000_82583:
			pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
			break;
		case e1000_ich8lan:
			pba = E1000_PBA_8K;
			break;
		case e1000_ich9lan:
		case e1000_ich10lan:
			pba = E1000_PBA_10K;
			break;
		case e1000_pchlan:
		case e1000_pch2lan:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_pch_cnp:
			pba = E1000_PBA_26K;
			break;
		default:
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
	}

	E1000_WRITE_REG(hw, E1000_PBA, pba);
}
509 | ||
9f95a23c TL |
510 | static void |
511 | eth_em_rxtx_control(struct rte_eth_dev *dev, | |
512 | bool enable) | |
513 | { | |
514 | struct e1000_hw *hw = | |
515 | E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
516 | uint32_t tctl, rctl; | |
517 | ||
518 | tctl = E1000_READ_REG(hw, E1000_TCTL); | |
519 | rctl = E1000_READ_REG(hw, E1000_RCTL); | |
520 | if (enable) { | |
521 | /* enable Tx/Rx */ | |
522 | tctl |= E1000_TCTL_EN; | |
523 | rctl |= E1000_RCTL_EN; | |
524 | } else { | |
525 | /* disable Tx/Rx */ | |
526 | tctl &= ~E1000_TCTL_EN; | |
527 | rctl &= ~E1000_RCTL_EN; | |
528 | } | |
529 | E1000_WRITE_REG(hw, E1000_TCTL, tctl); | |
530 | E1000_WRITE_REG(hw, E1000_RCTL, rctl); | |
531 | E1000_WRITE_FLUSH(hw); | |
532 | } | |
533 | ||
7c673cae FG |
/*
 * dev_start callback: full device bring-up. Stops/resets the port,
 * programs MAC address(es), re-runs hardware init, configures Rx/Tx
 * rings, VLAN offloads, link speed and interrupts, then enables the
 * datapath. Returns 0 on success or a negative errno; on any failure
 * after queue init the queues are cleared before returning.
 */
static int
eth_em_start(struct rte_eth_dev *dev)
{
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t *speeds;
	int num_speeds;
	bool autoneg;

	PMD_INIT_FUNC_TRACE();

	/* Start from a stopped state */
	eth_em_stop(dev);

	e1000_power_up_phy(hw);

	/* Set default PBA value */
	em_set_pba(hw);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset, we make a duplicate
	 * in RAR[14] for that eventuality, this assures
	 * the interface continues to function.
	 */
	if (hw->mac.type == e1000_82571) {
		e1000_set_laa_state_82571(hw, TRUE);
		e1000_rar_set(hw, hw->mac.addr, E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (em_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);

	/* Configure for OS presence */
	em_init_manageability(hw);

	/* Set up per-queue Rx interrupts (event fds) when requested */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
					dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
						" intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}

		/* enable rx interrupt */
		em_rxq_intr_enable(hw);
	}

	eth_em_tx_init(dev);

	ret = eth_em_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		em_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/* Apply configured VLAN strip/filter/extend offloads */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
			ETH_VLAN_EXTEND_MASK;
	ret = eth_em_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to update vlan offload");
		em_dev_clear_queues(dev);
		return ret;
	}

	/* Set Interrupt Throttling Rate to maximum allowed value. */
	E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX);

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		hw->mac.autoneg = 1;
	} else {
		num_speeds = 0;
		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		/* Reject any speed bit outside the set this MAC supports */
		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		/* A fixed-speed configuration must name exactly one speed */
		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0) {
			ret = eth_em_interrupt_setup(dev);
			if (ret) {
				PMD_INIT_LOG(ERR, "Unable to setup interrupts");
				em_dev_clear_queues(dev);
				return ret;
			}
		}
	} else {
		/* No MSI-X multiplexing: drop the LSC callback registered
		 * at init/stop time */
		rte_intr_callback_unregister(intr_handle,
						eth_em_interrupt_handler,
						(void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplexn");
	}
	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0)
		eth_em_rxq_interrupt_setup(dev);

	rte_intr_enable(intr_handle);

	adapter->stopped = 0;

	/* Open the datapath and record the initial link state */
	eth_em_rxtx_control(dev, true);
	eth_em_link_update(dev, 0);

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	em_dev_clear_queues(dev);
	return -EINVAL;
}
720 | ||
/*********************************************************************
 *
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC. It also disables interrupts, powers down
 * the PHY, clears the queues and records the link as down.
 *
 **********************************************************************/
static void
eth_em_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* Close the datapath before touching interrupt state */
	eth_em_rxtx_control(dev, false);
	em_rxq_intr_disable(hw);
	em_lsc_intr_disable(hw);

	e1000_reset_hw(hw);
	if (hw->mac.type >= e1000_82544)
		E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Power down the phy. Needed to make the link go down */
	e1000_power_down_phy(hw);

	em_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_em_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
765 | ||
/*
 * dev_close callback: stop the port, free all queues, reset the PHY
 * and hand hardware control back to the firmware/manageability engine.
 * Marks the adapter stopped so eth_em_dev_uninit() won't re-close it.
 */
static void
eth_em_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);

	eth_em_stop(dev);
	adapter->stopped = 1;
	em_dev_free_queues(dev);
	e1000_phy_hw_reset(hw);
	em_release_manageability(hw);
	em_hw_control_release(hw);
}
780 | ||
781 | static int | |
782 | em_get_rx_buffer_size(struct e1000_hw *hw) | |
783 | { | |
784 | uint32_t rx_buf_size; | |
785 | ||
786 | rx_buf_size = ((E1000_READ_REG(hw, E1000_PBA) & UINT16_MAX) << 10); | |
787 | return rx_buf_size; | |
788 | } | |
789 | ||
/*********************************************************************
 *
 *  Initialize the hardware
 *
 *  Resets the MAC, takes driver control from firmware, programs the
 *  flow-control watermarks and runs the base-driver init sequence.
 *  Returns 0 on success or the negative diagnostic code from
 *  e1000_init_hw() on failure.
 *
 **********************************************************************/
static int
em_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Issue a global reset */
	e1000_reset_hw(hw);

	/* Let the firmware know the OS is in control */
	em_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = em_get_rx_buffer_size(hw);

	/* Leave room for two max-size frames, rounded to 1 KB. */
	hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024);
	hw->fc.low_water = hw->fc.high_water - 1500;

	/* 80003es2lan uses the maximum pause interval. */
	if (hw->mac.type == e1000_80003es2lan)
		hw->fc.pause_time = UINT16_MAX;
	else
		hw->fc.pause_time = EM_FC_PAUSE_TIME;

	hw->fc.send_xon = 1;

	/* Set Flow control, use the tunable location if sane */
	if (em_fc_setting <= e1000_fc_full)
		hw->fc.requested_mode = em_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Workaround: no TX flow ctrl for PCH */
	if (hw->mac.type == e1000_pchlan)
		hw->fc.requested_mode = e1000_fc_rx_pause;

	/* Override - settings for PCH2LAN, ya its magic :) */
	if (hw->mac.type == e1000_pch2lan) {
		hw->fc.high_water = 0x5C20;
		hw->fc.low_water = 0x5048;
		hw->fc.pause_time = 0x0650;
		hw->fc.refresh_time = 0x0400;
	} else if (hw->mac.type == e1000_pch_lpt ||
		   hw->mac.type == e1000_pch_spt ||
		   hw->mac.type == e1000_pch_cnp) {
		hw->fc.requested_mode = e1000_fc_full;
	}

	/* Run the shared-code hardware init; on error propagate the
	 * (negative) diagnostic to the caller. */
	diag = e1000_init_hw(hw);
	if (diag < 0)
		return diag;
	e1000_check_for_link(hw);
	return 0;
}
861 | ||
/* This function is based on em_update_stats_counters() in e1000/if_em.c */
/*
 * Accumulate the adapter's hardware statistics counters into the
 * driver's software copy and optionally export them.
 *
 * The statistics registers are read-to-clear, so this routine must be
 * the only reader; eth_em_stats_reset() calls it with rte_stats == NULL
 * purely to drain (clear) the hardware counters.
 *
 * @param dev        port to query
 * @param rte_stats  destination for generic ethdev stats, or NULL
 * @return 0 on success, -EINVAL when rte_stats is NULL
 */
static int
eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int pause_frames;

	/* Symbol/sequence error counters are only valid on copper media
	 * or while link is up. */
	if(hw->phy.media_type == e1000_media_type_copper ||
			(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs += E1000_READ_REG(hw,E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	/* Per-size Rx packet counters (64 .. 1522 bytes). */
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/*
	 * For the 64-bit byte counters the low dword must be read first.
	 * Both registers clear on the read of the high dword.
	 */

	stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
	stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
	stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	/* NOTE(review): only the high dwords of TOR/TOT are accumulated
	 * here (the low dwords are never read) — looks intentional but
	 * differs from the GORC/GOTC handling above; confirm against the
	 * datasheet. */
	stats->tor += E1000_READ_REG(hw, E1000_TORH);
	stats->tot += E1000_READ_REG(hw, E1000_TOTH);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);
	/* Per-size Tx packet counters (64 .. 1522 bytes). */
	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */

	if (hw->mac.type >= e1000_82571) {
		stats->iac += E1000_READ_REG(hw, E1000_IAC);
		stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
		stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
		stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
		stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
		stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
		stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
		stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
		stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
	}

	if (hw->mac.type >= e1000_82543) {
		stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
		stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
		stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
		stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
		stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
		stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
	}

	/* NULL destination is allowed: the registers above were still
	 * read (and therefore cleared) — see eth_em_stats_reset(). */
	if (rte_stats == NULL)
		return -EINVAL;

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs +
	                     stats->rlec + stats->ruc + stats->roc +
	                     stats->rxerrc + stats->algnerrc + stats->cexterr;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes   = stats->gorc;
	rte_stats->obytes   = stats->gotc;
	return 0;
}
980 | ||
981 | static void | |
982 | eth_em_stats_reset(struct rte_eth_dev *dev) | |
983 | { | |
984 | struct e1000_hw_stats *hw_stats = | |
985 | E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); | |
986 | ||
987 | /* HW registers are cleared on read */ | |
988 | eth_em_stats_get(dev, NULL); | |
989 | ||
990 | /* Reset software totals */ | |
991 | memset(hw_stats, 0, sizeof(*hw_stats)); | |
992 | } | |
993 | ||
/*
 * Re-arm the Rx interrupt for the given queue (the device exposes a
 * single Rx queue, so queue_id is unused) and re-enable the PCI
 * interrupt line.
 *
 * @return 0 always
 */
static int
eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	em_rxq_intr_enable(hw);
	rte_intr_enable(intr_handle);

	return 0;
}
1006 | ||
1007 | static int | |
1008 | eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id) | |
1009 | { | |
1010 | struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1011 | ||
1012 | em_rxq_intr_disable(hw); | |
1013 | ||
1014 | return 0; | |
1015 | } | |
1016 | ||
/*
 * Return the maximum supported frame length (in bytes) for the MAC
 * type of this port.  Values come from the per-device jumbo-frame
 * capabilities of the e1000 family.
 */
uint32_t
em_get_max_pktlen(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	switch (hw->mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_82574:
	case e1000_80003es2lan: /* 9K Jumbo Frame size */
	case e1000_82583:
		return 0x2412;	/* 9234 bytes */
	case e1000_pchlan:
		return 0x1000;	/* 4096 bytes */
	/* Adapters that do not support jumbo frames */
	case e1000_ich8lan:
		return ETHER_MAX_LEN;
	default:
		return MAX_JUMBO_FRAME_SIZE;
	}
}
1044 | ||
/*
 * Fill in the device capabilities/limits reported to applications via
 * rte_eth_dev_info_get(): buffer sizes, queue counts, offload
 * capabilities, descriptor limits, link speeds and preferred defaults.
 */
static void
eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;

	/*
	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
	 * Unfortunatelly, all these nics have just one TX context.
	 * So we have few choises for TX:
	 * - Use just one TX queue.
	 * - Allow cksum offload only for one TX queue.
	 * - Don't allow TX cksum offload at all.
	 * For now, option #1 was chosen.
	 * To use second RX queue we have to use extended RX descriptor
	 * (Multiple Receive Queues are mutually exclusive with UDP
	 * fragmentation and are not supported when a legacy receive
	 * descriptor format is used).
	 * Which means separate RX routinies - as legacy nics (82540, 82545)
	 * don't support extended RXD.
	 * To avoid it we support just one RX queue for now (no RSS).
	 */

	dev_info->max_rx_queues = 1;
	dev_info->max_tx_queues = 1;

	/* Port-level capa is the union of port-only and per-queue capa. */
	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
				    dev_info->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
	dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
				    dev_info->tx_queue_offload_capa;

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = E1000_MAX_RING_DESC,
		.nb_min = E1000_MIN_RING_DESC,
		.nb_align = EM_RXD_ALIGN,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = E1000_MAX_RING_DESC,
		.nb_min = E1000_MIN_RING_DESC,
		.nb_align = EM_TXD_ALIGN,
		.nb_seg_max = EM_TX_MAX_SEG,
		.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G;

	/* Preferred queue parameters */
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = 256;
	dev_info->default_rxportconf.ring_size = 256;
}
1105 | ||
/* return 0 means link status changed, -1 means not changed */
/*
 * Poll the PHY/MAC for the current link state and publish it through
 * rte_eth_linkstatus_set().  With wait_to_complete != 0 the link is
 * polled for up to EM_LINK_UPDATE_CHECK_TIMEOUT intervals before
 * giving up.
 */
static int
eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < EM_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
		/* Read the real link status; how "link up" is detected
		 * depends on the media type. */
		switch (hw->phy.media_type) {
		case e1000_media_type_copper:
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case e1000_media_type_fiber:
			e1000_check_for_link(hw);
			link_check = (E1000_READ_REG(hw, E1000_STATUS) &
					E1000_STATUS_LU);
			break;

		case e1000_media_type_internal_serdes:
			e1000_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_check && (link.link_status == ETH_LINK_DOWN)) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				ETH_LINK_FULL_DUPLEX :
				ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);
	} else if (!link_check && (link.link_status == ETH_LINK_UP)) {
		/* NOTE(review): since 'link' was just zeroed above,
		 * link_status is ETH_LINK_DOWN here, so this branch looks
		 * unreachable — confirm intended behavior. */
		link.link_speed = ETH_SPEED_NUM_NONE;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
		link.link_autoneg = ETH_LINK_FIXED;
	}

	/* rte_eth_linkstatus_set() reports whether the state changed. */
	return rte_eth_linkstatus_set(dev, &link);
}
1168 | ||
/*
 * em_hw_control_acquire sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded. For AMT version type f/w
 * this means that the network i/f is open.
 */
static void
em_hw_control_acquire(struct e1000_hw *hw)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware know the driver has taken over */
	if (hw->mac.type == e1000_82573) {
		/* 82573 signals driver load through SWSM. */
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);

	} else {
		/* All other MACs use CTRL_EXT. */
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}
1191 | ||
1192 | /* | |
1193 | * em_hw_control_release resets {CTRL_EXTT|FWSM}:DRV_LOAD bit. | |
1194 | * For ASF and Pass Through versions of f/w this means that the | |
1195 | * driver is no longer loaded. For AMT versions of the | |
1196 | * f/w this means that the network i/f is closed. | |
1197 | */ | |
1198 | static void | |
1199 | em_hw_control_release(struct e1000_hw *hw) | |
1200 | { | |
1201 | uint32_t ctrl_ext, swsm; | |
1202 | ||
1203 | /* Let firmware taken over control of h/w */ | |
1204 | if (hw->mac.type == e1000_82573) { | |
1205 | swsm = E1000_READ_REG(hw, E1000_SWSM); | |
1206 | E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD); | |
1207 | } else { | |
1208 | ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); | |
1209 | E1000_WRITE_REG(hw, E1000_CTRL_EXT, | |
1210 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); | |
1211 | } | |
1212 | } | |
1213 | ||
/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features.
 */
static void
em_init_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5;  /* Mng Port 623 */
		manc2h |= 1 << 6;  /* Mng Port 664 */
		E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}
1237 | ||
1238 | /* | |
1239 | * Give control back to hardware management | |
1240 | * controller if there is one. | |
1241 | */ | |
1242 | static void | |
1243 | em_release_manageability(struct e1000_hw *hw) | |
1244 | { | |
1245 | uint32_t manc; | |
1246 | ||
1247 | if (e1000_enable_mng_pass_thru(hw)) { | |
1248 | manc = E1000_READ_REG(hw, E1000_MANC); | |
1249 | ||
1250 | /* re-enable hardware interception of ARP */ | |
1251 | manc |= E1000_MANC_ARP_EN; | |
1252 | manc &= ~E1000_MANC_EN_MNG2HOST; | |
1253 | ||
1254 | E1000_WRITE_REG(hw, E1000_MANC, manc); | |
1255 | } | |
1256 | } | |
1257 | ||
1258 | static void | |
1259 | eth_em_promiscuous_enable(struct rte_eth_dev *dev) | |
1260 | { | |
1261 | struct e1000_hw *hw = | |
1262 | E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1263 | uint32_t rctl; | |
1264 | ||
1265 | rctl = E1000_READ_REG(hw, E1000_RCTL); | |
1266 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); | |
1267 | E1000_WRITE_REG(hw, E1000_RCTL, rctl); | |
1268 | } | |
1269 | ||
1270 | static void | |
1271 | eth_em_promiscuous_disable(struct rte_eth_dev *dev) | |
1272 | { | |
1273 | struct e1000_hw *hw = | |
1274 | E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1275 | uint32_t rctl; | |
1276 | ||
1277 | rctl = E1000_READ_REG(hw, E1000_RCTL); | |
1278 | rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP); | |
1279 | if (dev->data->all_multicast == 1) | |
1280 | rctl |= E1000_RCTL_MPE; | |
1281 | else | |
1282 | rctl &= (~E1000_RCTL_MPE); | |
1283 | E1000_WRITE_REG(hw, E1000_RCTL, rctl); | |
1284 | } | |
1285 | ||
1286 | static void | |
1287 | eth_em_allmulticast_enable(struct rte_eth_dev *dev) | |
1288 | { | |
1289 | struct e1000_hw *hw = | |
1290 | E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1291 | uint32_t rctl; | |
1292 | ||
1293 | rctl = E1000_READ_REG(hw, E1000_RCTL); | |
1294 | rctl |= E1000_RCTL_MPE; | |
1295 | E1000_WRITE_REG(hw, E1000_RCTL, rctl); | |
1296 | } | |
1297 | ||
1298 | static void | |
1299 | eth_em_allmulticast_disable(struct rte_eth_dev *dev) | |
1300 | { | |
1301 | struct e1000_hw *hw = | |
1302 | E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1303 | uint32_t rctl; | |
1304 | ||
1305 | if (dev->data->promiscuous == 1) | |
1306 | return; /* must remain in all_multicast mode */ | |
1307 | rctl = E1000_READ_REG(hw, E1000_RCTL); | |
1308 | rctl &= (~E1000_RCTL_MPE); | |
1309 | E1000_WRITE_REG(hw, E1000_RCTL, rctl); | |
1310 | } | |
1311 | ||
/*
 * Add (on != 0) or remove a VLAN id from the hardware VLAN filter
 * table (VFTA), keeping the software shadow copy in sync.
 *
 * @return 0 always
 */
static int
eth_em_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta * shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	/* The VFTA is an array of 32-bit words: the upper bits of the
	 * VLAN id select the word, the lower bits select the bit. */
	vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
		E1000_VFTA_ENTRY_MASK);
	vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
1338 | ||
1339 | static void | |
1340 | em_vlan_hw_filter_disable(struct rte_eth_dev *dev) | |
1341 | { | |
1342 | struct e1000_hw *hw = | |
1343 | E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1344 | uint32_t reg; | |
1345 | ||
1346 | /* Filter Table Disable */ | |
1347 | reg = E1000_READ_REG(hw, E1000_RCTL); | |
1348 | reg &= ~E1000_RCTL_CFIEN; | |
1349 | reg &= ~E1000_RCTL_VFE; | |
1350 | E1000_WRITE_REG(hw, E1000_RCTL, reg); | |
1351 | } | |
1352 | ||
/*
 * Enable the hardware VLAN filter table and restore its contents from
 * the software shadow copy (the table is lost across resets).
 */
static void
em_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta * shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t reg;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* restore vfta from local copy */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}
1373 | ||
1374 | static void | |
1375 | em_vlan_hw_strip_disable(struct rte_eth_dev *dev) | |
1376 | { | |
1377 | struct e1000_hw *hw = | |
1378 | E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1379 | uint32_t reg; | |
1380 | ||
1381 | /* VLAN Mode Disable */ | |
1382 | reg = E1000_READ_REG(hw, E1000_CTRL); | |
1383 | reg &= ~E1000_CTRL_VME; | |
1384 | E1000_WRITE_REG(hw, E1000_CTRL, reg); | |
1385 | ||
1386 | } | |
1387 | ||
1388 | static void | |
1389 | em_vlan_hw_strip_enable(struct rte_eth_dev *dev) | |
1390 | { | |
1391 | struct e1000_hw *hw = | |
1392 | E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1393 | uint32_t reg; | |
1394 | ||
1395 | /* VLAN Mode Enable */ | |
1396 | reg = E1000_READ_REG(hw, E1000_CTRL); | |
1397 | reg |= E1000_CTRL_VME; | |
1398 | E1000_WRITE_REG(hw, E1000_CTRL, reg); | |
1399 | } | |
1400 | ||
/*
 * Apply the VLAN offload settings selected in the port's Rx mode.
 * 'mask' tells which offload groups (strip/filter) changed and must be
 * re-programmed.
 *
 * @return 0 always
 */
static int
eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if(mask & ETH_VLAN_STRIP_MASK){
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			em_vlan_hw_strip_enable(dev);
		else
			em_vlan_hw_strip_disable(dev);
	}

	if(mask & ETH_VLAN_FILTER_MASK){
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			em_vlan_hw_filter_enable(dev);
		else
			em_vlan_hw_filter_disable(dev);
	}

	return 0;
}
1423 | ||
/*
 * It enables the interrupt mask and then enable the interrupt.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t regval;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* clear interrupt: ICR is read-to-clear */
	E1000_READ_REG(hw, E1000_ICR);
	/* Unmask link-status-change and "other" causes on top of the
	 * currently enabled set. */
	regval = E1000_READ_REG(hw, E1000_IMS);
	E1000_WRITE_REG(hw, E1000_IMS,
			regval | E1000_ICR_LSC | E1000_ICR_OTHER);
	return 0;
}
1448 | ||
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during nic initialized.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Reading ICR acknowledges any pending causes. */
	E1000_READ_REG(hw, E1000_ICR);
	em_rxq_intr_enable(hw);
	return 0;
}
1470 | ||
/*
 * It enable receive packet interrupt.
 * @param hw
 *  Pointer to struct e1000_hw
 *
 * @return
 */
static void
em_rxq_intr_enable(struct e1000_hw *hw)
{
	/* Unmask the Rx timer interrupt and flush the posted write. */
	E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_RXT0);
	E1000_WRITE_FLUSH(hw);
}
1484 | ||
/*
 * It disabled lsc interrupt.
 * @param hw
 *  Pointer to struct e1000_hw
 *
 * @return
 */
static void
em_lsc_intr_disable(struct e1000_hw *hw)
{
	/* Mask link-status-change and "other" causes, then flush. */
	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC | E1000_IMS_OTHER);
	E1000_WRITE_FLUSH(hw);
}
1498 | ||
/*
 * It disabled receive packet interrupt.
 * @param hw
 *  Pointer to struct e1000_hw
 *
 * @return
 */
static void
em_rxq_intr_disable(struct e1000_hw *hw)
{
	/* Acknowledge pending causes (ICR is read-to-clear), then mask
	 * the Rx timer interrupt and flush the posted write. */
	E1000_READ_REG(hw, E1000_ICR);
	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
	E1000_WRITE_FLUSH(hw);
}
1513 | ||
1514 | /* | |
1515 | * It reads ICR and gets interrupt causes, check it and set a bit flag | |
1516 | * to update link status. | |
1517 | * | |
1518 | * @param dev | |
1519 | * Pointer to struct rte_eth_dev. | |
1520 | * | |
1521 | * @return | |
1522 | * - On success, zero. | |
1523 | * - On failure, a negative value. | |
1524 | */ | |
1525 | static int | |
1526 | eth_em_interrupt_get_status(struct rte_eth_dev *dev) | |
1527 | { | |
1528 | uint32_t icr; | |
1529 | struct e1000_hw *hw = | |
1530 | E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1531 | struct e1000_interrupt *intr = | |
1532 | E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
1533 | ||
1534 | /* read-on-clear nic registers here */ | |
1535 | icr = E1000_READ_REG(hw, E1000_ICR); | |
1536 | if (icr & E1000_ICR_LSC) { | |
1537 | intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; | |
1538 | } | |
1539 | ||
1540 | return 0; | |
1541 | } | |
1542 | ||
/*
 * It executes link_update after knowing an interrupt is prsent.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value (-1 when no link update was flagged).
 */
static int
eth_em_interrupt_action(struct rte_eth_dev *dev,
			struct rte_intr_handle *intr_handle)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link;
	int ret;

	/* Nothing to do unless eth_em_interrupt_get_status() saw LSC. */
	if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
		return -1;

	intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
	rte_intr_enable(intr_handle);

	/* set get_link_status to check register later */
	hw->mac.get_link_status = 1;
	ret = eth_em_link_update(dev, 0);

	/* check if link has changed */
	if (ret < 0)
		return 0;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
			     dev->data->port_id, link.link_speed,
			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			     "full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	return 0;
}
1595 | ||
/**
 * Interrupt handler which shall be registered at first.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) regsitered before.
 *
 * @return
 *  void
 */
static void
eth_em_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	/* Latch the interrupt causes, act on them, then notify
	 * applications subscribed to link-status-change events. */
	eth_em_interrupt_get_status(dev);
	eth_em_interrupt_action(dev, dev->intr_handle);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
1616 | ||
1617 | static int | |
1618 | eth_em_led_on(struct rte_eth_dev *dev) | |
1619 | { | |
1620 | struct e1000_hw *hw; | |
1621 | ||
1622 | hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1623 | return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; | |
1624 | } | |
1625 | ||
1626 | static int | |
1627 | eth_em_led_off(struct rte_eth_dev *dev) | |
1628 | { | |
1629 | struct e1000_hw *hw; | |
1630 | ||
1631 | hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1632 | return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; | |
1633 | } | |
1634 | ||
1635 | static int | |
1636 | eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) | |
1637 | { | |
1638 | struct e1000_hw *hw; | |
1639 | uint32_t ctrl; | |
1640 | int tx_pause; | |
1641 | int rx_pause; | |
1642 | ||
1643 | hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1644 | fc_conf->pause_time = hw->fc.pause_time; | |
1645 | fc_conf->high_water = hw->fc.high_water; | |
1646 | fc_conf->low_water = hw->fc.low_water; | |
1647 | fc_conf->send_xon = hw->fc.send_xon; | |
1648 | fc_conf->autoneg = hw->mac.autoneg; | |
1649 | ||
1650 | /* | |
1651 | * Return rx_pause and tx_pause status according to actual setting of | |
1652 | * the TFCE and RFCE bits in the CTRL register. | |
1653 | */ | |
1654 | ctrl = E1000_READ_REG(hw, E1000_CTRL); | |
1655 | if (ctrl & E1000_CTRL_TFCE) | |
1656 | tx_pause = 1; | |
1657 | else | |
1658 | tx_pause = 0; | |
1659 | ||
1660 | if (ctrl & E1000_CTRL_RFCE) | |
1661 | rx_pause = 1; | |
1662 | else | |
1663 | rx_pause = 0; | |
1664 | ||
1665 | if (rx_pause && tx_pause) | |
1666 | fc_conf->mode = RTE_FC_FULL; | |
1667 | else if (rx_pause) | |
1668 | fc_conf->mode = RTE_FC_RX_PAUSE; | |
1669 | else if (tx_pause) | |
1670 | fc_conf->mode = RTE_FC_TX_PAUSE; | |
1671 | else | |
1672 | fc_conf->mode = RTE_FC_NONE; | |
1673 | ||
1674 | return 0; | |
1675 | } | |
1676 | ||
1677 | static int | |
1678 | eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) | |
1679 | { | |
1680 | struct e1000_hw *hw; | |
1681 | int err; | |
1682 | enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = { | |
1683 | e1000_fc_none, | |
1684 | e1000_fc_rx_pause, | |
1685 | e1000_fc_tx_pause, | |
1686 | e1000_fc_full | |
1687 | }; | |
1688 | uint32_t rx_buf_size; | |
1689 | uint32_t max_high_water; | |
1690 | uint32_t rctl; | |
1691 | ||
1692 | hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1693 | if (fc_conf->autoneg != hw->mac.autoneg) | |
1694 | return -ENOTSUP; | |
1695 | rx_buf_size = em_get_rx_buffer_size(hw); | |
1696 | PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); | |
1697 | ||
1698 | /* At least reserve one Ethernet frame for watermark */ | |
1699 | max_high_water = rx_buf_size - ETHER_MAX_LEN; | |
1700 | if ((fc_conf->high_water > max_high_water) || | |
1701 | (fc_conf->high_water < fc_conf->low_water)) { | |
1702 | PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); | |
1703 | PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water); | |
1704 | return -EINVAL; | |
1705 | } | |
1706 | ||
1707 | hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode]; | |
1708 | hw->fc.pause_time = fc_conf->pause_time; | |
1709 | hw->fc.high_water = fc_conf->high_water; | |
1710 | hw->fc.low_water = fc_conf->low_water; | |
1711 | hw->fc.send_xon = fc_conf->send_xon; | |
1712 | ||
1713 | err = e1000_setup_link_generic(hw); | |
1714 | if (err == E1000_SUCCESS) { | |
1715 | ||
1716 | /* check if we want to forward MAC frames - driver doesn't have native | |
1717 | * capability to do that, so we'll write the registers ourselves */ | |
1718 | ||
1719 | rctl = E1000_READ_REG(hw, E1000_RCTL); | |
1720 | ||
1721 | /* set or clear MFLCN.PMCF bit depending on configuration */ | |
1722 | if (fc_conf->mac_ctrl_frame_fwd != 0) | |
1723 | rctl |= E1000_RCTL_PMCF; | |
1724 | else | |
1725 | rctl &= ~E1000_RCTL_PMCF; | |
1726 | ||
1727 | E1000_WRITE_REG(hw, E1000_RCTL, rctl); | |
1728 | E1000_WRITE_FLUSH(hw); | |
1729 | ||
1730 | return 0; | |
1731 | } | |
1732 | ||
1733 | PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err); | |
1734 | return -EIO; | |
1735 | } | |
1736 | ||
11fdf7f2 | 1737 | static int |
7c673cae FG |
1738 | eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, |
1739 | uint32_t index, __rte_unused uint32_t pool) | |
1740 | { | |
1741 | struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1742 | ||
11fdf7f2 | 1743 | return e1000_rar_set(hw, mac_addr->addr_bytes, index); |
7c673cae FG |
1744 | } |
1745 | ||
1746 | static void | |
1747 | eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index) | |
1748 | { | |
1749 | uint8_t addr[ETHER_ADDR_LEN]; | |
1750 | struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1751 | ||
1752 | memset(addr, 0, sizeof(addr)); | |
1753 | ||
1754 | e1000_rar_set(hw, addr, index); | |
1755 | } | |
1756 | ||
/* Replace the default MAC address: wipe RAR slot 0, then program the
 * new address into it (slot 0 holds the primary address). */
static int
eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
			    struct ether_addr *addr)
{
	eth_em_rar_clear(dev, 0);

	return eth_em_rar_set(dev, (void *)addr, 0, 0);
}
1765 | ||
7c673cae FG |
1766 | static int |
1767 | eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) | |
1768 | { | |
1769 | struct rte_eth_dev_info dev_info; | |
1770 | struct e1000_hw *hw; | |
1771 | uint32_t frame_size; | |
1772 | uint32_t rctl; | |
1773 | ||
1774 | eth_em_infos_get(dev, &dev_info); | |
1775 | frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE; | |
1776 | ||
1777 | /* check that mtu is within the allowed range */ | |
1778 | if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) | |
1779 | return -EINVAL; | |
1780 | ||
1781 | /* refuse mtu that requires the support of scattered packets when this | |
1782 | * feature has not been enabled before. */ | |
1783 | if (!dev->data->scattered_rx && | |
1784 | frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) | |
1785 | return -EINVAL; | |
1786 | ||
1787 | hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1788 | rctl = E1000_READ_REG(hw, E1000_RCTL); | |
1789 | ||
1790 | /* switch to jumbo mode if needed */ | |
1791 | if (frame_size > ETHER_MAX_LEN) { | |
9f95a23c TL |
1792 | dev->data->dev_conf.rxmode.offloads |= |
1793 | DEV_RX_OFFLOAD_JUMBO_FRAME; | |
7c673cae FG |
1794 | rctl |= E1000_RCTL_LPE; |
1795 | } else { | |
9f95a23c TL |
1796 | dev->data->dev_conf.rxmode.offloads &= |
1797 | ~DEV_RX_OFFLOAD_JUMBO_FRAME; | |
7c673cae FG |
1798 | rctl &= ~E1000_RCTL_LPE; |
1799 | } | |
1800 | E1000_WRITE_REG(hw, E1000_RCTL, rctl); | |
1801 | ||
1802 | /* update max frame size */ | |
1803 | dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; | |
1804 | return 0; | |
1805 | } | |
1806 | ||
1807 | static int | |
1808 | eth_em_set_mc_addr_list(struct rte_eth_dev *dev, | |
1809 | struct ether_addr *mc_addr_set, | |
1810 | uint32_t nb_mc_addr) | |
1811 | { | |
1812 | struct e1000_hw *hw; | |
1813 | ||
1814 | hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1815 | e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); | |
1816 | return 0; | |
1817 | } | |
1818 | ||
/* Register the em PMD with the PCI bus, export its PCI ID table, and
 * declare the kernel modules the devices may be bound to. */
RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
1822 | ||
/* see e1000_logs.c */
/* Constructor-time hook: register the driver's log types before main(). */
RTE_INIT(igb_init_log)
{
	e1000_igb_init_log();
}