/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "enic_res.h"
#include "enic.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
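/* ENIC_DESC_MAX_SPLITS is the worst-case number of WQ descriptors one
 * send can consume: a maximal (64KB) TSO payload is posted in pieces of
 * at most WQ_ENET_MAX_DESC_LEN bytes, plus one extra descriptor because
 * the buffer need not start on a descriptor-length boundary.
 * (WQ_ENET_LEN_BITS comes from the WQ descriptor headers, not this file.)
 */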

#define PCI_DEVICE_ID_CISCO_VIC_ENET	0x0043	/* ethernet vnic */

/* Supported devices */
static struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
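/* The vnic stats blocks are laid out as arrays of 64-bit counters, so
 * dividing the byte offset by 8 yields an index usable with a (u64 *)
 * view of the block; see enic_get_ethtool_stats() below.
 */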

static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);

static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ecmd->speed = vnic_dev_port_speed(enic->vdev);
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_fw_info(enic->vdev, &fw_info);
	spin_unlock(&enic->devcmd_lock);

	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &vstats);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}

static u32 enic_get_rx_csum(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->csum_rx_enabled;
}

static int enic_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, RXCSUM))
		return -EINVAL;

	enic->csum_rx_enabled = !!data;

	return 0;
}

static int enic_set_tx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TXCSUM))
		return -EINVAL;

	if (data)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}

static int enic_set_tso(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TSO))
		return -EINVAL;

	if (data)
		netdev->features |=
			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	else
		netdev->features &=
			~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);

	return 0;
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_rx_csum = enic_get_rx_csum,
	.set_rx_csum = enic_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = enic_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = enic_set_tso,
	.get_flags = ethtool_op_get_flags,
	.set_flags = ethtool_op_set_flags,
};

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

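	/* Wake the queue only once enough descriptors have completed to
	 * fit another worst-case send; this mirrors the stop threshold
	 * in enic_hard_start_xmit().
	 */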
	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
				enic->netdev->name, i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
				enic->netdev->name, i, error_status);
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
		netif_carrier_off(enic->netdev);
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);

	if (mtu && mtu != enic->port_mtu) {
		if (mtu < enic->netdev->mtu)
			printk(KERN_WARNING PFX
				"%s: interface MTU (%d) set higher "
				"than switch port MTU (%d)\n",
				enic->netdev->name, enic->netdev->mtu, mtu);
		enic->port_mtu = mtu;
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
			enic->netdev->name, enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
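/* In INTx mode the asserted interrupt sources are reported as a bitmap,
 * one bit per ENIC_INTX_* index; ENIC_TEST_INTR picks out one source.
 */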

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		if (napi_schedule_prep(&enic->napi))
			__napi_schedule(&enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it. The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated. Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct enic *enic = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left)
{
	skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		enic_queue_wq_desc_cont(wq, skb,
			pci_map_page(enic->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE),
			frag->size,
			(len_left == 0));	/* EOP? */
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_transport_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left));
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		frag_len_left = frag->size;
		offset = frag->page_offset;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = pci_map_page(enic->pdev, frag->page,
				offset, len,
				PCI_DMA_TODEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left));	/* EOP? */
			frag_len_left -= len;
			offset += len;
		}
	}
}

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;

	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely. In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", netdev->name);
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

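	/* Stop the queue if a worst-case follow-on send (MAX_SKB_FRAGS
	 * fragments, each possibly split) might not fit; enic_wq_service()
	 * wakes it once enough descriptors complete.
	 */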
	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct net_device_stats *enic_get_stats(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	struct vnic_stats *stats;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &stats);
	spin_unlock(&enic->devcmd_lock);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

static void enic_reset_mcaddrs(struct enic *enic)
{
	enic->mc_count = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_multicast_list(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct dev_mc_list *list = netdev->mc_list;
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		(netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int mc_count = netdev->mc_count;
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;

	spin_lock(&enic->devcmd_lock);

	vnic_dev_packet_filter(enic->vdev, directed,
		multicast, broadcast, promisc, allmulti);

	/* Is there an easier way? Trying to minimize
	 * calls to add/del multicast addrs. We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	for (i = 0; list && i < mc_count; i++) {
		memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN);
		list = list->next;
	}

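	/* Delete addrs that were filtered last time but are absent now */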
	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_del_multicast_addr(enic, enic->mc_addr[i]);
	}

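	/* Add addrs that are new in this call */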
	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_add_multicast_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;

	spin_unlock(&enic->devcmd_lock);
}

/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *vlan_group)
{
	struct enic *enic = netdev_priv(netdev);
	enic->vlan_group = vlan_group;
}

/* rtnl lock is held */
static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_add_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}

/* rtnl lock is held */
static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_del_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
{
	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);

	if (vnic_rq_posting_soon(rq)) {

		/* SW workaround for A0 HW erratum: if we're just about
		 * to write posted_index, insert a dummy desc
		 * of type resvd
		 */

		rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
		vnic_rq_post(rq, 0, 0, 0, 0);
	} else {
		return enic_rq_alloc_buf(rq);
	}

	return 0;
}

static int enic_set_rq_alloc_buf(struct enic *enic)
{
	enum vnic_dev_hw_version hw_ver;
	int err;

	err = vnic_dev_hw_version(enic->vdev, &hw_ver);
	if (err)
		return err;

	switch (hw_ver) {
	case VNIC_DEV_HW_VER_A1:
		enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
		break;
	case VNIC_DEV_HW_VER_A2:
	case VNIC_DEV_HW_VER_UNKNOWN:
		enic->rq_alloc_buf = enic_rq_alloc_buf;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

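/* Used as the inet_lro get_skb_header callback (the lro_mgr setup is
 * outside this excerpt): decode the CQ descriptor passed in priv, accept
 * only non-fragmented IPv4/TCP packets for aggregation, then locate the
 * IP and TCP headers in the skb.
 */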
static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
	void **tcph, u64 *hdr_flags, void *priv)
{
	struct cq_enet_rq_desc *cq_desc = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	cq_enet_rq_desc_dec(cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (!(ipv4 && tcp && !ipv4_fragment))
		return -1;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*tcph = tcp_hdr(skb);
	*iphdr = iph;

	return 0;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (enic->vlan_group && vlan_stripped) {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group,
					vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);

		} else {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);

		}

	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static void enic_rq_drop_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb = buf->os_buf;

	if (skipped)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	dev_kfree_skb_any(skb);
}

static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_drop_buf, opaque);

	return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	if (rq_work_done > 0) {

		/* Replenish RQ
		 */

		vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

	} else {

		/* If no work done, flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return rq_work_done;
}

static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int work_to_do = budget;
	unsigned int work_done;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		work_to_do, enic_rq_service, NULL);

	if (work_done > 0) {

		/* Replenish RQ
		 */

		vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

		/* Return intr event credits for this polling
		 * cycle. An intr event is the completion of
		 * an RQ packet.
		 */

		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);
	} else {

		/* If no work done, flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
	}

	return work_done;
}

static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
			"%.11s-rx-0", netdev->name);
		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
		enic->msix[ENIC_MSIX_RQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_WQ].devname,
			"%.11s-tx-0", netdev->name);
		enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
		enic->msix[ENIC_MSIX_WQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_ERR].devname,
			"%.11s-err", netdev->name);
		enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
		enic->msix[ENIC_MSIX_ERR].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
			"%.11s-notify", netdev->name);
		enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
		enic->msix[ENIC_MSIX_NOTIFY].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static int enic_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		printk(KERN_ERR PFX "%s: Unable to request irq.\n",
			netdev->name);
		return err;
	}

	err = enic_notify_set(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Failed to alloc notify buffer, aborting.\n",
			netdev->name);
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
		if (err) {
			printk(KERN_ERR PFX
				"%s: Unable to alloc receive buffers.\n",
				netdev->name);
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	spin_lock(&enic->devcmd_lock);
	enic_add_station_addr(enic);
	spin_unlock(&enic->devcmd_lock);
	enic_set_multicast_list(netdev);

	netif_wake_queue(netdev);
	napi_enable(&enic->napi);
	spin_lock(&enic->devcmd_lock);
	vnic_dev_enable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	spin_lock(&enic->devcmd_lock);
	vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	del_timer_sync(&enic->notify_timer);

	spin_lock(&enic->devcmd_lock);
	vnic_dev_disable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
	napi_disable(&enic->napi);
	netif_stop_queue(netdev);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_mask(&enic->intr[i]);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	spin_lock(&enic->devcmd_lock);
	vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
	enic_free_intr(enic);

	(void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		-1, enic_rq_service_drop, NULL);
	(void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		-1, enic_wq_service, NULL);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		printk(KERN_WARNING PFX
			"%s: interface MTU (%d) set higher "
			"than port MTU (%d)\n",
			netdev->name, netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		enic_isr_msix_rq(enic->pdev->irq, enic);
		enic_isr_msix_wq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

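/* Generic kick-and-poll helper for devcmd operations: start() launches
 * the operation, then finished() is polled every 100ms for up to 2
 * seconds before giving up with -ETIMEDOUT.
 */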
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		printk(KERN_ERR PFX
			"vNIC device open failed, err %d.\n", err);

	return err;
}

static int enic_dev_soft_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
		vnic_dev_soft_reset_done, 0);
	if (err)
		printk(KERN_ERR PFX
			"vNIC soft reset failed, err %d.\n", err);

	return err;
}

static int enic_set_niccfg(struct enic *enic)
{
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = 0;
	const u8 rss_hash_bits = 0;
	const u8 rss_base_cpu = 0;
	const u8 rss_enable = 0;
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;

	/* Enable VLAN tag stripping. RSS not enabled (yet).
	 */

	return enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
}

static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->devcmd_lock);
	vnic_dev_hang_notify(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	enic_stop(enic->netdev);
	enic_dev_soft_reset(enic);
	vnic_dev_init(enic->vdev, 0);
	enic_reset_mcaddrs(enic);
	enic_init_vnic_resources(enic);
	enic_set_niccfg(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}
1608 | ||
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = 1;
	unsigned int m = 1;
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending on
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2 &&
	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

		enic->rq_count = n;
		enic->wq_count = m;
		enic->cq_count = n + m;
		enic->intr_count = n + m + 2;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);

		return 0;
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

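/* Undo enic_set_intr_mode(): release MSI-X/MSI vectors if they were
 * enabled and mark the interrupt mode unknown.
 */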
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}

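/* net_device callbacks; MAC address handling is delegated to the
 * generic eth_mac_addr()/eth_validate_addr() helpers.
 */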
static const struct net_device_ops enic_netdev_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_set_multicast_list = enic_set_multicast_list,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_register = enic_vlan_rx_register,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};

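/* Tear down what enic_dev_init() set up: NAPI context, vNIC queue
 * resources, and the interrupt mode.
 */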
void enic_dev_deinit(struct enic *enic)
{
	netif_napi_del(&enic->napi);
	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}

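/* Driver-side device bring-up: read the vNIC config and resource
 * counts, choose an interrupt mode, allocate and initialize queue
 * resources, and register the matching NAPI poll handler.
 */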
int enic_dev_init(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	int err;

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Get vNIC configuration failed, aborting.\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to set intr mode, aborting.\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to alloc vNIC resources, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rq_alloc_buf(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to set RQ buffer allocator, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	err = enic_set_niccfg(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to config nic, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi, enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

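/* Unmap any BARs that were mapped in enic_probe().
 */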
static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}

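/* PCI probe: bring the adapter from raw PCI function to registered
 * net_device.  Each failure point unwinds through the err_out_* labels
 * in reverse order of setup.
 */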
static int __devinit enic_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot enable PCI device, aborting.\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot request PCI regions, aborting.\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 40-bit first, and
	 * fall back to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX
				"No usable DMA configuration, aborting.\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 32-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 40-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			printk(KERN_ERR PFX
				"Cannot memory-map BAR %d, aborting.\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		printk(KERN_ERR PFX
			"vNIC registration failed, aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		printk(KERN_ERR PFX
			"vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		printk(KERN_ERR PFX
			"vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

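	/* Hand off to enic_dev_init() for config, interrupt mode, and
	 * queue-resource setup (defined above).
	 */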
	err = enic_dev_init(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Device initialization failed, aborting.\n");
		goto err_out_dev_close;
	}

	/* Setup notification timer, HW reset task, and locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	spin_lock_init(&enic->devcmd_lock);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		printk(KERN_ERR PFX
			"Invalid MAC address, aborting.\n");
		goto err_out_dev_deinit;
	}

	netdev->netdev_ops = &enic_netdev_ops;
	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	netdev->features |= NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, LRO))
		netdev->features |= NETIF_F_LRO;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);

	enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
	enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
	enic->lro_mgr.lro_arr = enic->lro_desc;
	enic->lro_mgr.get_skb_header = enic_get_skb_header;
	enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	enic->lro_mgr.dev = netdev;
	enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
	enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	err = register_netdev(netdev);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot register net device, aborting.\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}

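/* PCI remove: mirror of enic_probe(), tearing down in reverse order.
 */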
static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		flush_scheduled_work();
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}

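/* PCI driver glue binding enic_probe()/enic_remove() to the device ID
 * table declared at the top of this file.
 */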
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};

static int __init enic_init_module(void)
{
	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);