/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>

#include "qlcnic.h"

#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>

MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);

char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
	"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;

static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");

static int use_msi = 1;
module_param(use_msi, int, 0444);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");

static int use_msi_x = 1;
module_param(use_msi_x, int, 0444);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");

static int auto_fw_reset = 1;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");

static int load_fw_file;
module_param(load_fw_file, int, 0444);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");

static int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");

static int __devinit qlcnic_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay);
static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
static int qlcnic_poll(struct napi_struct *napi, int budget);
static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif

static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);

static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);

static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);

static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);

static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
		struct qlcnic_esw_func_cfg *);

/* PCI Device ID Table */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

#define PCI_DEVICE_ID_QLOGIC_QLE824X	0x8020

static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);


void
qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring)
{
	writel(tx_ring->producer, tx_ring->crb_cmd_producer);
}

static const u32 msi_tgt_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};

static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;

static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	writel(0, sds_ring->crb_intr_mask);
}

static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;

	writel(0x1, sds_ring->crb_intr_mask);

	if (!QLCNIC_IS_MSI_FAMILY(adapter))
		writel(0xfbff, adapter->tgt_mask_reg);
}

static int
qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
{
	int size = sizeof(struct qlcnic_host_sds_ring) * count;

	recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);

	return recv_ctx->sds_rings == NULL;
}

static void
qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
{
	if (recv_ctx->sds_rings != NULL)
		kfree(recv_ctx->sds_rings);

	recv_ctx->sds_rings = NULL;
}

static int
qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == adapter->max_sds_rings - 1)
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
				QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
		else
			netif_napi_add(netdev, &sds_ring->napi,
				qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
	}

	return 0;
}

static void
qlcnic_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(&adapter->recv_ctx);
}

static void
qlcnic_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}

static void
qlcnic_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}

static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}

static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
{
	u32 control;
	int pos;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_dword(pdev, pos, &control);
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		else
			control = 0;
		pci_write_config_dword(pdev, pos, control);
	}
}

static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
{
	int i;

	for (i = 0; i < count; i++)
		adapter->msix_entries[i].entry = i;
}

static int
qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
	u8 mac_addr[ETH_ALEN];
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
		return -EIO;

	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);

	/* set station address */

	if (!is_valid_ether_addr(netdev->perm_addr))
		dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
					netdev->dev_addr);

	return 0;
}

static int qlcnic_set_mac(struct net_device *netdev, void *p)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
		return -EOPNOTSUPP;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_device_detach(netdev);
		qlcnic_napi_disable(adapter);
	}

	memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	qlcnic_set_multi(adapter->netdev);

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_device_attach(netdev);
		qlcnic_napi_enable(adapter);
	}
	return 0;
}

static void qlcnic_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	adapter->vlgrp = grp;
}

static const struct net_device_ops qlcnic_netdev_ops = {
	.ndo_open = qlcnic_open,
	.ndo_stop = qlcnic_close,
	.ndo_start_xmit = qlcnic_xmit_frame,
	.ndo_get_stats = qlcnic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = qlcnic_set_multi,
	.ndo_set_mac_address = qlcnic_set_mac,
	.ndo_change_mtu = qlcnic_change_mtu,
	.ndo_tx_timeout = qlcnic_tx_timeout,
	.ndo_vlan_rx_register = qlcnic_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = qlcnic_poll_controller,
#endif
};

static struct qlcnic_nic_template qlcnic_ops = {
	.config_bridged_mode = qlcnic_config_bridged_mode,
	.config_led = qlcnic_config_led,
	.start_firmware = qlcnic_start_firmware
};

static struct qlcnic_nic_template qlcnic_vf_ops = {
	.config_bridged_mode = qlcnicvf_config_bridged_mode,
	.config_led = qlcnicvf_config_led,
	.start_firmware = qlcnicvf_start_firmware
};

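/*
 * Interrupt setup: try MSI-X first (one vector per SDS ring when RSS is
 * supported), fall back to MSI when use_msi is set, and finally to legacy
 * INTx using the per-function legacy_intr[] register set.
 */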
static void
qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
	const struct qlcnic_legacy_intr_set *legacy_intrp;
	struct pci_dev *pdev = adapter->pdev;
	int err, num_msix;

	if (adapter->rss_supported) {
		num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
			MSIX_ENTRIES_PER_ADAPTER : 2;
	} else
		num_msix = 1;

	adapter->max_sds_rings = 1;

	adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);

	legacy_intrp = &legacy_intr[adapter->ahw.pci_func];

	adapter->int_vec_bit = legacy_intrp->int_vec_bit;
	adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
			legacy_intrp->tgt_status_reg);
	adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
			legacy_intrp->tgt_mask_reg);
	adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);

	adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
			ISR_INT_STATE_REG);

	qlcnic_set_msix_bit(pdev, 0);

	if (adapter->msix_supported) {

		qlcnic_init_msix_entries(adapter, num_msix);
		err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
		if (err == 0) {
			adapter->flags |= QLCNIC_MSIX_ENABLED;
			qlcnic_set_msix_bit(pdev, 1);

			if (adapter->rss_supported)
				adapter->max_sds_rings = num_msix;

			dev_info(&pdev->dev, "using msi-x interrupts\n");
			return;
		}

		if (err > 0)
			pci_disable_msix(pdev);

		/* fall through for msi */
	}

	if (use_msi && !pci_enable_msi(pdev)) {
		adapter->flags |= QLCNIC_MSI_ENABLED;
		adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
				msi_tgt_status[adapter->ahw.pci_func]);
		dev_info(&pdev->dev, "using msi interrupts\n");
		adapter->msix_entries[0].vector = pdev->irq;
		return;
	}

	dev_info(&pdev->dev, "using legacy interrupts\n");
	adapter->msix_entries[0].vector = pdev->irq;
}

static void
qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
{
	if (adapter->flags & QLCNIC_MSIX_ENABLED)
		pci_disable_msix(adapter->pdev);
	if (adapter->flags & QLCNIC_MSI_ENABLED)
		pci_disable_msi(adapter->pdev);
}

static void
qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
{
	if (adapter->ahw.pci_base0 != NULL)
		iounmap(adapter->ahw.pci_base0);
}

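/*
 * Cache the per-PCI-function NPAR and eswitch capabilities reported by
 * firmware; every allocation made here is released on the error paths below.
 */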
static int
qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
	struct qlcnic_pci_info *pci_info;
	int i, ret = 0;
	u8 pfn;

	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
	if (!pci_info)
		return -ENOMEM;

	adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
				QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
	if (!adapter->npars) {
		ret = -ENOMEM;
		goto err_pci_info;
	}

	adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
				QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
	if (!adapter->eswitch) {
		ret = -ENOMEM;
		goto err_npars;
	}

	ret = qlcnic_get_pci_info(adapter, pci_info);
	if (ret)
		goto err_eswitch;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		pfn = pci_info[i].id;
		if (pfn > QLCNIC_MAX_PCI_FUNC) {
			ret = QL_STATUS_INVALID_PARAM;
			goto err_eswitch;
		}
		adapter->npars[pfn].active = (u8)pci_info[i].active;
		adapter->npars[pfn].type = (u8)pci_info[i].type;
		adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
		adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
		adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
	}

	for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
		adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;

	kfree(pci_info);
	return 0;

err_eswitch:
	kfree(adapter->eswitch);
	adapter->eswitch = NULL;
err_npars:
	kfree(adapter->npars);
	adapter->npars = NULL;
err_pci_info:
	kfree(pci_info);

	return ret;
}

static int
qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
{
	u8 id;
	u32 ref_count;
	int i, ret = 1;
	u32 data = QLCNIC_MGMT_FUNC;
	void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;

	/* If other drivers are not in use set their privilege level */
	ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
	ret = qlcnic_api_lock(adapter);
	if (ret)
		goto err_lock;

	if (qlcnic_config_npars) {
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
			id = i;
			if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
				id == adapter->ahw.pci_func)
				continue;
			data |= (qlcnic_config_npars &
					QLC_DEV_SET_DRV(0xf, id));
		}
	} else {
		data = readl(priv_op);
		data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
			(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
			adapter->ahw.pci_func));
	}
	writel(data, priv_op);
	qlcnic_api_unlock(adapter);
err_lock:
	return ret;
}

static void
qlcnic_check_vf(struct qlcnic_adapter *adapter)
{
	void __iomem *msix_base_addr;
	void __iomem *priv_op;
	u32 func;
	u32 msix_base;
	u32 op_mode, priv_level;

	/* Determine FW API version */
	adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);

	/* Find PCI function number */
	pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
	msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
	msix_base = readl(msix_base_addr);
	func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
	adapter->ahw.pci_func = func;

	/* Determine function privilege level */
	priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
	op_mode = readl(priv_op);
	if (op_mode == QLC_DEV_DRV_DEFAULT)
		priv_level = QLCNIC_MGMT_FUNC;
	else
		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);

	if (priv_level == QLCNIC_NON_PRIV_FUNC) {
		adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
		dev_info(&adapter->pdev->dev,
			"HAL Version: %d Non Privileged function\n",
			adapter->fw_hal_version);
		adapter->nic_ops = &qlcnic_vf_ops;
	} else
		adapter->nic_ops = &qlcnic_ops;
}

static int
qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
{
	void __iomem *mem_ptr0 = NULL;
	resource_size_t mem_base;
	unsigned long mem_len, pci_len0 = 0;

	struct pci_dev *pdev = adapter->pdev;

	/* remap phys address */
	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
	mem_len = pci_resource_len(pdev, 0);

	if (mem_len == QLCNIC_PCI_2MB_SIZE) {

		mem_ptr0 = pci_ioremap_bar(pdev, 0);
		if (mem_ptr0 == NULL) {
			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
			return -EIO;
		}
		pci_len0 = mem_len;
	} else {
		return -EIO;
	}

	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));

	adapter->ahw.pci_base0 = mem_ptr0;
	adapter->ahw.pci_len0 = pci_len0;

	qlcnic_check_vf(adapter);

	adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));

	return 0;
}

static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, found = 0;

	for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
		if (qlcnic_boards[i].vendor == pdev->vendor &&
			qlcnic_boards[i].device == pdev->device &&
			qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
			qlcnic_boards[i].sub_device == pdev->subsystem_device) {
			sprintf(name, "%pM: %s",
				adapter->mac_addr,
				qlcnic_boards[i].short_name);
			found = 1;
			break;
		}

	}

	if (!found)
		sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
}

static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
	u32 fw_major, fw_minor, fw_build;
	struct pci_dev *pdev = adapter->pdev;

	fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
	fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
	fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);

	adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);

	dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
			fw_major, fw_minor, fw_build);

	if (adapter->ahw.port_type == QLCNIC_XGBE) {
		if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
			adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
		} else {
			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
			adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
		}

		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;

	} else if (adapter->ahw.port_type == QLCNIC_GBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
		adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
	}

	adapter->msix_supported = !!use_msi_x;
	adapter->rss_supported = !!use_msi_x;

	adapter->num_txd = MAX_CMD_DESCRIPTORS;

	adapter->max_rds_rings = MAX_RDS_RINGS;
}

static int
qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
{
	int err;
	struct qlcnic_info nic_info;

	err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
	if (err)
		return err;

	adapter->physical_port = (u8)nic_info.phys_port;
	adapter->switch_mode = nic_info.switch_mode;
	adapter->max_tx_ques = nic_info.max_tx_ques;
	adapter->max_rx_ques = nic_info.max_rx_ques;
	adapter->capabilities = nic_info.capabilities;
	adapter->max_mac_filters = nic_info.max_mac_filters;
	adapter->max_mtu = nic_info.max_mtu;

	if (adapter->capabilities & BIT_6)
		adapter->flags |= QLCNIC_ESWITCH_ENABLED;
	else
		adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;

	return err;
}

static void
qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	if (esw_cfg->discard_tagged)
		adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
	else
		adapter->flags |= QLCNIC_TAGGING_ENABLED;

	if (esw_cfg->vlan_id)
		adapter->pvid = esw_cfg->vlan_id;
	else
		adapter->pvid = 0;
}

static void
qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
				QLCNIC_PROMISC_DISABLED);

	if (esw_cfg->mac_anti_spoof)
		adapter->flags |= QLCNIC_MACSPOOF;

	if (!esw_cfg->mac_override)
		adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;

	if (!esw_cfg->promisc_mode)
		adapter->flags |= QLCNIC_PROMISC_DISABLED;

	qlcnic_set_netdev_features(adapter, esw_cfg);
}

static int
qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
{
	struct qlcnic_esw_func_cfg esw_cfg;

	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
		return 0;

	esw_cfg.pci_func = adapter->ahw.pci_func;
	if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
		return -EIO;
	qlcnic_set_vlan_config(adapter, &esw_cfg);
	qlcnic_set_eswitch_port_features(adapter, &esw_cfg);

	return 0;
}

static void
qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long features, vlan_features;

	features = (NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_IPV6_CSUM | NETIF_F_GRO);
	vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_IPV6_CSUM);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
		features |= (NETIF_F_TSO | NETIF_F_TSO6);
		vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
	}
	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
		features |= NETIF_F_LRO;

	if (esw_cfg->offload_flags & BIT_0) {
		netdev->features |= features;
		adapter->rx_csum = 1;
		if (!(esw_cfg->offload_flags & BIT_1))
			netdev->features &= ~NETIF_F_TSO;
		if (!(esw_cfg->offload_flags & BIT_2))
			netdev->features &= ~NETIF_F_TSO6;
	} else {
		netdev->features &= ~features;
		adapter->rx_csum = 0;
	}

	netdev->vlan_features = (features & vlan_features);
}

static int
qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
{
	void __iomem *priv_op;
	u32 op_mode, priv_level;
	int err = 0;

	err = qlcnic_initialize_nic(adapter);
	if (err)
		return err;

	if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
		return 0;

	priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
	op_mode = readl(priv_op);
	priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);

	if (op_mode == QLC_DEV_DRV_DEFAULT)
		priv_level = QLCNIC_MGMT_FUNC;
	else
		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);

	if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
		if (priv_level == QLCNIC_MGMT_FUNC) {
			adapter->op_mode = QLCNIC_MGMT_FUNC;
			err = qlcnic_init_pci_info(adapter);
			if (err)
				return err;
			/* Set privilege level for other functions */
			qlcnic_set_function_modes(adapter);
			dev_info(&adapter->pdev->dev,
				"HAL Version: %d, Management function\n",
				adapter->fw_hal_version);
		} else if (priv_level == QLCNIC_PRIV_FUNC) {
			adapter->op_mode = QLCNIC_PRIV_FUNC;
			dev_info(&adapter->pdev->dev,
				"HAL Version: %d, Privileged function\n",
				adapter->fw_hal_version);
		}
	}

	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;

	return err;
}

static int
qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
{
	struct qlcnic_esw_func_cfg esw_cfg;
	struct qlcnic_npar_info *npar;
	u8 i;

	if (adapter->need_fw_reset)
		return 0;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
			continue;
		memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
		esw_cfg.pci_func = i;
		esw_cfg.offload_flags = BIT_0;
		esw_cfg.mac_override = BIT_0;
		esw_cfg.promisc_mode = BIT_0;
		if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			esw_cfg.offload_flags |= (BIT_1 | BIT_2);
		if (qlcnic_config_switch_port(adapter, &esw_cfg))
			return -EIO;
		npar = &adapter->npars[i];
		npar->pvid = esw_cfg.vlan_id;
		npar->mac_override = esw_cfg.mac_override;
		npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
		npar->discard_tagged = esw_cfg.discard_tagged;
		npar->promisc_mode = esw_cfg.promisc_mode;
		npar->offload_flags = esw_cfg.offload_flags;
	}

	return 0;
}

static int
qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
			struct qlcnic_npar_info *npar, int pci_func)
{
	struct qlcnic_esw_func_cfg esw_cfg;
	esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
	esw_cfg.pci_func = pci_func;
	esw_cfg.vlan_id = npar->pvid;
	esw_cfg.mac_override = npar->mac_override;
	esw_cfg.discard_tagged = npar->discard_tagged;
	esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
	esw_cfg.offload_flags = npar->offload_flags;
	esw_cfg.promisc_mode = npar->promisc_mode;
	if (qlcnic_config_switch_port(adapter, &esw_cfg))
		return -EIO;

	esw_cfg.op_mode = QLCNIC_ADD_VLAN;
	if (qlcnic_config_switch_port(adapter, &esw_cfg))
		return -EIO;

	return 0;
}

static int
qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
{
	int i, err;
	struct qlcnic_npar_info *npar;
	struct qlcnic_info nic_info;

	if (!adapter->need_fw_reset)
		return 0;

	/* Set the NPAR config data after FW reset */
	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		npar = &adapter->npars[i];
		if (npar->type != QLCNIC_TYPE_NIC)
			continue;
		err = qlcnic_get_nic_info(adapter, &nic_info, i);
		if (err)
			return err;
		nic_info.min_tx_bw = npar->min_bw;
		nic_info.max_tx_bw = npar->max_bw;
		err = qlcnic_set_nic_info(adapter, &nic_info);
		if (err)
			return err;

		if (npar->enable_pm) {
			err = qlcnic_config_port_mirroring(adapter,
						npar->dest_npar, 1, i);
			if (err)
				return err;
		}
		err = qlcnic_reset_eswitch_config(adapter, npar, i);
		if (err)
			return err;
	}
	return 0;
}

static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
{
	u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
	u32 npar_state;

	if (adapter->op_mode == QLCNIC_MGMT_FUNC)
		return 0;

	npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
	while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
		msleep(1000);
		npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
	}
	if (!npar_opt_timeo) {
		dev_err(&adapter->pdev->dev,
			"Timed out waiting for NPAR state to become operational\n");
		return -EIO;
	}
	return 0;
}

static int
qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
{
	int err;

	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
		adapter->op_mode != QLCNIC_MGMT_FUNC)
		return 0;

	err = qlcnic_set_default_offload_settings(adapter);
	if (err)
		return err;

	err = qlcnic_reset_npar_config(adapter);
	if (err)
		return err;

	qlcnic_dev_set_npar_ready(adapter);

	return err;
}

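/*
 * Firmware bring-up: decide whether this function may (re)load firmware,
 * fetch the image from flash or from a file (load_fw_file), load it if a
 * reset is needed, wait for the firmware to come up, then mark the device
 * ready and apply the eswitch/NPAR management settings.
 */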
static int
qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
	int err;

	err = qlcnic_can_start_firmware(adapter);
	if (err < 0)
		return err;
	else if (!err)
		goto check_fw_status;

	if (load_fw_file)
		qlcnic_request_firmware(adapter);
	else {
		err = qlcnic_check_flash_fw_ver(adapter);
		if (err)
			goto err_out;

		adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
	}

	err = qlcnic_need_fw_reset(adapter);
	if (err == 0)
		goto check_fw_status;

	err = qlcnic_pinit_from_rom(adapter);
	if (err)
		goto err_out;

	err = qlcnic_load_firmware(adapter);
	if (err)
		goto err_out;

	qlcnic_release_firmware(adapter);
	QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);

check_fw_status:
	err = qlcnic_check_fw_status(adapter);
	if (err)
		goto err_out;

	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
	qlcnic_idc_debug_info(adapter, 1);

	err = qlcnic_check_eswitch_mode(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failed for eswitch\n");
		goto err_out;
	}
	err = qlcnic_set_mgmt_operations(adapter);
	if (err)
		goto err_out;

	qlcnic_check_options(adapter);
	adapter->need_fw_reset = 0;

	qlcnic_release_firmware(adapter);
	return 0;

err_out:
	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
	dev_err(&adapter->pdev->dev, "Device state set to failed\n");

	qlcnic_release_firmware(adapter);
	return err;
}

static int
qlcnic_request_irq(struct qlcnic_adapter *adapter)
{
	irq_handler_t handler;
	struct qlcnic_host_sds_ring *sds_ring;
	int err, ring;

	unsigned long flags = 0;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		handler = qlcnic_tmp_intr;
		if (!QLCNIC_IS_MSI_FAMILY(adapter))
			flags |= IRQF_SHARED;

	} else {
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			handler = qlcnic_msix_intr;
		else if (adapter->flags & QLCNIC_MSI_ENABLED)
			handler = qlcnic_msi_intr;
		else {
			flags |= IRQF_SHARED;
			handler = qlcnic_intr;
		}
	}
	adapter->irq = netdev->irq;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
		err = request_irq(sds_ring->irq, handler,
			flags, sds_ring->name, sds_ring);
		if (err)
			return err;
	}

	return 0;
}

static void
qlcnic_free_irq(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;

	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		free_irq(sds_ring->irq, sds_ring);
	}
}

static void
qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
{
	adapter->coal.flags = QLCNIC_INTR_DEFAULT;
	adapter->coal.normal.data.rx_time_us =
		QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
	adapter->coal.normal.data.rx_packets =
		QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
	adapter->coal.normal.data.tx_time_us =
		QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
	adapter->coal.normal.data.tx_packets =
		QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
}

static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_rds_ring *rds_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return -EIO;

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
		return 0;
	if (qlcnic_set_eswitch_port_config(adapter))
		return -EIO;

	if (qlcnic_fw_create_ctx(adapter))
		return -EIO;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx.rds_rings[ring];
		qlcnic_post_rx_buffers(adapter, ring, rds_ring);
	}

	qlcnic_set_multi(netdev);
	qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);

	adapter->ahw.linkup = 0;

	if (adapter->max_sds_rings > 1)
		qlcnic_config_rss(adapter, 1);

	qlcnic_config_intr_coalesce(adapter);

	if (netdev->features & NETIF_F_LRO)
		qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);

	qlcnic_napi_enable(adapter);

	qlcnic_linkevent_request(adapter, 1);

	adapter->reset_context = 0;
	set_bit(__QLCNIC_DEV_UP, &adapter->state);
	return 0;
}

/* Usage: During resume and firmware recovery module.*/

static int
qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int err = 0;

	rtnl_lock();
	if (netif_running(netdev))
		err = __qlcnic_up(adapter, netdev);
	rtnl_unlock();

	return err;
}

static void
__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
		return;

	smp_mb();
	spin_lock(&adapter->tx_clean_lock);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	qlcnic_free_mac_list(adapter);

	if (adapter->fhash.fnum)
		qlcnic_delete_lb_filters(adapter);

	qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);

	qlcnic_napi_disable(adapter);

	qlcnic_fw_destroy_ctx(adapter);

	qlcnic_reset_rx_buffers_list(adapter);
	qlcnic_release_tx_buffers(adapter);
	spin_unlock(&adapter->tx_clean_lock);
}

/* Usage: During suspend and firmware recovery module */

static void
qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	rtnl_lock();
	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);
	rtnl_unlock();

}

static int
qlcnic_attach(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
		return 0;

	err = qlcnic_napi_add(adapter, netdev);
	if (err)
		return err;

	err = qlcnic_alloc_sw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting sw resources\n");
		goto err_out_napi_del;
	}

	err = qlcnic_alloc_hw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting hw resources\n");
		goto err_out_free_sw;
	}

	err = qlcnic_request_irq(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to setup interrupt\n");
		goto err_out_free_hw;
	}

	qlcnic_init_coalesce_defaults(adapter);

	qlcnic_create_sysfs_entries(adapter);

	adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
	return 0;

err_out_free_hw:
	qlcnic_free_hw_resources(adapter);
err_out_free_sw:
	qlcnic_free_sw_resources(adapter);
err_out_napi_del:
	qlcnic_napi_del(adapter);
	return err;
}

static void
qlcnic_detach(struct qlcnic_adapter *adapter)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	qlcnic_remove_sysfs_entries(adapter);

	qlcnic_free_hw_resources(adapter);
	qlcnic_release_rx_buffers(adapter);
	qlcnic_free_irq(adapter);
	qlcnic_napi_del(adapter);
	qlcnic_free_sw_resources(adapter);

	adapter->is_up = 0;
}

void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_sds_ring *sds_ring;
	int ring;

	clear_bit(__QLCNIC_DEV_UP, &adapter->state);
	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
			sds_ring = &adapter->recv_ctx.sds_rings[ring];
			qlcnic_disable_int(sds_ring);
		}
	}

	qlcnic_fw_destroy_ctx(adapter);

	qlcnic_detach(adapter);

	adapter->diag_test = 0;
	adapter->max_sds_rings = max_sds_rings;

	if (qlcnic_attach(adapter))
		goto out;

	if (netif_running(netdev))
		__qlcnic_up(adapter, netdev);
out:
	netif_device_attach(netdev);
}

int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_rds_ring *rds_ring;
	int ring;
	int ret;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);

	qlcnic_detach(adapter);

	adapter->max_sds_rings = 1;
	adapter->diag_test = test;

	ret = qlcnic_attach(adapter);
	if (ret) {
		netif_device_attach(netdev);
		return ret;
	}

	ret = qlcnic_fw_create_ctx(adapter);
	if (ret) {
		qlcnic_detach(adapter);
		netif_device_attach(netdev);
		return ret;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx.rds_rings[ring];
		qlcnic_post_rx_buffers(adapter, ring, rds_ring);
	}

	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
			sds_ring = &adapter->recv_ctx.sds_rings[ring];
			qlcnic_enable_int(sds_ring);
		}
	}
	set_bit(__QLCNIC_DEV_UP, &adapter->state);

	return 0;
}

/* Reset context in hardware only */
static int
qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return -EBUSY;

	netif_device_detach(netdev);

	qlcnic_down(adapter, netdev);

	qlcnic_up(adapter, netdev);

	netif_device_attach(netdev);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}

int
qlcnic_reset_context(struct qlcnic_adapter *adapter)
{
	int err = 0;
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return -EBUSY;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {

		netif_device_detach(netdev);

		if (netif_running(netdev))
			__qlcnic_down(adapter, netdev);

		qlcnic_detach(adapter);

		if (netif_running(netdev)) {
			err = qlcnic_attach(adapter);
			if (!err)
				__qlcnic_up(adapter, netdev);
		}

		netif_device_attach(netdev);
	}

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return err;
}

static int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
		struct net_device *netdev, u8 pci_using_dac)
{
	int err;
	struct pci_dev *pdev = adapter->pdev;

	adapter->rx_csum = 1;
	adapter->mc_enabled = 0;
	adapter->max_mc_count = 38;

	netdev->netdev_ops = &qlcnic_netdev_ops;
	netdev->watchdog_timeo = 5*HZ;

	qlcnic_change_mtu(netdev, netdev->mtu);

	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);

	netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
	netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
		netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
		netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
	}

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
		netdev->features |= (NETIF_F_HW_VLAN_TX);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
		netdev->features |= NETIF_F_LRO;
	netdev->irq = adapter->msix_entries[0].vector;

	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "failed to register net device\n");
		return err;
	}

	return 0;
}

static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
			!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		*pci_using_dac = 1;
	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
			!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		*pci_using_dac = 0;
	else {
		dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
		return -EIO;
	}

	return 0;
}

static int __devinit
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct qlcnic_adapter *adapter = NULL;
	int err;
	uint8_t revision_id;
	uint8_t pci_using_dac;
	char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
	if (err)
		goto err_out_disable_pdev;

	err = pci_request_regions(pdev, qlcnic_driver_name);
	if (err)
		goto err_out_disable_pdev;

	pci_set_master(pdev);
	pci_enable_pcie_error_reporting(pdev);

	netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "failed to allocate net_device\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->dev_rst_time = jiffies;

	revision_id = pdev->revision;
	adapter->ahw.revision_id = revision_id;

	rwlock_init(&adapter->ahw.crb_lock);
	mutex_init(&adapter->ahw.mem_lock);

	spin_lock_init(&adapter->tx_clean_lock);
	INIT_LIST_HEAD(&adapter->mac_list);

	err = qlcnic_setup_pci_map(adapter);
	if (err)
		goto err_out_free_netdev;

	/* This will be reset for mezz cards */
	adapter->portnum = adapter->ahw.pci_func;

	err = qlcnic_get_board_info(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error getting board config info.\n");
		goto err_out_iounmap;
	}

	err = qlcnic_setup_idc_param(adapter);
	if (err)
		goto err_out_iounmap;

	adapter->flags |= QLCNIC_NEED_FLR;

	err = adapter->nic_ops->start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
		goto err_out_decr_ref;
	}

	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&pdev->dev, "failed to read mac addr\n");

	if (adapter->portnum == 0) {
		get_brd_name(adapter, brd_name);

		pr_info("%s: %s Board Chip rev 0x%x\n",
				module_name(THIS_MODULE),
				brd_name, adapter->ahw.revision_id);
	}

	qlcnic_clear_stats(adapter);

	qlcnic_setup_intr(adapter);

	err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
	if (err)
		goto err_out_disable_msi;

	pci_set_drvdata(pdev, adapter);

	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);

	switch (adapter->ahw.port_type) {
	case QLCNIC_GBE:
		dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
				adapter->netdev->name);
		break;
	case QLCNIC_XGBE:
		dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
				adapter->netdev->name);
		break;
	}

	qlcnic_alloc_lb_filters_mem(adapter);
	qlcnic_create_diag_entries(adapter);

	return 0;

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);

err_out_decr_ref:
	qlcnic_clr_all_drv_state(adapter, 0);

err_out_iounmap:
	qlcnic_cleanup_pci_map(adapter);

err_out_free_netdev:
	free_netdev(netdev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	return err;
}

static void __devexit qlcnic_remove(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter;
	struct net_device *netdev;

	adapter = pci_get_drvdata(pdev);
	if (adapter == NULL)
		return;

	netdev = adapter->netdev;

	qlcnic_cancel_fw_work(adapter);

	unregister_netdev(netdev);

	qlcnic_detach(adapter);

	if (adapter->npars != NULL)
		kfree(adapter->npars);
	if (adapter->eswitch != NULL)
		kfree(adapter->eswitch);

	qlcnic_clr_all_drv_state(adapter, 0);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	qlcnic_free_lb_filters_mem(adapter);

	qlcnic_teardown_intr(adapter);

	qlcnic_remove_diag_entries(adapter);

	qlcnic_cleanup_pci_map(adapter);

	qlcnic_release_firmware(adapter);

	pci_disable_pcie_error_reporting(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	free_netdev(netdev);
}

static int __qlcnic_shutdown(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int retval;

	netif_device_detach(netdev);

	qlcnic_cancel_fw_work(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	qlcnic_clr_all_drv_state(adapter, 0);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	if (qlcnic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	return 0;
}

static void qlcnic_shutdown(struct pci_dev *pdev)
{
	if (__qlcnic_shutdown(pdev))
		return;

	pci_disable_device(pdev);
}

#ifdef CONFIG_PM
static int
qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;

	retval = __qlcnic_shutdown(pdev);
	if (retval)
		return retval;

	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int
qlcnic_resume(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_power_state(pdev, PCI_D0);
	pci_set_master(pdev);
	pci_restore_state(pdev);

	err = adapter->nic_ops->start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to start firmware\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = qlcnic_up(adapter, netdev);
		if (err)
			goto done;

		qlcnic_restore_indev_addr(netdev, NETDEV_UP);
	}
done:
	netif_device_attach(netdev);
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
	return 0;
}
#endif

static int qlcnic_open(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	int err;

	err = qlcnic_attach(adapter);
	if (err)
		return err;

	err = __qlcnic_up(adapter, netdev);
	if (err)
		goto err_out;

	netif_start_queue(netdev);

	return 0;

err_out:
	qlcnic_detach(adapter);
	return err;
}

/*
 * qlcnic_close - Disables a network interface entry point
 */
static int qlcnic_close(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	__qlcnic_down(adapter, netdev);
	return 0;
}

static void
qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
{
	void *head;
	int i;

	if (!qlcnic_mac_learn)
		return;

	spin_lock_init(&adapter->mac_learn_lock);

	head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
								GFP_KERNEL);
	if (!head)
		return;

	adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
	adapter->fhash.fhead = (struct hlist_head *)head;

	for (i = 0; i < adapter->fhash.fmax; i++)
		INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
}

static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
{
	if (adapter->fhash.fmax && adapter->fhash.fhead)
		kfree(adapter->fhash.fhead);

	adapter->fhash.fhead = NULL;
	adapter->fhash.fmax = 0;
}

static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
		u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = vlan_id;

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
}

#define QLCNIC_MAC_HASH(MAC)\
	((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))

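/*
 * Transmit-path MAC learning (enabled via the qlcnic_mac_learn module
 * parameter): the source MAC of each transmitted frame is hashed with
 * QLCNIC_MAC_HASH() into adapter->fhash and programmed into the firmware
 * filter table; an existing entry is re-added once it is older than
 * QLCNIC_READD_AGE seconds.
 */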
static void
qlcnic_send_filter(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring,
		struct cmd_desc_type0 *first_desc,
		struct sk_buff *skb)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	u64 src_addr = 0;
	__le16 vlan_id = 0;
	u8 hindex;

	if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->fhash.fnum >= adapter->fhash.fmax)
		return;

	/* Only NPAR capable devices support vlan based learning*/
	if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
		vlan_id = first_desc->vlan_TCI;
	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
			tmp_fil->vlan_id == vlan_id) {

			if (jiffies >
				(QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, src_addr, vlan_id,
								tx_ring);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);

	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}

AKS
1884static void
1885qlcnic_tso_check(struct net_device *netdev,
1886 struct qlcnic_host_tx_ring *tx_ring,
1887 struct cmd_desc_type0 *first_desc,
1888 struct sk_buff *skb)
1889{
1890 u8 opcode = TX_ETHER_PKT;
1891 __be16 protocol = skb->protocol;
8cf61f89
AKS
1892 u16 flags = 0;
1893 int copied, offset, copy_len, hdr_len = 0, tso = 0;
af19b491
AKS
1894 struct cmd_desc_type0 *hwdesc;
1895 struct vlan_ethhdr *vh;
8bfe8b91 1896 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2e9d722d 1897 u32 producer = tx_ring->producer;
7e56cac4
SC
1898 __le16 vlan_oob = first_desc->flags_opcode &
1899 cpu_to_le16(FLAGS_VLAN_OOB);
af19b491 1900
2e9d722d
AC
1901 if (*(skb->data) & BIT_0) {
1902 flags |= BIT_0;
1903 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1904 }
1905
af19b491
AKS
1906 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1907 skb_shinfo(skb)->gso_size > 0) {
1908
1909 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1910
1911 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1912 first_desc->total_hdr_length = hdr_len;
1913 if (vlan_oob) {
1914 first_desc->total_hdr_length += VLAN_HLEN;
1915 first_desc->tcp_hdr_offset = VLAN_HLEN;
1916 first_desc->ip_hdr_offset = VLAN_HLEN;
 1917 /* Only in the case of TSO on a VLAN device */
1918 flags |= FLAGS_VLAN_TAGGED;
1919 }
1920
1921 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1922 TX_TCP_LSO6 : TX_TCP_LSO;
1923 tso = 1;
1924
1925 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1926 u8 l4proto;
1927
1928 if (protocol == cpu_to_be16(ETH_P_IP)) {
1929 l4proto = ip_hdr(skb)->protocol;
1930
1931 if (l4proto == IPPROTO_TCP)
1932 opcode = TX_TCP_PKT;
1933 else if (l4proto == IPPROTO_UDP)
1934 opcode = TX_UDP_PKT;
1935 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1936 l4proto = ipv6_hdr(skb)->nexthdr;
1937
1938 if (l4proto == IPPROTO_TCP)
1939 opcode = TX_TCPV6_PKT;
1940 else if (l4proto == IPPROTO_UDP)
1941 opcode = TX_UDPV6_PKT;
1942 }
1943 }
1944
1945 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1946 first_desc->ip_hdr_offset += skb_network_offset(skb);
1947 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1948
1949 if (!tso)
1950 return;
1951
1952 /* For LSO, we need to copy the MAC/IP/TCP headers into
1953 * the descriptor ring
1954 */
af19b491
AKS
1955 copied = 0;
1956 offset = 2;
1957
1958 if (vlan_oob) {
1959 /* Create a TSO vlan header template for firmware */
1960
1961 hwdesc = &tx_ring->desc_head[producer];
1962 tx_ring->cmd_buf_arr[producer].skb = NULL;
1963
1964 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1965 hdr_len + VLAN_HLEN);
1966
1967 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1968 skb_copy_from_linear_data(skb, vh, 12);
1969 vh->h_vlan_proto = htons(ETH_P_8021Q);
7e56cac4
SC
1970 vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
1971
af19b491
AKS
1972 skb_copy_from_linear_data_offset(skb, 12,
1973 (char *)vh + 16, copy_len - 16);
1974
1975 copied = copy_len - VLAN_HLEN;
1976 offset = 0;
1977
1978 producer = get_next_index(producer, tx_ring->num_desc);
1979 }
1980
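	/* Copy the remaining MAC/IP/TCP header bytes into the command ring,
	 * spilling into additional descriptors as needed, so the firmware
	 * has a complete header template for segmentation.
	 */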
1981 while (copied < hdr_len) {
1982
1983 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1984 (hdr_len - copied));
1985
1986 hwdesc = &tx_ring->desc_head[producer];
1987 tx_ring->cmd_buf_arr[producer].skb = NULL;
1988
1989 skb_copy_from_linear_data_offset(skb, copied,
1990 (char *)hwdesc + offset, copy_len);
1991
1992 copied += copy_len;
1993 offset = 0;
1994
1995 producer = get_next_index(producer, tx_ring->num_desc);
1996 }
1997
1998 tx_ring->producer = producer;
1999 barrier();
8bfe8b91 2000 adapter->stats.lso_frames++;
af19b491
AKS
2001}
2002
2003static int
2004qlcnic_map_tx_skb(struct pci_dev *pdev,
2005 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2006{
2007 struct qlcnic_skb_frag *nf;
2008 struct skb_frag_struct *frag;
2009 int i, nr_frags;
2010 dma_addr_t map;
2011
2012 nr_frags = skb_shinfo(skb)->nr_frags;
2013 nf = &pbuf->frag_array[0];
2014
2015 map = pci_map_single(pdev, skb->data,
2016 skb_headlen(skb), PCI_DMA_TODEVICE);
2017 if (pci_dma_mapping_error(pdev, map))
2018 goto out_err;
2019
2020 nf->dma = map;
2021 nf->length = skb_headlen(skb);
2022
2023 for (i = 0; i < nr_frags; i++) {
2024 frag = &skb_shinfo(skb)->frags[i];
2025 nf = &pbuf->frag_array[i+1];
2026
2027 map = pci_map_page(pdev, frag->page, frag->page_offset,
2028 frag->size, PCI_DMA_TODEVICE);
2029 if (pci_dma_mapping_error(pdev, map))
2030 goto unwind;
2031
2032 nf->dma = map;
2033 nf->length = frag->size;
2034 }
2035
2036 return 0;
2037
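	/* A mapping failed part-way: unmap the fragments mapped so far in
	 * reverse order, then the linear part of the skb.
	 */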
2038unwind:
2039 while (--i >= 0) {
2040 nf = &pbuf->frag_array[i+1];
2041 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2042 }
2043
2044 nf = &pbuf->frag_array[0];
2045 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2046
2047out_err:
2048 return -ENOMEM;
2049}
2050
8cf61f89
AKS
2051static int
2052qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
2053 struct sk_buff *skb,
2054 struct cmd_desc_type0 *first_desc)
2055{
2056 u8 opcode = 0;
2057 u16 flags = 0;
2058 __be16 protocol = skb->protocol;
2059 struct vlan_ethhdr *vh;
2060
2061 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
2062 vh = (struct vlan_ethhdr *)skb->data;
2063 protocol = vh->h_vlan_encapsulated_proto;
2064 flags = FLAGS_VLAN_TAGGED;
2065 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
2066 } else if (vlan_tx_tag_present(skb)) {
2067 flags = FLAGS_VLAN_OOB;
2068 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
2069 }
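	/* With a port VLAN (pvid) configured, already-tagged frames are only
	 * accepted when tagging is enabled; untagged frames get the pvid
	 * inserted as an out-of-band tag.
	 */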
2070 if (unlikely(adapter->pvid)) {
2071 if (first_desc->vlan_TCI &&
2072 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2073 return -EIO;
2074 if (first_desc->vlan_TCI &&
2075 (adapter->flags & QLCNIC_TAGGING_ENABLED))
2076 goto set_flags;
2077
2078 flags = FLAGS_VLAN_OOB;
2079 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
2080 }
2081set_flags:
2082 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2083 return 0;
2084}
2085
af19b491
AKS
2086static inline void
2087qlcnic_clear_cmddesc(u64 *desc)
2088{
2089 desc[0] = 0ULL;
2090 desc[2] = 0ULL;
8cf61f89 2091 desc[7] = 0ULL;
af19b491
AKS
2092}
2093
cdaff185 2094netdev_tx_t
af19b491
AKS
2095qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2096{
2097 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2098 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2099 struct qlcnic_cmd_buffer *pbuf;
2100 struct qlcnic_skb_frag *buffrag;
2101 struct cmd_desc_type0 *hwdesc, *first_desc;
2102 struct pci_dev *pdev;
dcb50aff 2103 struct ethhdr *phdr;
af19b491
AKS
2104 int i, k;
2105
2106 u32 producer;
2107 int frag_count, no_of_desc;
2108 u32 num_txd = tx_ring->num_desc;
2109
780ab790
AKS
2110 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2111 netif_stop_queue(netdev);
2112 return NETDEV_TX_BUSY;
2113 }
2114
fe4d434d 2115 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff
RB
2116 phdr = (struct ethhdr *)skb->data;
2117 if (compare_ether_addr(phdr->h_source,
fe4d434d
SC
2118 adapter->mac_addr))
2119 goto drop_packet;
2120 }
2121
af19b491
AKS
2122 frag_count = skb_shinfo(skb)->nr_frags + 1;
2123
 2124 /* 4 fragments per cmd desc */
2125 no_of_desc = (frag_count + 3) >> 2;
2126
ef71ff83 2127 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2128 netif_stop_queue(netdev);
ef71ff83
RB
2129 smp_mb();
2130 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2131 netif_start_queue(netdev);
2132 else {
2133 adapter->stats.xmit_off++;
2134 return NETDEV_TX_BUSY;
2135 }
af19b491
AKS
2136 }
2137
2138 producer = tx_ring->producer;
2139 pbuf = &tx_ring->cmd_buf_arr[producer];
2140
2141 pdev = adapter->pdev;
2142
8cf61f89
AKS
2143 first_desc = hwdesc = &tx_ring->desc_head[producer];
2144 qlcnic_clear_cmddesc((u64 *)hwdesc);
2145
2146 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
2147 goto drop_packet;
2148
8ae6df97
AKS
2149 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2150 adapter->stats.tx_dma_map_error++;
af19b491 2151 goto drop_packet;
8ae6df97 2152 }
af19b491
AKS
2153
2154 pbuf->skb = skb;
2155 pbuf->frag_count = frag_count;
2156
af19b491
AKS
2157 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2158 qlcnic_set_tx_port(first_desc, adapter->portnum);
2159
2160 for (i = 0; i < frag_count; i++) {
2161
2162 k = i % 4;
2163
2164 if ((k == 0) && (i > 0)) {
 2165 /* move to next desc. */
2166 producer = get_next_index(producer, num_txd);
2167 hwdesc = &tx_ring->desc_head[producer];
2168 qlcnic_clear_cmddesc((u64 *)hwdesc);
2169 tx_ring->cmd_buf_arr[producer].skb = NULL;
2170 }
2171
2172 buffrag = &pbuf->frag_array[i];
2173
2174 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2175 switch (k) {
2176 case 0:
2177 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2178 break;
2179 case 1:
2180 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2181 break;
2182 case 2:
2183 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2184 break;
2185 case 3:
2186 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2187 break;
2188 }
2189 }
2190
2191 tx_ring->producer = get_next_index(producer, num_txd);
2192
2193 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
2194
b5e5492c
AKS
2195 if (qlcnic_mac_learn)
2196 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2197
af19b491
AKS
2198 qlcnic_update_cmd_producer(adapter, tx_ring);
2199
2200 adapter->stats.txbytes += skb->len;
2201 adapter->stats.xmitcalled++;
2202
2203 return NETDEV_TX_OK;
2204
2205drop_packet:
2206 adapter->stats.txdropped++;
2207 dev_kfree_skb_any(skb);
2208 return NETDEV_TX_OK;
2209}
2210
2211static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2212{
2213 struct net_device *netdev = adapter->netdev;
2214 u32 temp, temp_state, temp_val;
2215 int rv = 0;
2216
2217 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2218
2219 temp_state = qlcnic_get_temp_state(temp);
2220 temp_val = qlcnic_get_temp_val(temp);
2221
2222 if (temp_state == QLCNIC_TEMP_PANIC) {
2223 dev_err(&netdev->dev,
2224 "Device temperature %d degrees C exceeds"
2225 " maximum allowed. Hardware has been shut down.\n",
2226 temp_val);
2227 rv = 1;
2228 } else if (temp_state == QLCNIC_TEMP_WARN) {
2229 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2230 dev_err(&netdev->dev,
2231 "Device temperature %d degrees C "
2232 "exceeds operating range."
2233 " Immediate action needed.\n",
2234 temp_val);
2235 }
2236 } else {
2237 if (adapter->temp == QLCNIC_TEMP_WARN) {
2238 dev_info(&netdev->dev,
2239 "Device temperature is now %d degrees C"
2240 " in normal range.\n", temp_val);
2241 }
2242 }
2243 adapter->temp = temp_state;
2244 return rv;
2245}
2246
2247void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2248{
2249 struct net_device *netdev = adapter->netdev;
2250
2251 if (adapter->ahw.linkup && !linkup) {
69324275 2252 netdev_info(netdev, "NIC Link is down\n");
af19b491
AKS
2253 adapter->ahw.linkup = 0;
2254 if (netif_running(netdev)) {
2255 netif_carrier_off(netdev);
2256 netif_stop_queue(netdev);
2257 }
2258 } else if (!adapter->ahw.linkup && linkup) {
69324275 2259 netdev_info(netdev, "NIC Link is up\n");
af19b491
AKS
2260 adapter->ahw.linkup = 1;
2261 if (netif_running(netdev)) {
2262 netif_carrier_on(netdev);
2263 netif_wake_queue(netdev);
2264 }
2265 }
2266}
2267
2268static void qlcnic_tx_timeout(struct net_device *netdev)
2269{
2270 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2271
2272 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2273 return;
2274
2275 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2276
2277 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2278 adapter->need_fw_reset = 1;
2279 else
2280 adapter->reset_context = 1;
af19b491
AKS
2281}
2282
2283static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2284{
2285 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2286 struct net_device_stats *stats = &netdev->stats;
2287
af19b491
AKS
2288 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2289 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2290 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2291 stats->tx_bytes = adapter->stats.txbytes;
2292 stats->rx_dropped = adapter->stats.rxdropped;
2293 stats->tx_dropped = adapter->stats.txdropped;
2294
2295 return stats;
2296}
2297
7eb9855d 2298static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2299{
af19b491
AKS
2300 u32 status;
2301
2302 status = readl(adapter->isr_int_vec);
2303
2304 if (!(status & adapter->int_vec_bit))
2305 return IRQ_NONE;
2306
2307 /* check interrupt state machine, to be sure */
2308 status = readl(adapter->crb_int_state_reg);
2309 if (!ISR_LEGACY_INT_TRIGGERED(status))
2310 return IRQ_NONE;
2311
2312 writel(0xffffffff, adapter->tgt_status_reg);
2313 /* read twice to ensure write is flushed */
2314 readl(adapter->isr_int_vec);
2315 readl(adapter->isr_int_vec);
2316
7eb9855d
AKS
2317 return IRQ_HANDLED;
2318}
2319
2320static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2321{
2322 struct qlcnic_host_sds_ring *sds_ring = data;
2323 struct qlcnic_adapter *adapter = sds_ring->adapter;
2324
2325 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2326 goto done;
2327 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2328 writel(0xffffffff, adapter->tgt_status_reg);
2329 goto done;
2330 }
2331
2332 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2333 return IRQ_NONE;
2334
2335done:
2336 adapter->diag_cnt++;
2337 qlcnic_enable_int(sds_ring);
2338 return IRQ_HANDLED;
2339}
2340
2341static irqreturn_t qlcnic_intr(int irq, void *data)
2342{
2343 struct qlcnic_host_sds_ring *sds_ring = data;
2344 struct qlcnic_adapter *adapter = sds_ring->adapter;
2345
2346 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2347 return IRQ_NONE;
2348
af19b491
AKS
2349 napi_schedule(&sds_ring->napi);
2350
2351 return IRQ_HANDLED;
2352}
2353
2354static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2355{
2356 struct qlcnic_host_sds_ring *sds_ring = data;
2357 struct qlcnic_adapter *adapter = sds_ring->adapter;
2358
2359 /* clear interrupt */
2360 writel(0xffffffff, adapter->tgt_status_reg);
2361
2362 napi_schedule(&sds_ring->napi);
2363 return IRQ_HANDLED;
2364}
2365
2366static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2367{
2368 struct qlcnic_host_sds_ring *sds_ring = data;
2369
2370 napi_schedule(&sds_ring->napi);
2371 return IRQ_HANDLED;
2372}
2373
2374static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2375{
2376 u32 sw_consumer, hw_consumer;
2377 int count = 0, i;
2378 struct qlcnic_cmd_buffer *buffer;
2379 struct pci_dev *pdev = adapter->pdev;
2380 struct net_device *netdev = adapter->netdev;
2381 struct qlcnic_skb_frag *frag;
2382 int done;
2383 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2384
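	/* Reclaim Tx buffers between the driver's sw_consumer and the
	 * hardware-updated hw_consumer: unmap their DMA and free the skbs.
	 */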
2385 if (!spin_trylock(&adapter->tx_clean_lock))
2386 return 1;
2387
2388 sw_consumer = tx_ring->sw_consumer;
2389 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2390
2391 while (sw_consumer != hw_consumer) {
2392 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2393 if (buffer->skb) {
2394 frag = &buffer->frag_array[0];
2395 pci_unmap_single(pdev, frag->dma, frag->length,
2396 PCI_DMA_TODEVICE);
2397 frag->dma = 0ULL;
2398 for (i = 1; i < buffer->frag_count; i++) {
2399 frag++;
2400 pci_unmap_page(pdev, frag->dma, frag->length,
2401 PCI_DMA_TODEVICE);
2402 frag->dma = 0ULL;
2403 }
2404
2405 adapter->stats.xmitfinished++;
2406 dev_kfree_skb_any(buffer->skb);
2407 buffer->skb = NULL;
2408 }
2409
2410 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2411 if (++count >= MAX_STATUS_HANDLE)
2412 break;
2413 }
2414
2415 if (count && netif_running(netdev)) {
2416 tx_ring->sw_consumer = sw_consumer;
2417
2418 smp_mb();
2419
2420 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2421 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2422 netif_wake_queue(netdev);
8bfe8b91 2423 adapter->stats.xmit_on++;
af19b491 2424 }
af19b491 2425 }
ef71ff83 2426 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2427 }
2428 /*
 2429 * If everything is freed up to the consumer, then check if the ring is full.
 2430 * If the ring is full, then check if more needs to be freed and
 2431 * schedule the callback again.
2432 *
2433 * This happens when there are 2 CPUs. One could be freeing and the
2434 * other filling it. If the ring is full when we get out of here and
2435 * the card has already interrupted the host then the host can miss the
2436 * interrupt.
2437 *
2438 * There is still a possible race condition and the host could miss an
2439 * interrupt. The card has to take care of this.
2440 */
2441 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2442 done = (sw_consumer == hw_consumer);
2443 spin_unlock(&adapter->tx_clean_lock);
2444
2445 return done;
2446}
2447
2448static int qlcnic_poll(struct napi_struct *napi, int budget)
2449{
2450 struct qlcnic_host_sds_ring *sds_ring =
2451 container_of(napi, struct qlcnic_host_sds_ring, napi);
2452
2453 struct qlcnic_adapter *adapter = sds_ring->adapter;
2454
2455 int tx_complete;
2456 int work_done;
2457
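	/* Reclaim Tx completions and process Rx within the NAPI budget;
	 * re-arm the interrupt only once both are fully drained.
	 */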
2458 tx_complete = qlcnic_process_cmd_ring(adapter);
2459
2460 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2461
2462 if ((work_done < budget) && tx_complete) {
2463 napi_complete(&sds_ring->napi);
2464 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2465 qlcnic_enable_int(sds_ring);
2466 }
2467
2468 return work_done;
2469}
2470
8f891387 2471static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2472{
2473 struct qlcnic_host_sds_ring *sds_ring =
2474 container_of(napi, struct qlcnic_host_sds_ring, napi);
2475
2476 struct qlcnic_adapter *adapter = sds_ring->adapter;
2477 int work_done;
2478
2479 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2480
2481 if (work_done < budget) {
2482 napi_complete(&sds_ring->napi);
2483 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2484 qlcnic_enable_int(sds_ring);
2485 }
2486
2487 return work_done;
2488}
2489
af19b491
AKS
2490#ifdef CONFIG_NET_POLL_CONTROLLER
2491static void qlcnic_poll_controller(struct net_device *netdev)
2492{
bf82791e
YL
2493 int ring;
2494 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2495 struct qlcnic_adapter *adapter = netdev_priv(netdev);
bf82791e
YL
2496 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
2497
af19b491 2498 disable_irq(adapter->irq);
bf82791e
YL
2499 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2500 sds_ring = &recv_ctx->sds_rings[ring];
2501 qlcnic_intr(adapter->irq, sds_ring);
2502 }
af19b491
AKS
2503 enable_irq(adapter->irq);
2504}
2505#endif
2506
6df900e9
SC
2507static void
2508qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2509{
2510 u32 val;
2511
2512 val = adapter->portnum & 0xf;
2513 val |= encoding << 7;
2514 val |= (jiffies - adapter->dev_rst_time) << 8;
2515
2516 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2517 adapter->dev_rst_time = jiffies;
2518}
2519
ade91f8e
AKS
2520static int
2521qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2522{
2523 u32 val;
2524
2525 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2526 state != QLCNIC_DEV_NEED_QUISCENT);
2527
2528 if (qlcnic_api_lock(adapter))
ade91f8e 2529 return -EIO;
af19b491
AKS
2530
2531 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2532
2533 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2534 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2535 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2536 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2537
2538 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2539
2540 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2541
2542 return 0;
af19b491
AKS
2543}
2544
1b95a839
AKS
2545static int
2546qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2547{
2548 u32 val;
2549
2550 if (qlcnic_api_lock(adapter))
2551 return -EBUSY;
2552
2553 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2554 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2555 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2556
2557 qlcnic_api_unlock(adapter);
2558
2559 return 0;
2560}
2561
af19b491 2562static void
21854f02 2563qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2564{
2565 u32 val;
2566
2567 if (qlcnic_api_lock(adapter))
2568 goto err;
2569
31018e06 2570 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2571 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2572 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2573
21854f02
AKS
2574 if (failed) {
2575 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2576 dev_info(&adapter->pdev->dev,
2577 "Device state set to Failed. Please Reboot\n");
2578 } else if (!(val & 0x11111111))
af19b491
AKS
2579 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2580
2581 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2582 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2583 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2584
2585 qlcnic_api_unlock(adapter);
2586err:
2587 adapter->fw_fail_cnt = 0;
2588 clear_bit(__QLCNIC_START_FW, &adapter->state);
2589 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2590}
2591
f73dfc50 2592/* Grab the api lock before checking state */
af19b491
AKS
2593static int
2594qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2595{
2596 int act, state;
2597
2598 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2599 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491
AKS
2600
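	/* Each PCI function owns one nibble in DRV_ACTIVE/DRV_STATE; proceed
	 * only when every active function has acked reset-ready or
	 * quiescent-ready.
	 */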
2601 if (((state & 0x11111111) == (act & 0x11111111)) ||
2602 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2603 return 0;
2604 else
2605 return 1;
2606}
2607
96f8118c
SC
2608static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2609{
2610 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2611
2612 if (val != QLCNIC_DRV_IDC_VER) {
2613 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2614 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2615 }
2616
2617 return 0;
2618}
2619
af19b491
AKS
2620static int
2621qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2622{
2623 u32 val, prev_state;
aa5e18c0 2624 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2625 u8 portnum = adapter->portnum;
96f8118c 2626 u8 ret;
af19b491 2627
f73dfc50
AKS
2628 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2629 return 1;
2630
af19b491
AKS
2631 if (qlcnic_api_lock(adapter))
2632 return -1;
2633
31018e06 2634 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2635 if (!(val & (1 << (portnum * 4)))) {
2636 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2637 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2638 }
2639
2640 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2641 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2642
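	/* IDC handshake: the function that finds the device COLD claims it
	 * and loads the firmware; a READY device only needs an IDC version
	 * check, while reset/quiescent states are acked and waited on below.
	 */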
2643 switch (prev_state) {
2644 case QLCNIC_DEV_COLD:
bbd8c6a4 2645 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2646 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2647 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2648 qlcnic_api_unlock(adapter);
2649 return 1;
2650
2651 case QLCNIC_DEV_READY:
96f8118c 2652 ret = qlcnic_check_idc_ver(adapter);
af19b491 2653 qlcnic_api_unlock(adapter);
96f8118c 2654 return ret;
af19b491
AKS
2655
2656 case QLCNIC_DEV_NEED_RESET:
2657 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2658 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2659 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2660 break;
2661
2662 case QLCNIC_DEV_NEED_QUISCENT:
2663 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2664 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2665 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2666 break;
2667
2668 case QLCNIC_DEV_FAILED:
a7fc948f 2669 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2670 qlcnic_api_unlock(adapter);
2671 return -1;
bbd8c6a4
AKS
2672
2673 case QLCNIC_DEV_INITIALIZING:
2674 case QLCNIC_DEV_QUISCENT:
2675 break;
af19b491
AKS
2676 }
2677
2678 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2679
2680 do {
af19b491 2681 msleep(1000);
a5e463d0
SC
2682 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2683
2684 if (prev_state == QLCNIC_DEV_QUISCENT)
2685 continue;
2686 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2687
65b5b420
AKS
2688 if (!dev_init_timeo) {
2689 dev_err(&adapter->pdev->dev,
2690 "Waiting for device to initialize timeout\n");
af19b491 2691 return -1;
65b5b420 2692 }
af19b491
AKS
2693
2694 if (qlcnic_api_lock(adapter))
2695 return -1;
2696
2697 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2698 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2699 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2700
96f8118c 2701 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2702 qlcnic_api_unlock(adapter);
2703
96f8118c 2704 return ret;
af19b491
AKS
2705}
2706
2707static void
2708qlcnic_fwinit_work(struct work_struct *work)
2709{
2710 struct qlcnic_adapter *adapter = container_of(work,
2711 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2712 u32 dev_state = 0xf;
af19b491 2713
f73dfc50
AKS
2714 if (qlcnic_api_lock(adapter))
2715 goto err_ret;
af19b491 2716
a5e463d0 2717 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620
AKS
2718 if (dev_state == QLCNIC_DEV_QUISCENT ||
2719 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
a5e463d0
SC
2720 qlcnic_api_unlock(adapter);
2721 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2722 FW_POLL_DELAY * 2);
2723 return;
2724 }
2725
9f26f547 2726 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2727 qlcnic_api_unlock(adapter);
2728 goto wait_npar;
9f26f547
AC
2729 }
2730
f73dfc50
AKS
2731 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2732 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2733 adapter->reset_ack_timeo);
2734 goto skip_ack_check;
2735 }
2736
2737 if (!qlcnic_check_drv_state(adapter)) {
2738skip_ack_check:
2739 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0 2740
f73dfc50
AKS
2741 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2742 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2743 QLCNIC_DEV_INITIALIZING);
2744 set_bit(__QLCNIC_START_FW, &adapter->state);
2745 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2746 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2747 }
2748
f73dfc50
AKS
2749 qlcnic_api_unlock(adapter);
2750
9f26f547 2751 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2752 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2753 adapter->fw_wait_cnt = 0;
af19b491
AKS
2754 return;
2755 }
af19b491
AKS
2756 goto err_ret;
2757 }
2758
f73dfc50 2759 qlcnic_api_unlock(adapter);
aa5e18c0 2760
9f26f547 2761wait_npar:
af19b491 2762 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2763 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2764
af19b491 2765 switch (dev_state) {
3c4b23b1 2766 case QLCNIC_DEV_READY:
9f26f547 2767 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2768 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2769 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2770 return;
2771 }
3c4b23b1
AKS
2772 case QLCNIC_DEV_FAILED:
2773 break;
2774 default:
2775 qlcnic_schedule_work(adapter,
2776 qlcnic_fwinit_work, FW_POLL_DELAY);
2777 return;
af19b491
AKS
2778 }
2779
2780err_ret:
f73dfc50
AKS
2781 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2782 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2783 netif_device_attach(adapter->netdev);
21854f02 2784 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2785}
2786
2787static void
2788qlcnic_detach_work(struct work_struct *work)
2789{
2790 struct qlcnic_adapter *adapter = container_of(work,
2791 struct qlcnic_adapter, fw_work.work);
2792 struct net_device *netdev = adapter->netdev;
2793 u32 status;
2794
2795 netif_device_detach(netdev);
2796
b8c17620
AKS
 2797 /* Don't grab the rtnl lock during quiescent mode */
2798 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2799 if (netif_running(netdev))
2800 __qlcnic_down(adapter, netdev);
2801 } else
2802 qlcnic_down(adapter, netdev);
af19b491 2803
af19b491
AKS
2804 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2805
2806 if (status & QLCNIC_RCODE_FATAL_ERROR)
2807 goto err_ret;
2808
2809 if (adapter->temp == QLCNIC_TEMP_PANIC)
2810 goto err_ret;
2811
ade91f8e
AKS
2812 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2813 goto err_ret;
af19b491
AKS
2814
2815 adapter->fw_wait_cnt = 0;
2816
2817 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2818
2819 return;
2820
2821err_ret:
65b5b420
AKS
2822 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2823 status, adapter->temp);
34ce3626 2824 netif_device_attach(netdev);
21854f02 2825 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2826}
2827
3c4b23b1
AKS
 2828/* Transition NPAR state to non-operational */
2829static void
2830qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2831{
2832 u32 state;
2833
2834 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2835 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2836 return;
2837
2838 if (qlcnic_api_lock(adapter))
2839 return;
2840 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2841 qlcnic_api_unlock(adapter);
2842}
2843
f73dfc50 2844/* Transition to RESET state from READY state only */
af19b491
AKS
2845static void
2846qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2847{
2848 u32 state;
2849
cea8975e 2850 adapter->need_fw_reset = 1;
af19b491
AKS
2851 if (qlcnic_api_lock(adapter))
2852 return;
2853
2854 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2855
f73dfc50 2856 if (state == QLCNIC_DEV_READY) {
af19b491 2857 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2858 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2859 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2860 }
2861
3c4b23b1 2862 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2863 qlcnic_api_unlock(adapter);
2864}
2865
9f26f547
AC
 2866/* Transition to NPAR READY state from NPAR NOT READY state */
2867static void
2868qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2869{
9f26f547
AC
2870 if (qlcnic_api_lock(adapter))
2871 return;
2872
3c4b23b1
AKS
2873 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2874 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2875
2876 qlcnic_api_unlock(adapter);
2877}
2878
af19b491
AKS
2879static void
2880qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2881 work_func_t func, int delay)
2882{
451724c8
SC
2883 if (test_bit(__QLCNIC_AER, &adapter->state))
2884 return;
2885
af19b491 2886 INIT_DELAYED_WORK(&adapter->fw_work, func);
f7ec804a
AKS
2887 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
2888 round_jiffies_relative(delay));
af19b491
AKS
2889}
2890
2891static void
2892qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2893{
2894 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2895 msleep(10);
2896
2897 cancel_delayed_work_sync(&adapter->fw_work);
2898}
2899
2900static void
2901qlcnic_attach_work(struct work_struct *work)
2902{
2903 struct qlcnic_adapter *adapter = container_of(work,
2904 struct qlcnic_adapter, fw_work.work);
2905 struct net_device *netdev = adapter->netdev;
b18971d1 2906 u32 npar_state;
af19b491 2907
b18971d1
AKS
2908 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2909 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2910 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2911 qlcnic_clr_all_drv_state(adapter, 0);
2912 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2913 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2914 FW_POLL_DELAY);
2915 else
2916 goto attach;
 2917 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2918 return;
2919 }
2920attach:
af19b491 2921 if (netif_running(netdev)) {
52486a3a 2922 if (qlcnic_up(adapter, netdev))
af19b491 2923 goto done;
af19b491 2924
aec1e845 2925 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
2926 }
2927
af19b491 2928done:
34ce3626 2929 netif_device_attach(netdev);
af19b491
AKS
2930 adapter->fw_fail_cnt = 0;
2931 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
2932
2933 if (!qlcnic_clr_drv_state(adapter))
2934 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2935 FW_POLL_DELAY);
af19b491
AKS
2936}
2937
2938static int
2939qlcnic_check_health(struct qlcnic_adapter *adapter)
2940{
4e70812b 2941 u32 state = 0, heartbeat;
af19b491
AKS
2942 struct net_device *netdev = adapter->netdev;
2943
2944 if (qlcnic_check_temp(adapter))
2945 goto detach;
2946
2372a5f1 2947 if (adapter->need_fw_reset)
af19b491 2948 qlcnic_dev_request_reset(adapter);
af19b491
AKS
2949
2950 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620 2951 if (state == QLCNIC_DEV_NEED_RESET) {
3c4b23b1 2952 qlcnic_set_npar_non_operational(adapter);
af19b491 2953 adapter->need_fw_reset = 1;
b8c17620
AKS
2954 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
2955 goto detach;
af19b491 2956
4e70812b
SC
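	/* The firmware advances PEG_ALIVE_COUNTER while healthy; if the
	 * counter stops changing for FW_FAIL_THRESH consecutive polls, treat
	 * the firmware as hung and request a reset.
	 */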
2957 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2958 if (heartbeat != adapter->heartbeat) {
2959 adapter->heartbeat = heartbeat;
af19b491
AKS
2960 adapter->fw_fail_cnt = 0;
2961 if (adapter->need_fw_reset)
2962 goto detach;
68bf1c68 2963
9ce13ca8 2964 if (adapter->reset_context && auto_fw_reset) {
68bf1c68
AKS
2965 qlcnic_reset_hw_context(adapter);
2966 adapter->netdev->trans_start = jiffies;
2967 }
2968
af19b491
AKS
2969 return 0;
2970 }
2971
2972 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2973 return 0;
2974
2975 qlcnic_dev_request_reset(adapter);
2976
9ce13ca8 2977 if (auto_fw_reset)
0df170b6 2978 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
2979
2980 dev_info(&netdev->dev, "firmware hang detected\n");
2981
2982detach:
2983 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2984 QLCNIC_DEV_NEED_RESET;
2985
9ce13ca8 2986 if (auto_fw_reset &&
65b5b420
AKS
2987 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2988
af19b491 2989 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
2990 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2991 }
af19b491
AKS
2992
2993 return 1;
2994}
2995
2996static void
2997qlcnic_fw_poll_work(struct work_struct *work)
2998{
2999 struct qlcnic_adapter *adapter = container_of(work,
3000 struct qlcnic_adapter, fw_work.work);
3001
3002 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3003 goto reschedule;
3004
3005
3006 if (qlcnic_check_health(adapter))
3007 return;
3008
b5e5492c
AKS
3009 if (adapter->fhash.fnum)
3010 qlcnic_prune_lb_filters(adapter);
3011
af19b491
AKS
3012reschedule:
3013 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3014}
3015
451724c8
SC
3016static int qlcnic_is_first_func(struct pci_dev *pdev)
3017{
3018 struct pci_dev *oth_pdev;
3019 int val = pdev->devfn;
3020
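	/* Scan the lower-numbered PCI functions on this device; if any is
	 * already powered up (not in D3cold), another function owns the
	 * firmware and this one is not the first.
	 */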
3021 while (val-- > 0) {
3022 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3023 (pdev->bus), pdev->bus->number,
3024 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3025 if (!oth_pdev)
3026 continue;
451724c8 3027
bfc978fa
AKS
3028 if (oth_pdev->current_state != PCI_D3cold) {
3029 pci_dev_put(oth_pdev);
451724c8 3030 return 0;
bfc978fa
AKS
3031 }
3032 pci_dev_put(oth_pdev);
451724c8
SC
3033 }
3034 return 1;
3035}
3036
3037static int qlcnic_attach_func(struct pci_dev *pdev)
3038{
3039 int err, first_func;
3040 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3041 struct net_device *netdev = adapter->netdev;
3042
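	/* AER slot-reset path: re-enable the device, restart the firmware if
	 * this is the first active function, then re-attach and bring the
	 * interface back up.
	 */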
3043 pdev->error_state = pci_channel_io_normal;
3044
3045 err = pci_enable_device(pdev);
3046 if (err)
3047 return err;
3048
3049 pci_set_power_state(pdev, PCI_D0);
3050 pci_set_master(pdev);
3051 pci_restore_state(pdev);
3052
3053 first_func = qlcnic_is_first_func(pdev);
3054
3055 if (qlcnic_api_lock(adapter))
3056 return -EINVAL;
3057
933fce12 3058 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
3059 adapter->need_fw_reset = 1;
3060 set_bit(__QLCNIC_START_FW, &adapter->state);
3061 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3062 QLCDB(adapter, DRV, "Restarting fw\n");
3063 }
3064 qlcnic_api_unlock(adapter);
3065
3066 err = adapter->nic_ops->start_firmware(adapter);
3067 if (err)
3068 return err;
3069
3070 qlcnic_clr_drv_state(adapter);
3071 qlcnic_setup_intr(adapter);
3072
3073 if (netif_running(netdev)) {
3074 err = qlcnic_attach(adapter);
3075 if (err) {
21854f02 3076 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3077 clear_bit(__QLCNIC_AER, &adapter->state);
3078 netif_device_attach(netdev);
3079 return err;
3080 }
3081
3082 err = qlcnic_up(adapter, netdev);
3083 if (err)
3084 goto done;
3085
aec1e845 3086 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3087 }
3088 done:
3089 netif_device_attach(netdev);
3090 return err;
3091}
3092
3093static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3094 pci_channel_state_t state)
3095{
3096 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3097 struct net_device *netdev = adapter->netdev;
3098
3099 if (state == pci_channel_io_perm_failure)
3100 return PCI_ERS_RESULT_DISCONNECT;
3101
3102 if (state == pci_channel_io_normal)
3103 return PCI_ERS_RESULT_RECOVERED;
3104
3105 set_bit(__QLCNIC_AER, &adapter->state);
3106 netif_device_detach(netdev);
3107
3108 cancel_delayed_work_sync(&adapter->fw_work);
3109
3110 if (netif_running(netdev))
3111 qlcnic_down(adapter, netdev);
3112
3113 qlcnic_detach(adapter);
3114 qlcnic_teardown_intr(adapter);
3115
3116 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3117
3118 pci_save_state(pdev);
3119 pci_disable_device(pdev);
3120
3121 return PCI_ERS_RESULT_NEED_RESET;
3122}
3123
3124static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3125{
3126 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3127 PCI_ERS_RESULT_RECOVERED;
3128}
3129
3130static void qlcnic_io_resume(struct pci_dev *pdev)
3131{
3132 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3133
3134 pci_cleanup_aer_uncorrect_error_status(pdev);
3135
3136 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3137 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3138 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3139 FW_POLL_DELAY);
3140}
3141
87eb743b
AC
3142static int
3143qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3144{
3145 int err;
3146
3147 err = qlcnic_can_start_firmware(adapter);
3148 if (err)
3149 return err;
3150
78f84e1a
AKS
3151 err = qlcnic_check_npar_opertional(adapter);
3152 if (err)
3153 return err;
3c4b23b1 3154
174240a8
RB
3155 err = qlcnic_initialize_nic(adapter);
3156 if (err)
3157 return err;
3158
87eb743b
AC
3159 qlcnic_check_options(adapter);
3160
7373373d
RB
3161 err = qlcnic_set_eswitch_port_config(adapter);
3162 if (err)
3163 return err;
3164
87eb743b
AC
3165 adapter->need_fw_reset = 0;
3166
3167 return err;
3168}
3169
3170static int
3171qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3172{
3173 return -EOPNOTSUPP;
3174}
3175
3176static int
3177qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3178{
3179 return -EOPNOTSUPP;
3180}
3181
af19b491
AKS
3182static ssize_t
3183qlcnic_store_bridged_mode(struct device *dev,
3184 struct device_attribute *attr, const char *buf, size_t len)
3185{
3186 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3187 unsigned long new;
3188 int ret = -EINVAL;
3189
3190 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3191 goto err_out;
3192
8a15ad1f 3193 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3194 goto err_out;
3195
3196 if (strict_strtoul(buf, 2, &new))
3197 goto err_out;
3198
2e9d722d 3199 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3200 ret = len;
3201
3202err_out:
3203 return ret;
3204}
3205
3206static ssize_t
3207qlcnic_show_bridged_mode(struct device *dev,
3208 struct device_attribute *attr, char *buf)
3209{
3210 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3211 int bridged_mode = 0;
3212
3213 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3214 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3215
3216 return sprintf(buf, "%d\n", bridged_mode);
3217}
3218
3219static struct device_attribute dev_attr_bridged_mode = {
3220 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3221 .show = qlcnic_show_bridged_mode,
3222 .store = qlcnic_store_bridged_mode,
3223};
3224
3225static ssize_t
3226qlcnic_store_diag_mode(struct device *dev,
3227 struct device_attribute *attr, const char *buf, size_t len)
3228{
3229 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3230 unsigned long new;
3231
3232 if (strict_strtoul(buf, 2, &new))
3233 return -EINVAL;
3234
3235 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3236 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3237
3238 return len;
3239}
3240
3241static ssize_t
3242qlcnic_show_diag_mode(struct device *dev,
3243 struct device_attribute *attr, char *buf)
3244{
3245 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3246
3247 return sprintf(buf, "%d\n",
3248 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3249}
3250
3251static struct device_attribute dev_attr_diag_mode = {
3252 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3253 .show = qlcnic_show_diag_mode,
3254 .store = qlcnic_store_diag_mode,
3255};
3256
3257static int
3258qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3259 loff_t offset, size_t size)
3260{
897e8c7c
DP
3261 size_t crb_size = 4;
3262
af19b491
AKS
3263 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3264 return -EIO;
3265
897e8c7c
DP
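	/* CRB registers take aligned 4-byte accesses; the CAMQM window below
	 * CRB space requires aligned 8-byte accesses.
	 */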
3266 if (offset < QLCNIC_PCI_CRBSPACE) {
3267 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3268 QLCNIC_PCI_CAMQM_END))
3269 crb_size = 8;
3270 else
3271 return -EINVAL;
3272 }
af19b491 3273
897e8c7c
DP
3274 if ((size != crb_size) || (offset & (crb_size-1)))
3275 return -EINVAL;
af19b491
AKS
3276
3277 return 0;
3278}
3279
3280static ssize_t
2c3c8bea
CW
3281qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3282 struct bin_attribute *attr,
af19b491
AKS
3283 char *buf, loff_t offset, size_t size)
3284{
3285 struct device *dev = container_of(kobj, struct device, kobj);
3286 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3287 u32 data;
897e8c7c 3288 u64 qmdata;
af19b491
AKS
3289 int ret;
3290
3291 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3292 if (ret != 0)
3293 return ret;
3294
897e8c7c
DP
3295 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3296 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3297 memcpy(buf, &qmdata, size);
3298 } else {
3299 data = QLCRD32(adapter, offset);
3300 memcpy(buf, &data, size);
3301 }
af19b491
AKS
3302 return size;
3303}
3304
3305static ssize_t
2c3c8bea
CW
3306qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3307 struct bin_attribute *attr,
af19b491
AKS
3308 char *buf, loff_t offset, size_t size)
3309{
3310 struct device *dev = container_of(kobj, struct device, kobj);
3311 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3312 u32 data;
897e8c7c 3313 u64 qmdata;
af19b491
AKS
3314 int ret;
3315
3316 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3317 if (ret != 0)
3318 return ret;
3319
897e8c7c
DP
3320 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3321 memcpy(&qmdata, buf, size);
3322 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3323 } else {
3324 memcpy(&data, buf, size);
3325 QLCWR32(adapter, offset, data);
3326 }
af19b491
AKS
3327 return size;
3328}
3329
3330static int
3331qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3332 loff_t offset, size_t size)
3333{
3334 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3335 return -EIO;
3336
3337 if ((size != 8) || (offset & 0x7))
3338 return -EIO;
3339
3340 return 0;
3341}
3342
3343static ssize_t
2c3c8bea
CW
3344qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3345 struct bin_attribute *attr,
af19b491
AKS
3346 char *buf, loff_t offset, size_t size)
3347{
3348 struct device *dev = container_of(kobj, struct device, kobj);
3349 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3350 u64 data;
3351 int ret;
3352
3353 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3354 if (ret != 0)
3355 return ret;
3356
3357 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3358 return -EIO;
3359
3360 memcpy(buf, &data, size);
3361
3362 return size;
3363}
3364
3365static ssize_t
2c3c8bea
CW
3366qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3367 struct bin_attribute *attr,
af19b491
AKS
3368 char *buf, loff_t offset, size_t size)
3369{
3370 struct device *dev = container_of(kobj, struct device, kobj);
3371 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3372 u64 data;
3373 int ret;
3374
3375 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3376 if (ret != 0)
3377 return ret;
3378
3379 memcpy(&data, buf, size);
3380
3381 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3382 return -EIO;
3383
3384 return size;
3385}
3386
3387
3388static struct bin_attribute bin_attr_crb = {
3389 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3390 .size = 0,
3391 .read = qlcnic_sysfs_read_crb,
3392 .write = qlcnic_sysfs_write_crb,
3393};
3394
3395static struct bin_attribute bin_attr_mem = {
3396 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3397 .size = 0,
3398 .read = qlcnic_sysfs_read_mem,
3399 .write = qlcnic_sysfs_write_mem,
3400};
3401
cea8975e 3402static int
346fe763
RB
3403validate_pm_config(struct qlcnic_adapter *adapter,
3404 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3405{
3406
3407 u8 src_pci_func, s_esw_id, d_esw_id;
3408 u8 dest_pci_func;
3409 int i;
3410
3411 for (i = 0; i < count; i++) {
3412 src_pci_func = pm_cfg[i].pci_func;
3413 dest_pci_func = pm_cfg[i].dest_npar;
3414 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3415 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3416 return QL_STATUS_INVALID_PARAM;
3417
3418 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3419 return QL_STATUS_INVALID_PARAM;
3420
3421 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3422 return QL_STATUS_INVALID_PARAM;
3423
346fe763
RB
3424 s_esw_id = adapter->npars[src_pci_func].phy_port;
3425 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3426
3427 if (s_esw_id != d_esw_id)
3428 return QL_STATUS_INVALID_PARAM;
3429
3430 }
3431 return 0;
3432
3433}
3434
3435static ssize_t
3436qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3437 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3438{
3439 struct device *dev = container_of(kobj, struct device, kobj);
3440 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3441 struct qlcnic_pm_func_cfg *pm_cfg;
3442 u32 id, action, pci_func;
3443 int count, rem, i, ret;
3444
3445 count = size / sizeof(struct qlcnic_pm_func_cfg);
3446 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3447 if (rem)
3448 return QL_STATUS_INVALID_PARAM;
3449
3450 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3451
3452 ret = validate_pm_config(adapter, pm_cfg, count);
3453 if (ret)
3454 return ret;
3455 for (i = 0; i < count; i++) {
3456 pci_func = pm_cfg[i].pci_func;
4e8acb01 3457 action = !!pm_cfg[i].action;
346fe763
RB
3458 id = adapter->npars[pci_func].phy_port;
3459 ret = qlcnic_config_port_mirroring(adapter, id,
3460 action, pci_func);
3461 if (ret)
3462 return ret;
3463 }
3464
3465 for (i = 0; i < count; i++) {
3466 pci_func = pm_cfg[i].pci_func;
3467 id = adapter->npars[pci_func].phy_port;
4e8acb01 3468 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3469 adapter->npars[pci_func].dest_npar = id;
3470 }
3471 return size;
3472}
3473
3474static ssize_t
3475qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3476 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3477{
3478 struct device *dev = container_of(kobj, struct device, kobj);
3479 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3480 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3481 int i;
3482
3483 if (size != sizeof(pm_cfg))
3484 return QL_STATUS_INVALID_PARAM;
3485
3486 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3487 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3488 continue;
3489 pm_cfg[i].action = adapter->npars[i].enable_pm;
3490 pm_cfg[i].dest_npar = 0;
3491 pm_cfg[i].pci_func = i;
3492 }
3493 memcpy(buf, &pm_cfg, size);
3494
3495 return size;
3496}
3497
cea8975e 3498static int
346fe763 3499validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3500 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3501{
7613c87b 3502 u32 op_mode;
346fe763
RB
3503 u8 pci_func;
3504 int i;
7613c87b
RB
3505
3506 op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
3507
346fe763
RB
3508 for (i = 0; i < count; i++) {
3509 pci_func = esw_cfg[i].pci_func;
3510 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3511 return QL_STATUS_INVALID_PARAM;
3512
4e8acb01
RB
3513 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3514 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3515 return QL_STATUS_INVALID_PARAM;
346fe763 3516
4e8acb01
RB
3517 switch (esw_cfg[i].op_mode) {
3518 case QLCNIC_PORT_DEFAULTS:
7613c87b 3519 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3520 QLCNIC_NON_PRIV_FUNC) {
091056b2
AKS
3521 if (esw_cfg[i].mac_anti_spoof != 0)
3522 return QL_STATUS_INVALID_PARAM;
3523 if (esw_cfg[i].mac_override != 1)
3524 return QL_STATUS_INVALID_PARAM;
3525 if (esw_cfg[i].promisc_mode != 1)
3526 return QL_STATUS_INVALID_PARAM;
7373373d 3527 }
4e8acb01
RB
3528 break;
3529 case QLCNIC_ADD_VLAN:
346fe763
RB
3530 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3531 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3532 if (!esw_cfg[i].op_type)
3533 return QL_STATUS_INVALID_PARAM;
3534 break;
3535 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3536 if (!esw_cfg[i].op_type)
3537 return QL_STATUS_INVALID_PARAM;
3538 break;
3539 default:
346fe763 3540 return QL_STATUS_INVALID_PARAM;
4e8acb01 3541 }
346fe763 3542 }
346fe763
RB
3543 return 0;
3544}
3545
3546static ssize_t
3547qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3548 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3549{
3550 struct device *dev = container_of(kobj, struct device, kobj);
3551 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3552 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3553 struct qlcnic_npar_info *npar;
346fe763 3554 int count, rem, i, ret;
0325d69b 3555 u8 pci_func, op_mode = 0;
346fe763
RB
3556
3557 count = size / sizeof(struct qlcnic_esw_func_cfg);
3558 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3559 if (rem)
3560 return QL_STATUS_INVALID_PARAM;
3561
3562 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3563 ret = validate_esw_config(adapter, esw_cfg, count);
3564 if (ret)
3565 return ret;
3566
3567 for (i = 0; i < count; i++) {
0325d69b
RB
3568 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3569 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3570 return QL_STATUS_INVALID_PARAM;
e9a47700
RB
3571
3572 if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
3573 continue;
3574
3575 op_mode = esw_cfg[i].op_mode;
3576 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3577 esw_cfg[i].op_mode = op_mode;
3578 esw_cfg[i].pci_func = adapter->ahw.pci_func;
3579
3580 switch (esw_cfg[i].op_mode) {
3581 case QLCNIC_PORT_DEFAULTS:
3582 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3583 break;
8cf61f89
AKS
3584 case QLCNIC_ADD_VLAN:
3585 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3586 break;
3587 case QLCNIC_DEL_VLAN:
3588 esw_cfg[i].vlan_id = 0;
3589 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3590 break;
0325d69b 3591 }
346fe763
RB
3592 }
3593
0325d69b
RB
3594 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3595 goto out;
e9a47700 3596
346fe763
RB
3597 for (i = 0; i < count; i++) {
3598 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3599 npar = &adapter->npars[pci_func];
3600 switch (esw_cfg[i].op_mode) {
3601 case QLCNIC_PORT_DEFAULTS:
3602 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3603 npar->mac_override = esw_cfg[i].mac_override;
4e8acb01
RB
3604 npar->offload_flags = esw_cfg[i].offload_flags;
3605 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3606 npar->discard_tagged = esw_cfg[i].discard_tagged;
3607 break;
3608 case QLCNIC_ADD_VLAN:
3609 npar->pvid = esw_cfg[i].vlan_id;
3610 break;
3611 case QLCNIC_DEL_VLAN:
3612 npar->pvid = 0;
3613 break;
3614 }
346fe763 3615 }
0325d69b 3616out:
346fe763
RB
3617 return size;
3618}
3619
3620static ssize_t
3621qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3622 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3623{
3624 struct device *dev = container_of(kobj, struct device, kobj);
3625 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3626 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3627 u8 i;
346fe763
RB
3628
3629 if (size != sizeof(esw_cfg))
3630 return QL_STATUS_INVALID_PARAM;
3631
3632 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3633 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3634 continue;
4e8acb01
RB
3635 esw_cfg[i].pci_func = i;
3636 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3637 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3638 }
3639 memcpy(buf, &esw_cfg, size);
3640
3641 return size;
3642}
3643
cea8975e 3644static int
346fe763
RB
3645validate_npar_config(struct qlcnic_adapter *adapter,
3646 struct qlcnic_npar_func_cfg *np_cfg, int count)
3647{
3648 u8 pci_func, i;
3649
3650 for (i = 0; i < count; i++) {
3651 pci_func = np_cfg[i].pci_func;
3652 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3653 return QL_STATUS_INVALID_PARAM;
3654
3655 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3656 return QL_STATUS_INVALID_PARAM;
3657
d12b0d9a
RB
3658 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3659 !IS_VALID_BW(np_cfg[i].max_bw))
346fe763
RB
3660 return QL_STATUS_INVALID_PARAM;
3661 }
3662 return 0;
3663}
3664
3665static ssize_t
3666qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3667 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3668{
3669 struct device *dev = container_of(kobj, struct device, kobj);
3670 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3671 struct qlcnic_info nic_info;
3672 struct qlcnic_npar_func_cfg *np_cfg;
3673 int i, count, rem, ret;
3674 u8 pci_func;
3675
3676 count = size / sizeof(struct qlcnic_npar_func_cfg);
3677 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3678 if (rem)
3679 return QL_STATUS_INVALID_PARAM;
3680
3681 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3682 ret = validate_npar_config(adapter, np_cfg, count);
3683 if (ret)
3684 return ret;
3685
3686 for (i = 0; i < count ; i++) {
3687 pci_func = np_cfg[i].pci_func;
3688 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3689 if (ret)
3690 return ret;
3691 nic_info.pci_func = pci_func;
3692 nic_info.min_tx_bw = np_cfg[i].min_bw;
3693 nic_info.max_tx_bw = np_cfg[i].max_bw;
3694 ret = qlcnic_set_nic_info(adapter, &nic_info);
3695 if (ret)
3696 return ret;
cea8975e
AC
3697 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3698 adapter->npars[i].max_bw = nic_info.max_tx_bw;
346fe763
RB
3699 }
3700
3701 return size;
3702
3703}
3704static ssize_t
3705qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3706 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3707{
3708 struct device *dev = container_of(kobj, struct device, kobj);
3709 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3710 struct qlcnic_info nic_info;
3711 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3712 int i, ret;
3713
3714 if (size != sizeof(np_cfg))
3715 return QL_STATUS_INVALID_PARAM;
3716
3717 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3718 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3719 continue;
3720 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3721 if (ret)
3722 return ret;
3723
3724 np_cfg[i].pci_func = i;
a1c0c459 3725 np_cfg[i].op_mode = (u8)nic_info.op_mode;
346fe763
RB
3726 np_cfg[i].port_num = nic_info.phys_port;
3727 np_cfg[i].fw_capab = nic_info.capabilities;
3728 np_cfg[i].min_bw = nic_info.min_tx_bw ;
3729 np_cfg[i].max_bw = nic_info.max_tx_bw;
3730 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3731 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3732 }
3733 memcpy(buf, &np_cfg, size);
3734 return size;
3735}
3736
b6021212
AKS
3737static ssize_t
3738qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3739 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3740{
3741 struct device *dev = container_of(kobj, struct device, kobj);
3742 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3743 struct qlcnic_esw_statistics port_stats;
3744 int ret;
3745
3746 if (size != sizeof(struct qlcnic_esw_statistics))
3747 return QL_STATUS_INVALID_PARAM;
3748
3749 if (offset >= QLCNIC_MAX_PCI_FUNC)
3750 return QL_STATUS_INVALID_PARAM;
3751
3752 memset(&port_stats, 0, size);
3753 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3754 &port_stats.rx);
3755 if (ret)
3756 return ret;
3757
3758 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3759 &port_stats.tx);
3760 if (ret)
3761 return ret;
3762
3763 memcpy(buf, &port_stats, size);
3764 return size;
3765}
3766
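/* Read handler for "esw_stats": same layout as "port_stats", but the offset
 * selects an eSwitch (XG port) and the counters come from
 * qlcnic_get_eswitch_stats().
 */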
3767static ssize_t
3768qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3769 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3770{
3771 struct device *dev = container_of(kobj, struct device, kobj);
3772 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3773 struct qlcnic_esw_statistics esw_stats;
3774 int ret;
3775
3776 if (size != sizeof(struct qlcnic_esw_statistics))
3777 return QL_STATUS_INVALID_PARAM;
3778
3779 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3780 return QL_STATUS_INVALID_PARAM;
3781
3782 memset(&esw_stats, 0, size);
3783 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3784 &esw_stats.rx);
3785 if (ret)
3786 return ret;
3787
3788 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3789 &esw_stats.tx);
3790 if (ret)
3791 return ret;
3792
3793 memcpy(buf, &esw_stats, size);
3794 return size;
3795}
3796
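/* Write handler for "esw_stats": clears the Rx and Tx counters of the
 * eSwitch selected by the file offset.
 */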
3797static ssize_t
3798qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3799 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3800{
3801 struct device *dev = container_of(kobj, struct device, kobj);
3802 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3803 int ret;
3804
3805 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3806 return QL_STATUS_INVALID_PARAM;
3807
3808 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3809 QLCNIC_QUERY_RX_COUNTER);
3810 if (ret)
3811 return ret;
3812
3813 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3814 QLCNIC_QUERY_TX_COUNTER);
3815 if (ret)
3816 return ret;
3817
3818 return size;
3819}
3820
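/* Write handler for "port_stats": clears the per-function Rx and Tx
 * counters for the PCI function selected by the file offset.
 */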
3821static ssize_t
3822qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3823 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3824{
3825
3826 struct device *dev = container_of(kobj, struct device, kobj);
3827 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3828 int ret;
3829
3830 if (offset >= QLCNIC_MAX_PCI_FUNC)
3831 return QL_STATUS_INVALID_PARAM;
3832
3833 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3834 QLCNIC_QUERY_RX_COUNTER);
3835 if (ret)
3836 return ret;
3837
3838 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3839 QLCNIC_QUERY_TX_COUNTER);
3840 if (ret)
3841 return ret;
3842
3843 return size;
3844}
3845
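/* Read handler for "pci_config": queries firmware for every function's PCI
 * configuration (type, port, bandwidth limits, default MAC) and returns an
 * array of struct qlcnic_pci_func_cfg.  The temporary pci_info array is
 * kcalloc()ed and freed on both the error and success paths.
 */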
346fe763
RB
3846static ssize_t
3847qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3848 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3849{
3850 struct device *dev = container_of(kobj, struct device, kobj);
3851 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3852 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 3853 struct qlcnic_pci_info *pci_info;
346fe763
RB
3854 int i, ret;
3855
3856 if (size != sizeof(pci_cfg))
3857 return QL_STATUS_INVALID_PARAM;
3858
e88db3bd
DC
3859 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3860 if (!pci_info)
3861 return -ENOMEM;
3862
346fe763 3863 ret = qlcnic_get_pci_info(adapter, pci_info);
e88db3bd
DC
3864 if (ret) {
3865 kfree(pci_info);
346fe763 3866 return ret;
e88db3bd 3867 }
346fe763
RB
3868
3869 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3870 pci_cfg[i].pci_func = pci_info[i].id;
3871 pci_cfg[i].func_type = pci_info[i].type;
3872 pci_cfg[i].port_num = pci_info[i].default_port;
3873 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3874 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3875 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3876 }
3877 memcpy(buf, &pci_cfg, size);
e88db3bd 3878 kfree(pci_info);
346fe763 3879 return size;
346fe763
RB
3880}
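
/* Binary sysfs attributes for NPAR/PCI configuration and statistics; they
 * are registered against the adapter's PCI device by
 * qlcnic_create_diag_entries() below.
 */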
3881static struct bin_attribute bin_attr_npar_config = {
3882 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3883 .size = 0,
3884 .read = qlcnic_sysfs_read_npar_config,
3885 .write = qlcnic_sysfs_write_npar_config,
3886};
3887
3888static struct bin_attribute bin_attr_pci_config = {
3889 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3890 .size = 0,
3891 .read = qlcnic_sysfs_read_pci_config,
3892 .write = NULL,
3893};
3894
b6021212
AKS
3895static struct bin_attribute bin_attr_port_stats = {
3896 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3897 .size = 0,
3898 .read = qlcnic_sysfs_get_port_stats,
3899 .write = qlcnic_sysfs_clear_port_stats,
3900};
3901
3902static struct bin_attribute bin_attr_esw_stats = {
3903 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3904 .size = 0,
3905 .read = qlcnic_sysfs_get_esw_stats,
3906 .write = qlcnic_sysfs_clear_esw_stats,
3907};
3908
346fe763
RB
3909static struct bin_attribute bin_attr_esw_config = {
3910 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3911 .size = 0,
3912 .read = qlcnic_sysfs_read_esw_config,
3913 .write = qlcnic_sysfs_write_esw_config,
3914};
3915
3916static struct bin_attribute bin_attr_pm_config = {
3917 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3918 .size = 0,
3919 .read = qlcnic_sysfs_read_pm_config,
3920 .write = qlcnic_sysfs_write_pm_config,
3921};
3922
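/* Create the per-device "bridged_mode" attribute, exposed only when the
 * firmware advertises QLCNIC_FW_CAPABILITY_BDG.
 */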
af19b491
AKS
3923static void
3924qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3925{
3926 struct device *dev = &adapter->pdev->dev;
3927
3928 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3929 if (device_create_file(dev, &dev_attr_bridged_mode))
3930 dev_warn(dev,
3931 "failed to create bridged_mode sysfs entry\n");
3932}
3933
3934static void
3935qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3936{
3937 struct device *dev = &adapter->pdev->dev;
3938
3939 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3940 device_remove_file(dev, &dev_attr_bridged_mode);
3941}
3942
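/* Diagnostic and management sysfs entries.  Every function gets port_stats;
 * non-privileged functions stop there.  Privileged functions also get
 * diag_mode, crb, mem and pci_config; esw_config additionally requires
 * QLCNIC_ESWITCH_ENABLED, and npar_config, pm_config and esw_stats are
 * created only for the management function.
 */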
3943static void
3944qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3945{
3946 struct device *dev = &adapter->pdev->dev;
3947
b6021212
AKS
3948 if (device_create_bin_file(dev, &bin_attr_port_stats))
3949 dev_info(dev, "failed to create port stats sysfs entry\n");
3950
132ff00a
AC
3951 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3952 return;
af19b491
AKS
3953 if (device_create_file(dev, &dev_attr_diag_mode))
3954 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3955 if (device_create_bin_file(dev, &bin_attr_crb))
3956 dev_info(dev, "failed to create crb sysfs entry\n");
3957 if (device_create_bin_file(dev, &bin_attr_mem))
3958 dev_info(dev, "failed to create mem sysfs entry\n");
53478fef
SC
3959 if (device_create_bin_file(dev, &bin_attr_pci_config))
3960 dev_info(dev, "failed to create pci config sysfs entry\n");
4e8acb01
RB
3961 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3962 return;
3963 if (device_create_bin_file(dev, &bin_attr_esw_config))
3964 dev_info(dev, "failed to create esw config sysfs entry\n");
3965 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 3966 return;
346fe763
RB
3967 if (device_create_bin_file(dev, &bin_attr_npar_config))
3968 dev_info(dev, "failed to create npar config sysfs entry\n");
346fe763
RB
3969 if (device_create_bin_file(dev, &bin_attr_pm_config))
3970 dev_info(dev, "failed to create pm config sysfs entry\n");
b6021212
AKS
3971 if (device_create_bin_file(dev, &bin_attr_esw_stats))
3972 dev_info(dev, "failed to create eswitch stats sysfs entry\n");
af19b491
AKS
3973}
3974
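/* Remove the entries created by qlcnic_create_diag_entries(), following the
 * same privilege checks so only attributes that were created are removed.
 */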
af19b491
AKS
3975static void
3976qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3977{
3978 struct device *dev = &adapter->pdev->dev;
3979
b6021212
AKS
3980 device_remove_bin_file(dev, &bin_attr_port_stats);
3981
132ff00a
AC
3982 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3983 return;
af19b491
AKS
3984 device_remove_file(dev, &dev_attr_diag_mode);
3985 device_remove_bin_file(dev, &bin_attr_crb);
3986 device_remove_bin_file(dev, &bin_attr_mem);
53478fef 3987 device_remove_bin_file(dev, &bin_attr_pci_config);
4e8acb01
RB
3988 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3989 return;
3990 device_remove_bin_file(dev, &bin_attr_esw_config);
3991 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 3992 return;
346fe763 3993 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 3994 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 3995 device_remove_bin_file(dev, &bin_attr_esw_stats);
af19b491
AKS
3996}
3997
3998#ifdef CONFIG_INET
3999
4000#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4001
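/* Walk the IPv4 addresses configured on @dev and hand each one to
 * qlcnic_config_ipaddr() with QLCNIC_IP_UP or QLCNIC_IP_DOWN, depending on
 * the netdev event being processed.
 */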
af19b491 4002static void
aec1e845
AKS
4003qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4004 struct net_device *dev, unsigned long event)
af19b491
AKS
4005{
4006 struct in_device *indev;
af19b491 4007
af19b491
AKS
4008 indev = in_dev_get(dev);
4009 if (!indev)
4010 return;
4011
4012 for_ifa(indev) {
4013 switch (event) {
4014 case NETDEV_UP:
4015 qlcnic_config_ipaddr(adapter,
4016 ifa->ifa_address, QLCNIC_IP_UP);
4017 break;
4018 case NETDEV_DOWN:
4019 qlcnic_config_ipaddr(adapter,
4020 ifa->ifa_address, QLCNIC_IP_DOWN);
4021 break;
4022 default:
4023 break;
4024 }
4025 } endfor_ifa(indev);
4026
4027 in_dev_put(indev);
af19b491
AKS
4028}
4029
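/* Replay the IPv4 address notifications for the real device and for every
 * VLAN device stacked on top of it (adapter->vlgrp).
 */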
aec1e845
AKS
4030static void
4031qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4032{
4033 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4034 struct net_device *dev;
4035 u16 vid;
4036
4037 qlcnic_config_indev_addr(adapter, netdev, event);
4038
4039 if (!adapter->vlgrp)
4040 return;
4041
b738127d 4042 for (vid = 0; vid < VLAN_N_VID; vid++) {
aec1e845
AKS
4043 dev = vlan_group_get_device(adapter->vlgrp, vid);
4044 if (!dev)
4045 continue;
4046
4047 qlcnic_config_indev_addr(adapter, dev, event);
4048 }
4049}
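/* netdevice notifier: resolve VLAN devices to their real device, ignore
 * netdevs that do not belong to this driver or adapters that are not up,
 * then forward the event to qlcnic_config_indev_addr().
 */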
4050
af19b491
AKS
4051static int qlcnic_netdev_event(struct notifier_block *this,
4052 unsigned long event, void *ptr)
4053{
4054 struct qlcnic_adapter *adapter;
4055 struct net_device *dev = (struct net_device *)ptr;
4056
4057recheck:
4058 if (dev == NULL)
4059 goto done;
4060
4061 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4062 dev = vlan_dev_real_dev(dev);
4063 goto recheck;
4064 }
4065
4066 if (!is_qlcnic_netdev(dev))
4067 goto done;
4068
4069 adapter = netdev_priv(dev);
4070
4071 if (!adapter)
4072 goto done;
4073
8a15ad1f 4074 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4075 goto done;
4076
aec1e845 4077 qlcnic_config_indev_addr(adapter, dev, event);
af19b491
AKS
4078done:
4079 return NOTIFY_DONE;
4080}
4081
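/* inetaddr notifier: same device filtering as the netdevice notifier, but
 * driven by a single IPv4 address being added or removed.
 */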
4082static int
4083qlcnic_inetaddr_event(struct notifier_block *this,
4084 unsigned long event, void *ptr)
4085{
4086 struct qlcnic_adapter *adapter;
4087 struct net_device *dev;
4088
4089 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4090
4091 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4092
4093recheck:
aec1e845 4094 if (dev == NULL)
af19b491
AKS
4095 goto done;
4096
4097 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4098 dev = vlan_dev_real_dev(dev);
4099 goto recheck;
4100 }
4101
4102 if (!is_qlcnic_netdev(dev))
4103 goto done;
4104
4105 adapter = netdev_priv(dev);
4106
251a84c9 4107 if (!adapter)
af19b491
AKS
4108 goto done;
4109
8a15ad1f 4110 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4111 goto done;
4112
4113 switch (event) {
4114 case NETDEV_UP:
4115 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4116 break;
4117 case NETDEV_DOWN:
4118 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4119 break;
4120 default:
4121 break;
4122 }
4123
4124done:
4125 return NOTIFY_DONE;
4126}
4127
4128static struct notifier_block qlcnic_netdev_cb = {
4129 .notifier_call = qlcnic_netdev_event,
4130};
4131
4132static struct notifier_block qlcnic_inetaddr_cb = {
4133 .notifier_call = qlcnic_inetaddr_event,
4134};
4135#else
4136static void
aec1e845 4137qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
af19b491
AKS
4138{ }
4139#endif
451724c8
SC
4140static struct pci_error_handlers qlcnic_err_handler = {
4141 .error_detected = qlcnic_io_error_detected,
4142 .slot_reset = qlcnic_io_slot_reset,
4143 .resume = qlcnic_io_resume,
4144};
af19b491
AKS
4145
4146static struct pci_driver qlcnic_driver = {
4147 .name = qlcnic_driver_name,
4148 .id_table = qlcnic_pci_tbl,
4149 .probe = qlcnic_probe,
4150 .remove = __devexit_p(qlcnic_remove),
4151#ifdef CONFIG_PM
4152 .suspend = qlcnic_suspend,
4153 .resume = qlcnic_resume,
4154#endif
451724c8
SC
4155 .shutdown = qlcnic_shutdown,
4156 .err_handler = &qlcnic_err_handler
4157
af19b491
AKS
4158};
4159
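/* Module init: create the driver workqueue, register the CONFIG_INET
 * notifiers and the PCI driver; everything is unwound if
 * pci_register_driver() fails.
 */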
4160static int __init qlcnic_init_module(void)
4161{
0cf3a14c 4162 int ret;
af19b491
AKS
4163
4164 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4165
f7ec804a
AKS
4166 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4167 if (qlcnic_wq == NULL) {
4168 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4169 return -ENOMEM;
4170 }
4171
af19b491
AKS
4172#ifdef CONFIG_INET
4173 register_netdevice_notifier(&qlcnic_netdev_cb);
4174 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4175#endif
4176
0cf3a14c
AKS
4177 ret = pci_register_driver(&qlcnic_driver);
4178 if (ret) {
4179#ifdef CONFIG_INET
4180 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4181 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4182#endif
f7ec804a 4183 destroy_workqueue(qlcnic_wq);
0cf3a14c 4184 }
af19b491 4185
0cf3a14c 4186 return ret;
af19b491
AKS
4187}
4188
4189module_init(qlcnic_init_module);
4190
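/* Module exit: tear everything down in the reverse order of
 * qlcnic_init_module().
 */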
4191static void __exit qlcnic_exit_module(void)
4192{
4193
4194 pci_unregister_driver(&qlcnic_driver);
4195
4196#ifdef CONFIG_INET
4197 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4198 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4199#endif
f7ec804a 4200 destroy_workqueue(qlcnic_wq);
af19b491
AKS
4201}
4202
4203module_exit(qlcnic_exit_module);