git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
af19b491 1/*
40839129
SV
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
af19b491 4 *
40839129 5 * See LICENSE.qlcnic for copyright and licensing details.
af19b491
AKS
6 */
7
5a0e3ad6 8#include <linux/slab.h>
af19b491
AKS
9#include <linux/vmalloc.h>
10#include <linux/interrupt.h>
11
12#include "qlcnic.h"
13
7e56cac4 14#include <linux/swab.h>
af19b491 15#include <linux/dma-mapping.h>
af19b491
AKS
16#include <net/ip.h>
17#include <linux/ipv6.h>
18#include <linux/inetdevice.h>
19#include <linux/sysfs.h>
451724c8 20#include <linux/aer.h>
f94bc1e7 21#include <linux/log2.h>
af19b491 22
7f9a0c34 23MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
af19b491
AKS
24MODULE_LICENSE("GPL");
25MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
26MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
27
28char qlcnic_driver_name[] = "qlcnic";
7f9a0c34
SV
29static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
30 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
af19b491 31
f7ec804a 32static struct workqueue_struct *qlcnic_wq;
b5e5492c 33static int qlcnic_mac_learn;
b11a25aa 34module_param(qlcnic_mac_learn, int, 0444);
b5e5492c
AKS
 35 MODULE_PARM_DESC(qlcnic_mac_learn, "MAC filter (0=disabled, 1=enabled)");
36
af19b491 37static int use_msi = 1;
b11a25aa 38module_param(use_msi, int, 0444);
af19b491
AKS
 39 MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
40
41static int use_msi_x = 1;
b11a25aa 42module_param(use_msi_x, int, 0444);
af19b491
AKS
 43 MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
44
9ce13ca8 45static int auto_fw_reset = 1;
af19b491
AKS
46module_param(auto_fw_reset, int, 0644);
 47 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
48
4d5bdb38 49static int load_fw_file;
b11a25aa 50module_param(load_fw_file, int, 0444);
4d5bdb38
AKS
 51 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
52
2e9d722d 53static int qlcnic_config_npars;
b11a25aa 54module_param(qlcnic_config_npars, int, 0444);
2e9d722d
AC
 55 MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
56
af19b491
AKS
57static int __devinit qlcnic_probe(struct pci_dev *pdev,
58 const struct pci_device_id *ent);
59static void __devexit qlcnic_remove(struct pci_dev *pdev);
60static int qlcnic_open(struct net_device *netdev);
61static int qlcnic_close(struct net_device *netdev);
af19b491 62static void qlcnic_tx_timeout(struct net_device *netdev);
af19b491
AKS
63static void qlcnic_attach_work(struct work_struct *work);
64static void qlcnic_fwinit_work(struct work_struct *work);
65static void qlcnic_fw_poll_work(struct work_struct *work);
66static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
67 work_func_t func, int delay);
68static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
69static int qlcnic_poll(struct napi_struct *napi, int budget);
8f891387 70static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
af19b491
AKS
71#ifdef CONFIG_NET_POLL_CONTROLLER
72static void qlcnic_poll_controller(struct net_device *netdev);
73#endif
74
75static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
76static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
77static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
78static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
79
6df900e9 80static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
21854f02 81static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
af19b491
AKS
82static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
83
7eb9855d 84static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
af19b491
AKS
85static irqreturn_t qlcnic_intr(int irq, void *data);
86static irqreturn_t qlcnic_msi_intr(int irq, void *data);
87static irqreturn_t qlcnic_msix_intr(int irq, void *data);
88
89static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
aec1e845 90static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
9f26f547
AC
91static int qlcnic_start_firmware(struct qlcnic_adapter *);
92
b5e5492c 93static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
9f26f547 94static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
9f26f547
AC
95static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
96static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
97static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
0325d69b
RB
98static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
99 struct qlcnic_esw_func_cfg *);
8e586137
JP
100static int qlcnic_vlan_rx_add(struct net_device *, u16);
101static int qlcnic_vlan_rx_del(struct net_device *, u16);
b9796a14 102
af19b491
AKS
103/* PCI Device ID Table */
104#define ENTRY(device) \
105 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
106 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
107
108#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
109
6a902881 110static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
af19b491
AKS
111 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
112 {0,}
113};
114
115MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
116
117
b1fc6d3c 118inline void
af19b491
AKS
119qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
120 struct qlcnic_host_tx_ring *tx_ring)
121{
122 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
af19b491
AKS
123}
124
125static const u32 msi_tgt_status[8] = {
126 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
127 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
128 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
129 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
130};
131
132static const
133struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
134
135static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
136{
137 writel(0, sds_ring->crb_intr_mask);
138}
139
140static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
141{
142 struct qlcnic_adapter *adapter = sds_ring->adapter;
143
144 writel(0x1, sds_ring->crb_intr_mask);
145
146 if (!QLCNIC_IS_MSI_FAMILY(adapter))
147 writel(0xfbff, adapter->tgt_mask_reg);
148}
149
150static int
151qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
152{
153 int size = sizeof(struct qlcnic_host_sds_ring) * count;
154
155 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
156
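	/* nonzero (true) means the allocation failed; the caller maps that to -ENOMEM */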
807540ba 157 return recv_ctx->sds_rings == NULL;
af19b491
AKS
158}
159
160static void
161qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
162{
163 if (recv_ctx->sds_rings != NULL)
164 kfree(recv_ctx->sds_rings);
165
166 recv_ctx->sds_rings = NULL;
167}
168
169static int
170qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
171{
172 int ring;
173 struct qlcnic_host_sds_ring *sds_ring;
b1fc6d3c 174 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491
AKS
175
176 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
177 return -ENOMEM;
178
179 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
180 sds_ring = &recv_ctx->sds_rings[ring];
8f891387 181
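		/* the last SDS ring gets the qlcnic_poll() handler; the other rings
		 * use qlcnic_rx_poll() with a larger NAPI weight */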
182 if (ring == adapter->max_sds_rings - 1)
183 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
184 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
185 else
186 netif_napi_add(netdev, &sds_ring->napi,
187 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
af19b491
AKS
188 }
189
190 return 0;
191}
192
193static void
194qlcnic_napi_del(struct qlcnic_adapter *adapter)
195{
196 int ring;
197 struct qlcnic_host_sds_ring *sds_ring;
b1fc6d3c 198 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491
AKS
199
200 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
201 sds_ring = &recv_ctx->sds_rings[ring];
202 netif_napi_del(&sds_ring->napi);
203 }
204
b1fc6d3c 205 qlcnic_free_sds_rings(adapter->recv_ctx);
af19b491
AKS
206}
207
208static void
209qlcnic_napi_enable(struct qlcnic_adapter *adapter)
210{
211 int ring;
212 struct qlcnic_host_sds_ring *sds_ring;
b1fc6d3c 213 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491 214
780ab790
AKS
215 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
216 return;
217
af19b491
AKS
218 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
219 sds_ring = &recv_ctx->sds_rings[ring];
220 napi_enable(&sds_ring->napi);
221 qlcnic_enable_int(sds_ring);
222 }
223}
224
225static void
226qlcnic_napi_disable(struct qlcnic_adapter *adapter)
227{
228 int ring;
229 struct qlcnic_host_sds_ring *sds_ring;
b1fc6d3c 230 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491 231
780ab790
AKS
232 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
233 return;
234
af19b491
AKS
235 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
236 sds_ring = &recv_ctx->sds_rings[ring];
237 qlcnic_disable_int(sds_ring);
238 napi_synchronize(&sds_ring->napi);
239 napi_disable(&sds_ring->napi);
240 }
241}
242
243static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
244{
245 memset(&adapter->stats, 0, sizeof(adapter->stats));
af19b491
AKS
246}
247
af19b491
AKS
248static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
249{
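	/* set or clear the enable flag in the function's MSI-X capability via config space */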
250 u32 control;
251 int pos;
252
253 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
254 if (pos) {
255 pci_read_config_dword(pdev, pos, &control);
256 if (enable)
257 control |= PCI_MSIX_FLAGS_ENABLE;
258 else
259 control = 0;
260 pci_write_config_dword(pdev, pos, control);
261 }
262}
263
264static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
265{
266 int i;
267
268 for (i = 0; i < count; i++)
269 adapter->msix_entries[i].entry = i;
270}
271
272static int
273qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
274{
2e9d722d 275 u8 mac_addr[ETH_ALEN];
af19b491
AKS
276 struct net_device *netdev = adapter->netdev;
277 struct pci_dev *pdev = adapter->pdev;
278
da48e6c3 279 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
af19b491
AKS
280 return -EIO;
281
2e9d722d 282 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
af19b491
AKS
283 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
284 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
285
286 /* set station address */
287
288 if (!is_valid_ether_addr(netdev->perm_addr))
289 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
290 netdev->dev_addr);
291
292 return 0;
293}
294
295static int qlcnic_set_mac(struct net_device *netdev, void *p)
296{
297 struct qlcnic_adapter *adapter = netdev_priv(netdev);
298 struct sockaddr *addr = p;
299
7373373d
RB
300 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
301 return -EOPNOTSUPP;
302
af19b491 303 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 304 return -EADDRNOTAVAIL;
af19b491 305
8a15ad1f 306 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
307 netif_device_detach(netdev);
308 qlcnic_napi_disable(adapter);
309 }
310
311 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
312 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
313 qlcnic_set_multi(adapter->netdev);
314
8a15ad1f 315 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
316 netif_device_attach(netdev);
317 qlcnic_napi_enable(adapter);
318 }
319 return 0;
320}
321
322static const struct net_device_ops qlcnic_netdev_ops = {
323 .ndo_open = qlcnic_open,
324 .ndo_stop = qlcnic_close,
325 .ndo_start_xmit = qlcnic_xmit_frame,
326 .ndo_get_stats = qlcnic_get_stats,
327 .ndo_validate_addr = eth_validate_addr,
afc4b13d 328 .ndo_set_rx_mode = qlcnic_set_multi,
af19b491
AKS
329 .ndo_set_mac_address = qlcnic_set_mac,
330 .ndo_change_mtu = qlcnic_change_mtu,
135d84a9
MM
331 .ndo_fix_features = qlcnic_fix_features,
332 .ndo_set_features = qlcnic_set_features,
af19b491 333 .ndo_tx_timeout = qlcnic_tx_timeout,
b9796a14
AC
334 .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
335 .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
af19b491
AKS
336#ifdef CONFIG_NET_POLL_CONTROLLER
337 .ndo_poll_controller = qlcnic_poll_controller,
338#endif
339};
340
b43e5ee7
SC
341static const struct net_device_ops qlcnic_netdev_failed_ops = {
342 .ndo_open = qlcnic_open,
343};
344
2e9d722d 345static struct qlcnic_nic_template qlcnic_ops = {
2e9d722d
AC
346 .config_bridged_mode = qlcnic_config_bridged_mode,
347 .config_led = qlcnic_config_led,
9f26f547
AC
348 .start_firmware = qlcnic_start_firmware
349};
350
351static struct qlcnic_nic_template qlcnic_vf_ops = {
9f26f547
AC
352 .config_bridged_mode = qlcnicvf_config_bridged_mode,
353 .config_led = qlcnicvf_config_led,
9f26f547 354 .start_firmware = qlcnicvf_start_firmware
2e9d722d
AC
355};
356
f94bc1e7 357static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
af19b491 358{
af19b491 359 struct pci_dev *pdev = adapter->pdev;
f94bc1e7 360 int err = -1;
af19b491
AKS
361
362 adapter->max_sds_rings = 1;
af19b491 363 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
af19b491
AKS
364 qlcnic_set_msix_bit(pdev, 0);
365
366 if (adapter->msix_supported) {
f94bc1e7 367 enable_msix:
af19b491
AKS
368 qlcnic_init_msix_entries(adapter, num_msix);
369 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
370 if (err == 0) {
371 adapter->flags |= QLCNIC_MSIX_ENABLED;
372 qlcnic_set_msix_bit(pdev, 1);
373
b1fc6d3c 374 adapter->max_sds_rings = num_msix;
af19b491
AKS
375
376 dev_info(&pdev->dev, "using msi-x interrupts\n");
f94bc1e7 377 return err;
af19b491 378 }
f94bc1e7
SC
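		/* a positive return from pci_enable_msix() is the number of vectors
		 * actually available; retry with the largest power-of-two count that fits */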
379 if (err > 0) {
380 num_msix = rounddown_pow_of_two(err);
381 if (num_msix)
382 goto enable_msix;
383 }
384 }
385 return err;
386}
af19b491 387
af19b491 388
f94bc1e7
SC
389static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
390{
391 const struct qlcnic_legacy_intr_set *legacy_intrp;
392 struct pci_dev *pdev = adapter->pdev;
af19b491
AKS
393
394 if (use_msi && !pci_enable_msi(pdev)) {
395 adapter->flags |= QLCNIC_MSI_ENABLED;
396 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
b1fc6d3c 397 msi_tgt_status[adapter->ahw->pci_func]);
af19b491
AKS
398 dev_info(&pdev->dev, "using msi interrupts\n");
399 adapter->msix_entries[0].vector = pdev->irq;
400 return;
401 }
402
f94bc1e7
SC
403 legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
404
405 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
406 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
407 legacy_intrp->tgt_status_reg);
408 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
409 legacy_intrp->tgt_mask_reg);
410 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
411
412 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
413 ISR_INT_STATE_REG);
af19b491
AKS
414 dev_info(&pdev->dev, "using legacy interrupts\n");
415 adapter->msix_entries[0].vector = pdev->irq;
416}
417
f94bc1e7
SC
418static void
419qlcnic_setup_intr(struct qlcnic_adapter *adapter)
420{
421 int num_msix;
422
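	/* one status ring per online CPU, capped at the default ring count and rounded down to a power of two */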
423 if (adapter->msix_supported) {
5f6ec29a
SC
424 num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
425 QLCNIC_DEF_NUM_STS_DESC_RINGS));
f94bc1e7
SC
426 } else
427 num_msix = 1;
428
429 if (!qlcnic_enable_msix(adapter, num_msix))
430 return;
431
432 qlcnic_enable_msi_legacy(adapter);
433}
434
af19b491
AKS
435static void
436qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
437{
438 if (adapter->flags & QLCNIC_MSIX_ENABLED)
439 pci_disable_msix(adapter->pdev);
440 if (adapter->flags & QLCNIC_MSI_ENABLED)
441 pci_disable_msi(adapter->pdev);
442}
443
444static void
445qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
446{
b1fc6d3c
AC
447 if (adapter->ahw->pci_base0 != NULL)
448 iounmap(adapter->ahw->pci_base0);
af19b491
AKS
449}
450
346fe763
RB
451static int
452qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
453{
e88db3bd 454 struct qlcnic_pci_info *pci_info;
900853a4 455 int i, ret = 0;
346fe763
RB
456 u8 pfn;
457
e88db3bd
DC
458 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
459 if (!pci_info)
460 return -ENOMEM;
461
ca315ac2 462 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
346fe763 463 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
e88db3bd 464 if (!adapter->npars) {
900853a4 465 ret = -ENOMEM;
e88db3bd
DC
466 goto err_pci_info;
467 }
346fe763 468
ca315ac2 469 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
346fe763
RB
470 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
471 if (!adapter->eswitch) {
900853a4 472 ret = -ENOMEM;
ca315ac2 473 goto err_npars;
346fe763
RB
474 }
475
476 ret = qlcnic_get_pci_info(adapter, pci_info);
ca315ac2
DC
477 if (ret)
478 goto err_eswitch;
346fe763 479
ca315ac2
DC
480 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
481 pfn = pci_info[i].id;
0f6efff9 482 if (pfn >= QLCNIC_MAX_PCI_FUNC) {
f848d6dd
SC
483 ret = QL_STATUS_INVALID_PARAM;
484 goto err_eswitch;
485 }
a1c0c459
SC
486 adapter->npars[pfn].active = (u8)pci_info[i].active;
487 adapter->npars[pfn].type = (u8)pci_info[i].type;
488 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
ca315ac2
DC
489 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
490 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
346fe763
RB
491 }
492
ca315ac2
DC
493 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
494 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
495
e88db3bd 496 kfree(pci_info);
ca315ac2
DC
497 return 0;
498
499err_eswitch:
346fe763
RB
500 kfree(adapter->eswitch);
501 adapter->eswitch = NULL;
ca315ac2 502err_npars:
346fe763 503 kfree(adapter->npars);
ca315ac2 504 adapter->npars = NULL;
e88db3bd
DC
505err_pci_info:
506 kfree(pci_info);
346fe763
RB
507
508 return ret;
509}
510
2e9d722d
AC
511static int
512qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
513{
514 u8 id;
515 u32 ref_count;
516 int i, ret = 1;
517 u32 data = QLCNIC_MGMT_FUNC;
b1fc6d3c 518 void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
2e9d722d
AC
519
 520	/* If other drivers are not in use, set their privilege level */
31018e06 521 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2e9d722d
AC
522 ret = qlcnic_api_lock(adapter);
523 if (ret)
524 goto err_lock;
2e9d722d 525
0e33c664
AC
526 if (qlcnic_config_npars) {
527 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
346fe763 528 id = i;
0e33c664 529 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
b1fc6d3c 530 id == adapter->ahw->pci_func)
0e33c664
AC
531 continue;
532 data |= (qlcnic_config_npars &
533 QLC_DEV_SET_DRV(0xf, id));
534 }
535 } else {
536 data = readl(priv_op);
b1fc6d3c 537 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
0e33c664 538 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
b1fc6d3c 539 adapter->ahw->pci_func));
2e9d722d
AC
540 }
541 writel(data, priv_op);
2e9d722d
AC
542 qlcnic_api_unlock(adapter);
543err_lock:
544 return ret;
545}
546
0866d96d
AC
547static void
548qlcnic_check_vf(struct qlcnic_adapter *adapter)
2e9d722d
AC
549{
550 void __iomem *msix_base_addr;
551 void __iomem *priv_op;
552 u32 func;
553 u32 msix_base;
554 u32 op_mode, priv_level;
555
556 /* Determine FW API version */
b1fc6d3c
AC
557 adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
558 QLCNIC_FW_API);
2e9d722d
AC
559
560 /* Find PCI function number */
561 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
b1fc6d3c 562 msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
2e9d722d
AC
563 msix_base = readl(msix_base_addr);
564 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
b1fc6d3c 565 adapter->ahw->pci_func = func;
2e9d722d
AC
566
567 /* Determine function privilege level */
b1fc6d3c 568 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
2e9d722d 569 op_mode = readl(priv_op);
0e33c664 570 if (op_mode == QLC_DEV_DRV_DEFAULT)
2e9d722d 571 priv_level = QLCNIC_MGMT_FUNC;
0e33c664 572 else
b1fc6d3c 573 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
2e9d722d 574
0866d96d 575 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
9f26f547
AC
576 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
577 dev_info(&adapter->pdev->dev,
578 "HAL Version: %d Non Privileged function\n",
579 adapter->fw_hal_version);
580 adapter->nic_ops = &qlcnic_vf_ops;
0866d96d
AC
581 } else
582 adapter->nic_ops = &qlcnic_ops;
2e9d722d
AC
583}
584
af19b491
AKS
585static int
586qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
587{
588 void __iomem *mem_ptr0 = NULL;
589 resource_size_t mem_base;
590 unsigned long mem_len, pci_len0 = 0;
591
592 struct pci_dev *pdev = adapter->pdev;
af19b491 593
af19b491
AKS
594 /* remap phys address */
595 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
596 mem_len = pci_resource_len(pdev, 0);
597
598 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
599
600 mem_ptr0 = pci_ioremap_bar(pdev, 0);
601 if (mem_ptr0 == NULL) {
602 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
603 return -EIO;
604 }
605 pci_len0 = mem_len;
606 } else {
607 return -EIO;
608 }
609
610 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
611
b1fc6d3c
AC
612 adapter->ahw->pci_base0 = mem_ptr0;
613 adapter->ahw->pci_len0 = pci_len0;
af19b491 614
0866d96d 615 qlcnic_check_vf(adapter);
2e9d722d 616
b1fc6d3c
AC
617 adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
618 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
619 adapter->ahw->pci_func)));
af19b491
AKS
620
621 return 0;
622}
623
624static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
625{
626 struct pci_dev *pdev = adapter->pdev;
627 int i, found = 0;
628
629 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
630 if (qlcnic_boards[i].vendor == pdev->vendor &&
631 qlcnic_boards[i].device == pdev->device &&
632 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
633 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
02f6e46f
SC
 634			sprintf(name, "%pM: %s",
635 adapter->mac_addr,
636 qlcnic_boards[i].short_name);
af19b491
AKS
637 found = 1;
638 break;
639 }
640
641 }
642
643 if (!found)
7f9a0c34 644 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
af19b491
AKS
645}
646
647static void
648qlcnic_check_options(struct qlcnic_adapter *adapter)
649{
031a4a26 650 u32 fw_major, fw_minor, fw_build, prev_fw_version;
af19b491 651 struct pci_dev *pdev = adapter->pdev;
031a4a26
SV
652 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
653
654 prev_fw_version = adapter->fw_version;
af19b491
AKS
655
656 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
657 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
658 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
659
660 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
661
031a4a26
SV
662 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC) {
663 if (fw_dump->tmpl_hdr == NULL ||
664 adapter->fw_version > prev_fw_version) {
665 if (fw_dump->tmpl_hdr)
666 vfree(fw_dump->tmpl_hdr);
667 if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
668 dev_info(&pdev->dev,
669 "Supports FW dump capability\n");
670 }
671 }
672
251a84c9
AKS
673 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
674 fw_major, fw_minor, fw_build);
b1fc6d3c 675 if (adapter->ahw->port_type == QLCNIC_XGBE) {
90d19005
SC
676 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
677 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
678 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
679 } else {
680 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
681 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
682 }
683
af19b491 684 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
90d19005
SC
685 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
686
b1fc6d3c 687 } else if (adapter->ahw->port_type == QLCNIC_GBE) {
af19b491
AKS
688 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
689 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
90d19005
SC
690 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
691 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
af19b491
AKS
692 }
693
694 adapter->msix_supported = !!use_msi_x;
af19b491
AKS
695
696 adapter->num_txd = MAX_CMD_DESCRIPTORS;
697
251b036a 698 adapter->max_rds_rings = MAX_RDS_RINGS;
af19b491
AKS
699}
700
174240a8
RB
701static int
702qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
703{
704 int err;
705 struct qlcnic_info nic_info;
706
b1fc6d3c 707 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
174240a8
RB
708 if (err)
709 return err;
710
a1c0c459 711 adapter->physical_port = (u8)nic_info.phys_port;
174240a8
RB
712 adapter->switch_mode = nic_info.switch_mode;
713 adapter->max_tx_ques = nic_info.max_tx_ques;
714 adapter->max_rx_ques = nic_info.max_rx_ques;
715 adapter->capabilities = nic_info.capabilities;
716 adapter->max_mac_filters = nic_info.max_mac_filters;
717 adapter->max_mtu = nic_info.max_mtu;
718
719 if (adapter->capabilities & BIT_6)
720 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
721 else
722 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
723
724 return err;
725}
726
8cf61f89
AKS
727static void
728qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
729 struct qlcnic_esw_func_cfg *esw_cfg)
730{
731 if (esw_cfg->discard_tagged)
732 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
733 else
734 adapter->flags |= QLCNIC_TAGGING_ENABLED;
735
736 if (esw_cfg->vlan_id)
737 adapter->pvid = esw_cfg->vlan_id;
738 else
739 adapter->pvid = 0;
740}
741
8e586137 742static int
b9796a14
AC
743qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
744{
745 struct qlcnic_adapter *adapter = netdev_priv(netdev);
746 set_bit(vid, adapter->vlans);
8e586137 747 return 0;
b9796a14
AC
748}
749
8e586137 750static int
b9796a14
AC
751qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
752{
753 struct qlcnic_adapter *adapter = netdev_priv(netdev);
754
755 qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
756 clear_bit(vid, adapter->vlans);
8e586137 757 return 0;
b9796a14
AC
758}
759
0325d69b
RB
760static void
761qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
762 struct qlcnic_esw_func_cfg *esw_cfg)
763{
ee07c1a7
RB
764 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
765 QLCNIC_PROMISC_DISABLED);
7613c87b
RB
766
767 if (esw_cfg->mac_anti_spoof)
768 adapter->flags |= QLCNIC_MACSPOOF;
fe4d434d 769
7373373d
RB
770 if (!esw_cfg->mac_override)
771 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
772
ee07c1a7
RB
773 if (!esw_cfg->promisc_mode)
774 adapter->flags |= QLCNIC_PROMISC_DISABLED;
775
0325d69b
RB
776 qlcnic_set_netdev_features(adapter, esw_cfg);
777}
778
779static int
780qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
781{
782 struct qlcnic_esw_func_cfg esw_cfg;
783
784 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
785 return 0;
786
b1fc6d3c 787 esw_cfg.pci_func = adapter->ahw->pci_func;
0325d69b
RB
788 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
789 return -EIO;
8cf61f89 790 qlcnic_set_vlan_config(adapter, &esw_cfg);
0325d69b
RB
791 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
792
793 return 0;
794}
795
796static void
797qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
798 struct qlcnic_esw_func_cfg *esw_cfg)
799{
800 struct net_device *netdev = adapter->netdev;
c8f44aff 801 netdev_features_t features, vlan_features;
0325d69b 802
135d84a9 803 features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
0325d69b
RB
804 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
805 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
b9796a14 806 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
0325d69b
RB
807
808 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
809 features |= (NETIF_F_TSO | NETIF_F_TSO6);
810 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
811 }
b56421d0
RB
812
813 if (netdev->features & NETIF_F_LRO)
0325d69b
RB
814 features |= NETIF_F_LRO;
815
816 if (esw_cfg->offload_flags & BIT_0) {
817 netdev->features |= features;
0325d69b
RB
818 if (!(esw_cfg->offload_flags & BIT_1))
819 netdev->features &= ~NETIF_F_TSO;
820 if (!(esw_cfg->offload_flags & BIT_2))
821 netdev->features &= ~NETIF_F_TSO6;
822 } else {
823 netdev->features &= ~features;
0325d69b
RB
824 }
825
826 netdev->vlan_features = (features & vlan_features);
827}
828
0866d96d
AC
829static int
830qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
831{
832 void __iomem *priv_op;
833 u32 op_mode, priv_level;
834 int err = 0;
835
174240a8
RB
836 err = qlcnic_initialize_nic(adapter);
837 if (err)
838 return err;
839
0866d96d
AC
840 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
841 return 0;
842
b1fc6d3c 843 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
0866d96d 844 op_mode = readl(priv_op);
b1fc6d3c 845 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
0866d96d
AC
846
847 if (op_mode == QLC_DEV_DRV_DEFAULT)
848 priv_level = QLCNIC_MGMT_FUNC;
849 else
b1fc6d3c 850 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
0866d96d 851
174240a8 852 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
0866d96d
AC
853 if (priv_level == QLCNIC_MGMT_FUNC) {
854 adapter->op_mode = QLCNIC_MGMT_FUNC;
855 err = qlcnic_init_pci_info(adapter);
856 if (err)
857 return err;
858 /* Set privilege level for other functions */
859 qlcnic_set_function_modes(adapter);
860 dev_info(&adapter->pdev->dev,
861 "HAL Version: %d, Management function\n",
862 adapter->fw_hal_version);
863 } else if (priv_level == QLCNIC_PRIV_FUNC) {
864 adapter->op_mode = QLCNIC_PRIV_FUNC;
865 dev_info(&adapter->pdev->dev,
866 "HAL Version: %d, Privileged function\n",
867 adapter->fw_hal_version);
868 }
174240a8 869 }
0866d96d
AC
870
871 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
872
873 return err;
874}
875
0325d69b
RB
876static int
877qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
878{
879 struct qlcnic_esw_func_cfg esw_cfg;
880 struct qlcnic_npar_info *npar;
881 u8 i;
882
174240a8 883 if (adapter->need_fw_reset)
0325d69b
RB
884 return 0;
885
886 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
887 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
888 continue;
889 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
890 esw_cfg.pci_func = i;
891 esw_cfg.offload_flags = BIT_0;
7373373d 892 esw_cfg.mac_override = BIT_0;
ee07c1a7 893 esw_cfg.promisc_mode = BIT_0;
0325d69b
RB
894 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
895 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
896 if (qlcnic_config_switch_port(adapter, &esw_cfg))
897 return -EIO;
898 npar = &adapter->npars[i];
899 npar->pvid = esw_cfg.vlan_id;
7373373d 900 npar->mac_override = esw_cfg.mac_override;
0325d69b
RB
901 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
902 npar->discard_tagged = esw_cfg.discard_tagged;
903 npar->promisc_mode = esw_cfg.promisc_mode;
904 npar->offload_flags = esw_cfg.offload_flags;
905 }
906
907 return 0;
908}
909
4e8acb01
RB
910static int
911qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
912 struct qlcnic_npar_info *npar, int pci_func)
913{
914 struct qlcnic_esw_func_cfg esw_cfg;
915 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
916 esw_cfg.pci_func = pci_func;
917 esw_cfg.vlan_id = npar->pvid;
7373373d 918 esw_cfg.mac_override = npar->mac_override;
4e8acb01
RB
919 esw_cfg.discard_tagged = npar->discard_tagged;
920 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
921 esw_cfg.offload_flags = npar->offload_flags;
922 esw_cfg.promisc_mode = npar->promisc_mode;
923 if (qlcnic_config_switch_port(adapter, &esw_cfg))
924 return -EIO;
925
926 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
927 if (qlcnic_config_switch_port(adapter, &esw_cfg))
928 return -EIO;
929
930 return 0;
931}
932
cea8975e
AC
933static int
934qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
935{
4e8acb01 936 int i, err;
cea8975e
AC
937 struct qlcnic_npar_info *npar;
938 struct qlcnic_info nic_info;
939
174240a8 940 if (!adapter->need_fw_reset)
cea8975e
AC
941 return 0;
942
4e8acb01
RB
943 /* Set the NPAR config data after FW reset */
944 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
945 npar = &adapter->npars[i];
946 if (npar->type != QLCNIC_TYPE_NIC)
947 continue;
948 err = qlcnic_get_nic_info(adapter, &nic_info, i);
949 if (err)
950 return err;
951 nic_info.min_tx_bw = npar->min_bw;
952 nic_info.max_tx_bw = npar->max_bw;
953 err = qlcnic_set_nic_info(adapter, &nic_info);
954 if (err)
955 return err;
cea8975e 956
4e8acb01
RB
957 if (npar->enable_pm) {
958 err = qlcnic_config_port_mirroring(adapter,
959 npar->dest_npar, 1, i);
960 if (err)
961 return err;
cea8975e 962 }
4e8acb01
RB
963 err = qlcnic_reset_eswitch_config(adapter, npar, i);
964 if (err)
965 return err;
cea8975e 966 }
4e8acb01 967 return 0;
cea8975e
AC
968}
969
78f84e1a
AKS
970static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
971{
972 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
973 u32 npar_state;
974
975 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
976 return 0;
977
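	/* poll the NPAR state once per second until it reports operational or the timeout expires */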
978 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
979 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
980 msleep(1000);
981 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
982 }
983 if (!npar_opt_timeo) {
984 dev_err(&adapter->pdev->dev,
985 "Waiting for NPAR state to opertional timeout\n");
986 return -EIO;
987 }
988 return 0;
989}
990
174240a8
RB
991static int
992qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
993{
994 int err;
995
996 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
997 adapter->op_mode != QLCNIC_MGMT_FUNC)
998 return 0;
999
1000 err = qlcnic_set_default_offload_settings(adapter);
1001 if (err)
1002 return err;
1003
1004 err = qlcnic_reset_npar_config(adapter);
1005 if (err)
1006 return err;
1007
1008 qlcnic_dev_set_npar_ready(adapter);
1009
1010 return err;
1011}
1012
af19b491
AKS
1013static int
1014qlcnic_start_firmware(struct qlcnic_adapter *adapter)
1015{
d4066833 1016 int err;
af19b491 1017
aa5e18c0
SC
1018 err = qlcnic_can_start_firmware(adapter);
1019 if (err < 0)
1020 return err;
1021 else if (!err)
d4066833 1022 goto check_fw_status;
af19b491 1023
4d5bdb38
AKS
1024 if (load_fw_file)
1025 qlcnic_request_firmware(adapter);
8f891387 1026 else {
8cfdce08
SC
1027 err = qlcnic_check_flash_fw_ver(adapter);
1028 if (err)
8f891387 1029 goto err_out;
1030
4d5bdb38 1031 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
8f891387 1032 }
af19b491
AKS
1033
1034 err = qlcnic_need_fw_reset(adapter);
af19b491 1035 if (err == 0)
4e70812b 1036 goto check_fw_status;
af19b491 1037
d4066833
SC
1038 err = qlcnic_pinit_from_rom(adapter);
1039 if (err)
1040 goto err_out;
af19b491
AKS
1041
1042 err = qlcnic_load_firmware(adapter);
1043 if (err)
1044 goto err_out;
1045
1046 qlcnic_release_firmware(adapter);
d4066833 1047 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
af19b491 1048
d4066833
SC
1049check_fw_status:
1050 err = qlcnic_check_fw_status(adapter);
af19b491
AKS
1051 if (err)
1052 goto err_out;
1053
1054 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
6df900e9 1055 qlcnic_idc_debug_info(adapter, 1);
b18971d1 1056
0866d96d
AC
1057 err = qlcnic_check_eswitch_mode(adapter);
1058 if (err) {
1059 dev_err(&adapter->pdev->dev,
1060 "Memory allocation failed for eswitch\n");
1061 goto err_out;
1062 }
174240a8
RB
1063 err = qlcnic_set_mgmt_operations(adapter);
1064 if (err)
1065 goto err_out;
1066
1067 qlcnic_check_options(adapter);
af19b491
AKS
1068 adapter->need_fw_reset = 0;
1069
a7fc948f
AKS
1070 qlcnic_release_firmware(adapter);
1071 return 0;
af19b491
AKS
1072
1073err_out:
a7fc948f
AKS
1074 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1075 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
0866d96d 1076
af19b491
AKS
1077 qlcnic_release_firmware(adapter);
1078 return err;
1079}
1080
1081static int
1082qlcnic_request_irq(struct qlcnic_adapter *adapter)
1083{
1084 irq_handler_t handler;
1085 struct qlcnic_host_sds_ring *sds_ring;
1086 int err, ring;
1087
1088 unsigned long flags = 0;
1089 struct net_device *netdev = adapter->netdev;
b1fc6d3c 1090 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491 1091
7eb9855d
AKS
1092 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1093 handler = qlcnic_tmp_intr;
1094 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1095 flags |= IRQF_SHARED;
1096
1097 } else {
1098 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1099 handler = qlcnic_msix_intr;
1100 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1101 handler = qlcnic_msi_intr;
1102 else {
1103 flags |= IRQF_SHARED;
1104 handler = qlcnic_intr;
1105 }
af19b491
AKS
1106 }
1107 adapter->irq = netdev->irq;
1108
1109 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1110 sds_ring = &recv_ctx->sds_rings[ring];
1111 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1112 err = request_irq(sds_ring->irq, handler,
1113 flags, sds_ring->name, sds_ring);
1114 if (err)
1115 return err;
1116 }
1117
1118 return 0;
1119}
1120
1121static void
1122qlcnic_free_irq(struct qlcnic_adapter *adapter)
1123{
1124 int ring;
1125 struct qlcnic_host_sds_ring *sds_ring;
1126
b1fc6d3c 1127 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491
AKS
1128
1129 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1130 sds_ring = &recv_ctx->sds_rings[ring];
1131 free_irq(sds_ring->irq, sds_ring);
1132 }
1133}
1134
af19b491
AKS
1135static int
1136__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1137{
8a15ad1f 1138 int ring;
cae82d49
RB
1139 u32 capab2;
1140
8a15ad1f
AKS
1141 struct qlcnic_host_rds_ring *rds_ring;
1142
af19b491
AKS
1143 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1144 return -EIO;
1145
8a15ad1f
AKS
1146 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1147 return 0;
0325d69b
RB
1148 if (qlcnic_set_eswitch_port_config(adapter))
1149 return -EIO;
8a15ad1f 1150
cae82d49
RB
1151 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
1152 capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
1153 if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
1154 adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
1155 }
1156
8a15ad1f
AKS
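	/* create the firmware Rx/Tx context and refill all RDS rings before enabling traffic */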
1157 if (qlcnic_fw_create_ctx(adapter))
1158 return -EIO;
1159
1160 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
b1fc6d3c
AC
1161 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1162 qlcnic_post_rx_buffers(adapter, rds_ring);
8a15ad1f
AKS
1163 }
1164
af19b491
AKS
1165 qlcnic_set_multi(netdev);
1166 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1167
b1fc6d3c 1168 adapter->ahw->linkup = 0;
af19b491
AKS
1169
1170 if (adapter->max_sds_rings > 1)
1171 qlcnic_config_rss(adapter, 1);
1172
1173 qlcnic_config_intr_coalesce(adapter);
1174
24763d80 1175 if (netdev->features & NETIF_F_LRO)
af19b491
AKS
1176 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1177
1178 qlcnic_napi_enable(adapter);
1179
1180 qlcnic_linkevent_request(adapter, 1);
1181
68bf1c68 1182 adapter->reset_context = 0;
af19b491
AKS
1183 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1184 return 0;
1185}
1186
 1187/* Usage: during resume and during the firmware recovery path */
1188
1189static int
1190qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1191{
1192 int err = 0;
1193
1194 rtnl_lock();
1195 if (netif_running(netdev))
1196 err = __qlcnic_up(adapter, netdev);
1197 rtnl_unlock();
1198
1199 return err;
1200}
1201
1202static void
1203__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1204{
1205 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1206 return;
1207
1208 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1209 return;
1210
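	/* make sure the cleared __QLCNIC_DEV_UP bit is visible to other CPUs before the Tx path is torn down */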
1211 smp_mb();
1212 spin_lock(&adapter->tx_clean_lock);
1213 netif_carrier_off(netdev);
1214 netif_tx_disable(netdev);
1215
1216 qlcnic_free_mac_list(adapter);
1217
b5e5492c
AKS
1218 if (adapter->fhash.fnum)
1219 qlcnic_delete_lb_filters(adapter);
1220
af19b491
AKS
1221 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1222
1223 qlcnic_napi_disable(adapter);
1224
8a15ad1f 1225 qlcnic_fw_destroy_ctx(adapter);
cae82d49 1226 adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
8a15ad1f
AKS
1227
1228 qlcnic_reset_rx_buffers_list(adapter);
af19b491
AKS
1229 qlcnic_release_tx_buffers(adapter);
1230 spin_unlock(&adapter->tx_clean_lock);
1231}
1232
 1233/* Usage: during suspend and during the firmware recovery path */
1234
1235static void
1236qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1237{
1238 rtnl_lock();
1239 if (netif_running(netdev))
1240 __qlcnic_down(adapter, netdev);
1241 rtnl_unlock();
1242
1243}
1244
1245static int
1246qlcnic_attach(struct qlcnic_adapter *adapter)
1247{
1248 struct net_device *netdev = adapter->netdev;
1249 struct pci_dev *pdev = adapter->pdev;
8a15ad1f 1250 int err;
af19b491
AKS
1251
1252 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1253 return 0;
1254
af19b491
AKS
1255 err = qlcnic_napi_add(adapter, netdev);
1256 if (err)
1257 return err;
1258
1259 err = qlcnic_alloc_sw_resources(adapter);
1260 if (err) {
1261 dev_err(&pdev->dev, "Error in setting sw resources\n");
8a15ad1f 1262 goto err_out_napi_del;
af19b491
AKS
1263 }
1264
1265 err = qlcnic_alloc_hw_resources(adapter);
1266 if (err) {
1267 dev_err(&pdev->dev, "Error in setting hw resources\n");
1268 goto err_out_free_sw;
1269 }
1270
af19b491
AKS
1271 err = qlcnic_request_irq(adapter);
1272 if (err) {
1273 dev_err(&pdev->dev, "failed to setup interrupt\n");
8a15ad1f 1274 goto err_out_free_hw;
af19b491
AKS
1275 }
1276
af19b491
AKS
1277 qlcnic_create_sysfs_entries(adapter);
1278
1279 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1280 return 0;
1281
8a15ad1f 1282err_out_free_hw:
af19b491
AKS
1283 qlcnic_free_hw_resources(adapter);
1284err_out_free_sw:
1285 qlcnic_free_sw_resources(adapter);
8a15ad1f
AKS
1286err_out_napi_del:
1287 qlcnic_napi_del(adapter);
af19b491
AKS
1288 return err;
1289}
1290
1291static void
1292qlcnic_detach(struct qlcnic_adapter *adapter)
1293{
1294 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1295 return;
1296
1297 qlcnic_remove_sysfs_entries(adapter);
1298
1299 qlcnic_free_hw_resources(adapter);
1300 qlcnic_release_rx_buffers(adapter);
1301 qlcnic_free_irq(adapter);
1302 qlcnic_napi_del(adapter);
1303 qlcnic_free_sw_resources(adapter);
1304
1305 adapter->is_up = 0;
1306}
1307
7eb9855d
AKS
1308void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1309{
1310 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1311 struct qlcnic_host_sds_ring *sds_ring;
1312 int ring;
1313
78ad3892 1314 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
cdaff185
AKS
1315 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1316 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
b1fc6d3c 1317 sds_ring = &adapter->recv_ctx->sds_rings[ring];
cdaff185
AKS
1318 qlcnic_disable_int(sds_ring);
1319 }
7eb9855d
AKS
1320 }
1321
8a15ad1f
AKS
1322 qlcnic_fw_destroy_ctx(adapter);
1323
7eb9855d
AKS
1324 qlcnic_detach(adapter);
1325
1326 adapter->diag_test = 0;
1327 adapter->max_sds_rings = max_sds_rings;
1328
1329 if (qlcnic_attach(adapter))
34ce3626 1330 goto out;
7eb9855d
AKS
1331
1332 if (netif_running(netdev))
1333 __qlcnic_up(adapter, netdev);
34ce3626 1334out:
7eb9855d
AKS
1335 netif_device_attach(netdev);
1336}
1337
b1fc6d3c
AC
1338static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1339{
1340 int err = 0;
1341 adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
1342 GFP_KERNEL);
1343 if (!adapter->ahw) {
1344 dev_err(&adapter->pdev->dev,
1345 "Failed to allocate recv ctx resources for adapter\n");
1346 err = -ENOMEM;
1347 goto err_out;
1348 }
1349 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
1350 GFP_KERNEL);
1351 if (!adapter->recv_ctx) {
1352 dev_err(&adapter->pdev->dev,
1353 "Failed to allocate recv ctx resources for adapter\n");
1354 kfree(adapter->ahw);
1355 adapter->ahw = NULL;
1356 err = -ENOMEM;
8816d009 1357 goto err_out;
b1fc6d3c 1358 }
8816d009
AC
1359 /* Initialize interrupt coalesce parameters */
1360 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1361 adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1362 adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
b1fc6d3c
AC
1363err_out:
1364 return err;
1365}
1366
1367static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
1368{
1369 kfree(adapter->recv_ctx);
1370 adapter->recv_ctx = NULL;
1371
18f2f616
AC
1372 if (adapter->ahw->fw_dump.tmpl_hdr) {
1373 vfree(adapter->ahw->fw_dump.tmpl_hdr);
1374 adapter->ahw->fw_dump.tmpl_hdr = NULL;
1375 }
b1fc6d3c
AC
1376 kfree(adapter->ahw);
1377 adapter->ahw = NULL;
1378}
1379
7eb9855d
AKS
1380int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1381{
1382 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1383 struct qlcnic_host_sds_ring *sds_ring;
8a15ad1f 1384 struct qlcnic_host_rds_ring *rds_ring;
7eb9855d
AKS
1385 int ring;
1386 int ret;
1387
1388 netif_device_detach(netdev);
1389
1390 if (netif_running(netdev))
1391 __qlcnic_down(adapter, netdev);
1392
1393 qlcnic_detach(adapter);
1394
1395 adapter->max_sds_rings = 1;
1396 adapter->diag_test = test;
1397
1398 ret = qlcnic_attach(adapter);
34ce3626
AKS
1399 if (ret) {
1400 netif_device_attach(netdev);
7eb9855d 1401 return ret;
34ce3626 1402 }
7eb9855d 1403
8a15ad1f
AKS
1404 ret = qlcnic_fw_create_ctx(adapter);
1405 if (ret) {
1406 qlcnic_detach(adapter);
57e46248 1407 netif_device_attach(netdev);
8a15ad1f
AKS
1408 return ret;
1409 }
1410
1411 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
b1fc6d3c
AC
1412 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1413 qlcnic_post_rx_buffers(adapter, rds_ring);
8a15ad1f
AKS
1414 }
1415
cdaff185
AKS
1416 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1417 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
b1fc6d3c 1418 sds_ring = &adapter->recv_ctx->sds_rings[ring];
cdaff185
AKS
1419 qlcnic_enable_int(sds_ring);
1420 }
7eb9855d 1421 }
22c8c934
SC
1422
1423 if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) {
1424 adapter->ahw->loopback_state = 0;
1425 qlcnic_linkevent_request(adapter, 1);
1426 }
1427
78ad3892 1428 set_bit(__QLCNIC_DEV_UP, &adapter->state);
7eb9855d
AKS
1429
1430 return 0;
1431}
1432
68bf1c68
AKS
1433/* Reset context in hardware only */
1434static int
1435qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1436{
1437 struct net_device *netdev = adapter->netdev;
1438
1439 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1440 return -EBUSY;
1441
1442 netif_device_detach(netdev);
1443
1444 qlcnic_down(adapter, netdev);
1445
1446 qlcnic_up(adapter, netdev);
1447
1448 netif_device_attach(netdev);
1449
1450 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1451 return 0;
1452}
1453
af19b491
AKS
1454int
1455qlcnic_reset_context(struct qlcnic_adapter *adapter)
1456{
1457 int err = 0;
1458 struct net_device *netdev = adapter->netdev;
1459
1460 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1461 return -EBUSY;
1462
1463 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1464
1465 netif_device_detach(netdev);
1466
1467 if (netif_running(netdev))
1468 __qlcnic_down(adapter, netdev);
1469
1470 qlcnic_detach(adapter);
1471
1472 if (netif_running(netdev)) {
1473 err = qlcnic_attach(adapter);
1d5c88e3 1474 if (!err) {
34ce3626 1475 __qlcnic_up(adapter, netdev);
1d5c88e3
AC
1476 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1477 }
af19b491
AKS
1478 }
1479
1480 netif_device_attach(netdev);
1481 }
1482
af19b491
AKS
1483 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1484 return err;
1485}
1486
1487static int
1488qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1bb09fb9 1489 struct net_device *netdev, u8 pci_using_dac)
af19b491
AKS
1490{
1491 int err;
1492 struct pci_dev *pdev = adapter->pdev;
1493
af19b491
AKS
1494 adapter->mc_enabled = 0;
1495 adapter->max_mc_count = 38;
1496
1497 netdev->netdev_ops = &qlcnic_netdev_ops;
ef71ff83 1498 netdev->watchdog_timeo = 5*HZ;
af19b491
AKS
1499
1500 qlcnic_change_mtu(netdev, netdev->mtu);
1501
1502 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1503
135d84a9
MM
1504 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
1505 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
ac8d0c4f 1506
135d84a9
MM
1507 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1508 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1509 if (pci_using_dac)
1510 netdev->hw_features |= NETIF_F_HIGHDMA;
af19b491 1511
135d84a9 1512 netdev->vlan_features = netdev->hw_features;
af19b491
AKS
1513
1514 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
135d84a9 1515 netdev->hw_features |= NETIF_F_HW_VLAN_TX;
af19b491 1516 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
135d84a9
MM
1517 netdev->hw_features |= NETIF_F_LRO;
1518
1519 netdev->features |= netdev->hw_features |
1520 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
1521
af19b491
AKS
1522 netdev->irq = adapter->msix_entries[0].vector;
1523
af19b491
AKS
1524 err = register_netdev(netdev);
1525 if (err) {
1526 dev_err(&pdev->dev, "failed to register net device\n");
1527 return err;
1528 }
1529
1530 return 0;
1531}
1532
1bb09fb9
AKS
1533static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1534{
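	/* prefer 64-bit DMA; fall back to a 32-bit mask if the platform cannot support it */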
1535 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1536 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1537 *pci_using_dac = 1;
1538 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1539 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1540 *pci_using_dac = 0;
1541 else {
1542 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1543 return -EIO;
1544 }
1545
1546 return 0;
1547}
1548
f94bc1e7
SC
1549static int
1550qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
1551{
1552 adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
1553 GFP_KERNEL);
1554
1555 if (adapter->msix_entries)
1556 return 0;
1557
1558 dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
1559 return -ENOMEM;
1560}
1561
af19b491
AKS
1562static int __devinit
1563qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1564{
1565 struct net_device *netdev = NULL;
1566 struct qlcnic_adapter *adapter = NULL;
1567 int err;
af19b491 1568 uint8_t revision_id;
1bb09fb9 1569 uint8_t pci_using_dac;
da48e6c3 1570 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
af19b491
AKS
1571
1572 err = pci_enable_device(pdev);
1573 if (err)
1574 return err;
1575
1576 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1577 err = -ENODEV;
1578 goto err_out_disable_pdev;
1579 }
1580
1bb09fb9
AKS
1581 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1582 if (err)
1583 goto err_out_disable_pdev;
1584
af19b491
AKS
1585 err = pci_request_regions(pdev, qlcnic_driver_name);
1586 if (err)
1587 goto err_out_disable_pdev;
1588
1589 pci_set_master(pdev);
451724c8 1590 pci_enable_pcie_error_reporting(pdev);
af19b491
AKS
1591
1592 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1593 if (!netdev) {
af19b491
AKS
1594 err = -ENOMEM;
1595 goto err_out_free_res;
1596 }
1597
1598 SET_NETDEV_DEV(netdev, &pdev->dev);
1599
1600 adapter = netdev_priv(netdev);
1601 adapter->netdev = netdev;
1602 adapter->pdev = pdev;
af19b491 1603
2dfc9671
PST
1604 err = qlcnic_alloc_adapter_resources(adapter);
1605 if (err)
b1fc6d3c
AC
1606 goto err_out_free_netdev;
1607
1608 adapter->dev_rst_time = jiffies;
af19b491 1609 revision_id = pdev->revision;
b1fc6d3c 1610 adapter->ahw->revision_id = revision_id;
e5dcf6dc 1611 adapter->mac_learn = qlcnic_mac_learn;
af19b491 1612
b1fc6d3c
AC
1613 rwlock_init(&adapter->ahw->crb_lock);
1614 mutex_init(&adapter->ahw->mem_lock);
af19b491
AKS
1615
1616 spin_lock_init(&adapter->tx_clean_lock);
1617 INIT_LIST_HEAD(&adapter->mac_list);
1618
1619 err = qlcnic_setup_pci_map(adapter);
1620 if (err)
b1fc6d3c 1621 goto err_out_free_hw;
af19b491
AKS
1622
1623 /* This will be reset for mezz cards */
b1fc6d3c 1624 adapter->portnum = adapter->ahw->pci_func;
af19b491
AKS
1625
1626 err = qlcnic_get_board_info(adapter);
1627 if (err) {
1628 dev_err(&pdev->dev, "Error getting board config info.\n");
1629 goto err_out_iounmap;
1630 }
1631
8cfdce08
SC
1632 err = qlcnic_setup_idc_param(adapter);
1633 if (err)
b3a24649 1634 goto err_out_iounmap;
af19b491 1635
1dc0f3c5 1636 adapter->flags |= QLCNIC_NEED_FLR;
b0044bcf 1637
9f26f547 1638 err = adapter->nic_ops->start_firmware(adapter);
a7fc948f 1639 if (err) {
b43e5ee7
SC
1640 dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n"
1641 "\t\tIf reboot doesn't help, try flashing the card\n");
1642 goto err_out_maintenance_mode;
a7fc948f 1643 }
af19b491 1644
da48e6c3
RB
1645 if (qlcnic_read_mac_addr(adapter))
1646 dev_warn(&pdev->dev, "failed to read mac addr\n");
1647
1648 if (adapter->portnum == 0) {
1649 get_brd_name(adapter, brd_name);
1650
1651 pr_info("%s: %s Board Chip rev 0x%x\n",
1652 module_name(THIS_MODULE),
b1fc6d3c 1653 brd_name, adapter->ahw->revision_id);
da48e6c3
RB
1654 }
1655
af19b491
AKS
1656 qlcnic_clear_stats(adapter);
1657
f94bc1e7
SC
1658 err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
1659 if (err)
1660 goto err_out_decr_ref;
1661
af19b491
AKS
1662 qlcnic_setup_intr(adapter);
1663
1bb09fb9 1664 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
af19b491
AKS
1665 if (err)
1666 goto err_out_disable_msi;
1667
1668 pci_set_drvdata(pdev, adapter);
1669
1670 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1671
b1fc6d3c 1672 switch (adapter->ahw->port_type) {
af19b491
AKS
1673 case QLCNIC_GBE:
1674 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1675 adapter->netdev->name);
1676 break;
1677 case QLCNIC_XGBE:
1678 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1679 adapter->netdev->name);
1680 break;
1681 }
1682
e5dcf6dc
SC
1683 if (adapter->mac_learn)
1684 qlcnic_alloc_lb_filters_mem(adapter);
1685
af19b491
AKS
1686 qlcnic_create_diag_entries(adapter);
1687
1688 return 0;
1689
1690err_out_disable_msi:
1691 qlcnic_teardown_intr(adapter);
f94bc1e7 1692 kfree(adapter->msix_entries);
af19b491
AKS
1693
1694err_out_decr_ref:
21854f02 1695 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1696
1697err_out_iounmap:
1698 qlcnic_cleanup_pci_map(adapter);
1699
b1fc6d3c
AC
1700err_out_free_hw:
1701 qlcnic_free_adapter_resources(adapter);
1702
af19b491
AKS
1703err_out_free_netdev:
1704 free_netdev(netdev);
1705
1706err_out_free_res:
1707 pci_release_regions(pdev);
1708
1709err_out_disable_pdev:
1710 pci_set_drvdata(pdev, NULL);
1711 pci_disable_device(pdev);
1712 return err;
b43e5ee7
SC
1713
1714err_out_maintenance_mode:
1715 netdev->netdev_ops = &qlcnic_netdev_failed_ops;
1716 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
1717 err = register_netdev(netdev);
1718 if (err) {
1719 dev_err(&pdev->dev, "failed to register net device\n");
1720 goto err_out_decr_ref;
1721 }
1722 pci_set_drvdata(pdev, adapter);
1723 qlcnic_create_diag_entries(adapter);
1724 return 0;
af19b491
AKS
1725}
1726
1727static void __devexit qlcnic_remove(struct pci_dev *pdev)
1728{
1729 struct qlcnic_adapter *adapter;
1730 struct net_device *netdev;
1731
1732 adapter = pci_get_drvdata(pdev);
1733 if (adapter == NULL)
1734 return;
1735
1736 netdev = adapter->netdev;
1737
1738 qlcnic_cancel_fw_work(adapter);
1739
1740 unregister_netdev(netdev);
1741
af19b491
AKS
1742 qlcnic_detach(adapter);
1743
2e9d722d
AC
1744 if (adapter->npars != NULL)
1745 kfree(adapter->npars);
1746 if (adapter->eswitch != NULL)
1747 kfree(adapter->eswitch);
1748
21854f02 1749 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1750
1751 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1752
b5e5492c
AKS
1753 qlcnic_free_lb_filters_mem(adapter);
1754
af19b491 1755 qlcnic_teardown_intr(adapter);
f94bc1e7 1756 kfree(adapter->msix_entries);
af19b491
AKS
1757
1758 qlcnic_remove_diag_entries(adapter);
1759
1760 qlcnic_cleanup_pci_map(adapter);
1761
1762 qlcnic_release_firmware(adapter);
1763
451724c8 1764 pci_disable_pcie_error_reporting(pdev);
af19b491
AKS
1765 pci_release_regions(pdev);
1766 pci_disable_device(pdev);
1767 pci_set_drvdata(pdev, NULL);
1768
b1fc6d3c 1769 qlcnic_free_adapter_resources(adapter);
af19b491
AKS
1770 free_netdev(netdev);
1771}
1772static int __qlcnic_shutdown(struct pci_dev *pdev)
1773{
1774 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1775 struct net_device *netdev = adapter->netdev;
1776 int retval;
1777
1778 netif_device_detach(netdev);
1779
1780 qlcnic_cancel_fw_work(adapter);
1781
1782 if (netif_running(netdev))
1783 qlcnic_down(adapter, netdev);
1784
21854f02 1785 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1786
1787 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1788
1789 retval = pci_save_state(pdev);
1790 if (retval)
1791 return retval;
1792
1793 if (qlcnic_wol_supported(adapter)) {
1794 pci_enable_wake(pdev, PCI_D3cold, 1);
1795 pci_enable_wake(pdev, PCI_D3hot, 1);
1796 }
1797
1798 return 0;
1799}
1800
1801static void qlcnic_shutdown(struct pci_dev *pdev)
1802{
1803 if (__qlcnic_shutdown(pdev))
1804 return;
1805
1806 pci_disable_device(pdev);
1807}
1808
1809#ifdef CONFIG_PM
1810static int
1811qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1812{
1813 int retval;
1814
1815 retval = __qlcnic_shutdown(pdev);
1816 if (retval)
1817 return retval;
1818
1819 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1820 return 0;
1821}
1822
1823static int
1824qlcnic_resume(struct pci_dev *pdev)
1825{
1826 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1827 struct net_device *netdev = adapter->netdev;
1828 int err;
1829
1830 err = pci_enable_device(pdev);
1831 if (err)
1832 return err;
1833
1834 pci_set_power_state(pdev, PCI_D0);
1835 pci_set_master(pdev);
1836 pci_restore_state(pdev);
1837
9f26f547 1838 err = adapter->nic_ops->start_firmware(adapter);
af19b491
AKS
1839 if (err) {
1840 dev_err(&pdev->dev, "failed to start firmware\n");
1841 return err;
1842 }
1843
1844 if (netif_running(netdev)) {
af19b491
AKS
1845 err = qlcnic_up(adapter, netdev);
1846 if (err)
52486a3a 1847 goto done;
af19b491 1848
aec1e845 1849 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491 1850 }
52486a3a 1851done:
af19b491
AKS
1852 netif_device_attach(netdev);
1853 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1854 return 0;
af19b491
AKS
1855}
1856#endif
1857
1858static int qlcnic_open(struct net_device *netdev)
1859{
1860 struct qlcnic_adapter *adapter = netdev_priv(netdev);
b43e5ee7 1861 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
af19b491
AKS
1862 int err;
1863
b43e5ee7
SC
1864 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
1865 netdev_err(netdev, "Device in FAILED state\n");
1866 return -EIO;
1867 }
1868
c55ad8e5
AKS
1869 netif_carrier_off(netdev);
1870
af19b491
AKS
1871 err = qlcnic_attach(adapter);
1872 if (err)
1873 return err;
1874
1875 err = __qlcnic_up(adapter, netdev);
1876 if (err)
1877 goto err_out;
1878
1879 netif_start_queue(netdev);
1880
1881 return 0;
1882
1883err_out:
1884 qlcnic_detach(adapter);
1885 return err;
1886}
1887
1888/*
1889 * qlcnic_close - Disables a network interface entry point
1890 */
1891static int qlcnic_close(struct net_device *netdev)
1892{
1893 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1894
1895 __qlcnic_down(adapter, netdev);
1896 return 0;
1897}
1898
e5dcf6dc 1899void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
b5e5492c
AKS
1900{
1901 void *head;
1902 int i;
1903
e5dcf6dc 1904 if (adapter->fhash.fmax && adapter->fhash.fhead)
b5e5492c
AKS
1905 return;
1906
1907 spin_lock_init(&adapter->mac_learn_lock);
1908
1909 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1910 GFP_KERNEL);
1911 if (!head)
1912 return;
1913
1914 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
43d620c8 1915 adapter->fhash.fhead = head;
b5e5492c
AKS
1916
1917 for (i = 0; i < adapter->fhash.fmax; i++)
1918 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1919}
1920
1921static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1922{
1923 if (adapter->fhash.fmax && adapter->fhash.fhead)
1924 kfree(adapter->fhash.fhead);
1925
1926 adapter->fhash.fhead = NULL;
1927 adapter->fhash.fmax = 0;
1928}
1929
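/*
 * Post a MAC-learn request descriptor on the TX ring so the firmware
 * adds (or re-adds) this source MAC/VLAN pair to its filter table.
 */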
1930static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
7e56cac4 1931 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
b5e5492c
AKS
1932{
1933 struct cmd_desc_type0 *hwdesc;
1934 struct qlcnic_nic_req *req;
1935 struct qlcnic_mac_req *mac_req;
7e56cac4 1936 struct qlcnic_vlan_req *vlan_req;
b5e5492c
AKS
1937 u32 producer;
1938 u64 word;
1939
1940 producer = tx_ring->producer;
1941 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1942
1943 req = (struct qlcnic_nic_req *)hwdesc;
1944 memset(req, 0, sizeof(struct qlcnic_nic_req));
1945 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1946
1947 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1948 req->req_hdr = cpu_to_le64(word);
1949
1950 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
03c5d770 1951 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
b5e5492c
AKS
1952 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1953
7e56cac4
SC
1954 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1955 vlan_req->vlan_id = vlan_id;
03c5d770 1956
b5e5492c 1957 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
036d61f0 1958 smp_mb();
b5e5492c
AKS
1959}
1960
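/* Fold bits 16-18 and 40-42 of the 64-bit MAC value into a 6-bit hash bucket index */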
1961#define QLCNIC_MAC_HASH(MAC)\
1962 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1963
1964static void
1965qlcnic_send_filter(struct qlcnic_adapter *adapter,
1966 struct qlcnic_host_tx_ring *tx_ring,
1967 struct cmd_desc_type0 *first_desc,
1968 struct sk_buff *skb)
1969{
1970 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1971 struct qlcnic_filter *fil, *tmp_fil;
1972 struct hlist_node *tmp_hnode, *n;
1973 struct hlist_head *head;
1974 u64 src_addr = 0;
7e56cac4 1975 __le16 vlan_id = 0;
b5e5492c
AKS
1976 u8 hindex;
1977
2e42e474 1978 if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
b5e5492c
AKS
1979 return;
1980
1981 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1982 return;
1983
03c5d770
AKS
1984 /* Only NPAR capable devices support vlan based learning */
1985 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1986 vlan_id = first_desc->vlan_TCI;
b5e5492c
AKS
1987 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1988 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1989 head = &(adapter->fhash.fhead[hindex]);
1990
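/*
 * If this source MAC/VLAN is already in the table only refresh its
 * timestamp; the firmware filter is re-programmed once the entry has
 * aged past QLCNIC_READD_AGE seconds.
 */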
1991 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
03c5d770
AKS
1992 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1993 tmp_fil->vlan_id == vlan_id) {
e5edb7b1 1994
1995 if (jiffies >
1996 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1997 qlcnic_change_filter(adapter, src_addr, vlan_id,
1998 tx_ring);
b5e5492c
AKS
1999 tmp_fil->ftime = jiffies;
2000 return;
2001 }
2002 }
2003
2004 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
2005 if (!fil)
2006 return;
2007
03c5d770 2008 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
b5e5492c
AKS
2009
2010 fil->ftime = jiffies;
03c5d770 2011 fil->vlan_id = vlan_id;
b5e5492c
AKS
2012 memcpy(fil->faddr, &src_addr, ETH_ALEN);
2013 spin_lock(&adapter->mac_learn_lock);
2014 hlist_add_head(&(fil->fnode), head);
2015 adapter->fhash.fnum++;
2016 spin_unlock(&adapter->mac_learn_lock);
2017}
2018
036d61f0
AC
2019static int
2020qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
af19b491
AKS
2021 struct cmd_desc_type0 *first_desc,
2022 struct sk_buff *skb)
2023{
036d61f0
AC
2024 u8 opcode = 0, hdr_len = 0;
2025 u16 flags = 0, vlan_tci = 0;
2026 int copied, offset, copy_len;
af19b491
AKS
2027 struct cmd_desc_type0 *hwdesc;
2028 struct vlan_ethhdr *vh;
036d61f0
AC
2029 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2030 u16 protocol = ntohs(skb->protocol);
2e9d722d 2031 u32 producer = tx_ring->producer;
036d61f0
AC
2032
2033 if (protocol == ETH_P_8021Q) {
2034 vh = (struct vlan_ethhdr *)skb->data;
2035 flags = FLAGS_VLAN_TAGGED;
2036 vlan_tci = vh->h_vlan_TCI;
0bb79565 2037 protocol = ntohs(vh->h_vlan_encapsulated_proto);
036d61f0
AC
2038 } else if (vlan_tx_tag_present(skb)) {
2039 flags = FLAGS_VLAN_OOB;
2040 vlan_tci = vlan_tx_tag_get(skb);
2041 }
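/*
 * With a port VLAN (pvid) configured, tagged frames are dropped unless
 * tagging is explicitly enabled; untagged frames go out with the pvid
 * as an out-of-band tag.
 */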
2042 if (unlikely(adapter->pvid)) {
2043 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2044 return -EIO;
2045 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
2046 goto set_flags;
2047
2048 flags = FLAGS_VLAN_OOB;
2049 vlan_tci = adapter->pvid;
2050 }
2051set_flags:
2052 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
2053 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
af19b491 2054
2e9d722d
AC
2055 if (*(skb->data) & BIT_0) {
2056 flags |= BIT_0;
2057 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
2058 }
036d61f0
AC
2059 opcode = TX_ETHER_PKT;
2060 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
af19b491
AKS
2061 skb_shinfo(skb)->gso_size > 0) {
2062
2063 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2064
2065 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2066 first_desc->total_hdr_length = hdr_len;
036d61f0
AC
2067
2068 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
2069
2070 /* For LSO, we need to copy the MAC/IP/TCP headers into
2071 * the descriptor ring */
2072 copied = 0;
2073 offset = 2;
2074
2075 if (flags & FLAGS_VLAN_OOB) {
af19b491
AKS
2076 first_desc->total_hdr_length += VLAN_HLEN;
2077 first_desc->tcp_hdr_offset = VLAN_HLEN;
2078 first_desc->ip_hdr_offset = VLAN_HLEN;
2079 /* Only in case of TSO on vlan device */
2080 flags |= FLAGS_VLAN_TAGGED;
036d61f0
AC
2081
2082 /* Create a TSO vlan header template for firmware */
2083
2084 hwdesc = &tx_ring->desc_head[producer];
2085 tx_ring->cmd_buf_arr[producer].skb = NULL;
2086
2087 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2088 offset, hdr_len + VLAN_HLEN);
2089
2090 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
2091 skb_copy_from_linear_data(skb, vh, 12);
2092 vh->h_vlan_proto = htons(ETH_P_8021Q);
2093 vh->h_vlan_TCI = htons(vlan_tci);
2094
2095 skb_copy_from_linear_data_offset(skb, 12,
2096 (char *)vh + 16, copy_len - 16);
2097
2098 copied = copy_len - VLAN_HLEN;
2099 offset = 0;
2100
2101 producer = get_next_index(producer, tx_ring->num_desc);
af19b491
AKS
2102 }
2103
036d61f0
AC
2104 while (copied < hdr_len) {
2105
2106 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2107 offset, (hdr_len - copied));
2108
2109 hwdesc = &tx_ring->desc_head[producer];
2110 tx_ring->cmd_buf_arr[producer].skb = NULL;
2111
2112 skb_copy_from_linear_data_offset(skb, copied,
2113 (char *) hwdesc + offset, copy_len);
2114
2115 copied += copy_len;
2116 offset = 0;
2117
2118 producer = get_next_index(producer, tx_ring->num_desc);
2119 }
2120
2121 tx_ring->producer = producer;
2122 smp_mb();
2123 adapter->stats.lso_frames++;
af19b491
AKS
2124
2125 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2126 u8 l4proto;
2127
036d61f0 2128 if (protocol == ETH_P_IP) {
af19b491
AKS
2129 l4proto = ip_hdr(skb)->protocol;
2130
2131 if (l4proto == IPPROTO_TCP)
2132 opcode = TX_TCP_PKT;
2133 else if (l4proto == IPPROTO_UDP)
2134 opcode = TX_UDP_PKT;
036d61f0 2135 } else if (protocol == ETH_P_IPV6) {
af19b491
AKS
2136 l4proto = ipv6_hdr(skb)->nexthdr;
2137
2138 if (l4proto == IPPROTO_TCP)
2139 opcode = TX_TCPV6_PKT;
2140 else if (l4proto == IPPROTO_UDP)
2141 opcode = TX_UDPV6_PKT;
2142 }
2143 }
af19b491
AKS
2144 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
2145 first_desc->ip_hdr_offset += skb_network_offset(skb);
2146 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2147
036d61f0 2148 return 0;
af19b491
AKS
2149}
2150
2151static int
2152qlcnic_map_tx_skb(struct pci_dev *pdev,
2153 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2154{
2155 struct qlcnic_skb_frag *nf;
2156 struct skb_frag_struct *frag;
2157 int i, nr_frags;
2158 dma_addr_t map;
2159
2160 nr_frags = skb_shinfo(skb)->nr_frags;
2161 nf = &pbuf->frag_array[0];
2162
2163 map = pci_map_single(pdev, skb->data,
2164 skb_headlen(skb), PCI_DMA_TODEVICE);
2165 if (pci_dma_mapping_error(pdev, map))
2166 goto out_err;
2167
2168 nf->dma = map;
2169 nf->length = skb_headlen(skb);
2170
2171 for (i = 0; i < nr_frags; i++) {
2172 frag = &skb_shinfo(skb)->frags[i];
2173 nf = &pbuf->frag_array[i+1];
2174
9e903e08 2175 map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
5d6bcdfe
IC
2176 DMA_TO_DEVICE);
2177 if (dma_mapping_error(&pdev->dev, map))
af19b491
AKS
2178 goto unwind;
2179
2180 nf->dma = map;
9e903e08 2181 nf->length = skb_frag_size(frag);
af19b491
AKS
2182 }
2183
2184 return 0;
2185
2186unwind:
2187 while (--i >= 0) {
2188 nf = &pbuf->frag_array[i+1];
2189 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2190 }
2191
2192 nf = &pbuf->frag_array[0];
2193 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2194
2195out_err:
2196 return -ENOMEM;
2197}
2198
036d61f0
AC
2199static void
2200qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2201 struct qlcnic_cmd_buffer *pbuf)
8cf61f89 2202{
036d61f0
AC
2203 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2204 int nr_frags = skb_shinfo(skb)->nr_frags;
2205 int i;
8cf61f89 2206
036d61f0
AC
2207 for (i = 0; i < nr_frags; i++) {
2208 nf = &pbuf->frag_array[i+1];
2209 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
8cf61f89 2210 }
8cf61f89 2211
036d61f0
AC
2212 nf = &pbuf->frag_array[0];
2213 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
5b446c6a 2214 pbuf->skb = NULL;
8cf61f89
AKS
2215}
2216
af19b491
AKS
2217static inline void
2218qlcnic_clear_cmddesc(u64 *desc)
2219{
2220 desc[0] = 0ULL;
2221 desc[2] = 0ULL;
8cf61f89 2222 desc[7] = 0ULL;
af19b491
AKS
2223}
2224
cdaff185 2225netdev_tx_t
af19b491
AKS
2226qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2227{
2228 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2229 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2230 struct qlcnic_cmd_buffer *pbuf;
2231 struct qlcnic_skb_frag *buffrag;
2232 struct cmd_desc_type0 *hwdesc, *first_desc;
2233 struct pci_dev *pdev;
dcb50aff 2234 struct ethhdr *phdr;
91a403ca 2235 int delta = 0;
af19b491
AKS
2236 int i, k;
2237
2238 u32 producer;
036d61f0 2239 int frag_count;
af19b491
AKS
2240 u32 num_txd = tx_ring->num_desc;
2241
780ab790
AKS
2242 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2243 netif_stop_queue(netdev);
2244 return NETDEV_TX_BUSY;
2245 }
2246
fe4d434d 2247 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff 2248 phdr = (struct ethhdr *)skb->data;
2e42e474 2249 if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
fe4d434d
SC
2250 goto drop_packet;
2251 }
2252
af19b491 2253 frag_count = skb_shinfo(skb)->nr_frags + 1;
91a403ca
AKS
2254 /* 14 frags supported for normal packet and
2255 * 32 frags supported for TSO packet
2256 */
2257 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2258
2259 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
9e903e08 2260 delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
91a403ca
AKS
2261
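/* Linearize the excess fragments so a non-TSO frame fits within QLCNIC_MAX_FRAGS_PER_TX fragments */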
2262 if (!__pskb_pull_tail(skb, delta))
2263 goto drop_packet;
2264
2265 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2266 }
af19b491 2267
ef71ff83 2268 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2269 netif_stop_queue(netdev);
ef71ff83
RB
2270 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2271 netif_start_queue(netdev);
2272 else {
2273 adapter->stats.xmit_off++;
2274 return NETDEV_TX_BUSY;
2275 }
af19b491
AKS
2276 }
2277
2278 producer = tx_ring->producer;
2279 pbuf = &tx_ring->cmd_buf_arr[producer];
2280
2281 pdev = adapter->pdev;
2282
8cf61f89
AKS
2283 first_desc = hwdesc = &tx_ring->desc_head[producer];
2284 qlcnic_clear_cmddesc((u64 *)hwdesc);
2285
8ae6df97
AKS
2286 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2287 adapter->stats.tx_dma_map_error++;
af19b491 2288 goto drop_packet;
8ae6df97 2289 }
af19b491
AKS
2290
2291 pbuf->skb = skb;
2292 pbuf->frag_count = frag_count;
2293
af19b491
AKS
2294 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2295 qlcnic_set_tx_port(first_desc, adapter->portnum);
2296
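/*
 * Each command descriptor carries up to four buffer address/length
 * pairs; advance to a fresh descriptor every fourth fragment.
 */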
2297 for (i = 0; i < frag_count; i++) {
2298
2299 k = i % 4;
2300
2301 if ((k == 0) && (i > 0)) {
2302 /* move to next desc.*/
2303 producer = get_next_index(producer, num_txd);
2304 hwdesc = &tx_ring->desc_head[producer];
2305 qlcnic_clear_cmddesc((u64 *)hwdesc);
2306 tx_ring->cmd_buf_arr[producer].skb = NULL;
2307 }
2308
2309 buffrag = &pbuf->frag_array[i];
2310
2311 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2312 switch (k) {
2313 case 0:
2314 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2315 break;
2316 case 1:
2317 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2318 break;
2319 case 2:
2320 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2321 break;
2322 case 3:
2323 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2324 break;
2325 }
2326 }
2327
2328 tx_ring->producer = get_next_index(producer, num_txd);
036d61f0 2329 smp_mb();
af19b491 2330
036d61f0
AC
2331 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2332 goto unwind_buff;
af19b491 2333
e5dcf6dc 2334 if (adapter->mac_learn)
b5e5492c
AKS
2335 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2336
af19b491
AKS
2337 adapter->stats.txbytes += skb->len;
2338 adapter->stats.xmitcalled++;
2339
f127f472
SC
2340 qlcnic_update_cmd_producer(adapter, tx_ring);
2341
af19b491
AKS
2342 return NETDEV_TX_OK;
2343
036d61f0
AC
2344unwind_buff:
2345 qlcnic_unmap_buffers(pdev, skb, pbuf);
af19b491
AKS
2346drop_packet:
2347 adapter->stats.txdropped++;
2348 dev_kfree_skb_any(skb);
2349 return NETDEV_TX_OK;
2350}
2351
2352static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2353{
2354 struct net_device *netdev = adapter->netdev;
2355 u32 temp, temp_state, temp_val;
2356 int rv = 0;
2357
2358 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2359
2360 temp_state = qlcnic_get_temp_state(temp);
2361 temp_val = qlcnic_get_temp_val(temp);
2362
2363 if (temp_state == QLCNIC_TEMP_PANIC) {
2364 dev_err(&netdev->dev,
2365 "Device temperature %d degrees C exceeds"
2366 " maximum allowed. Hardware has been shut down.\n",
2367 temp_val);
2368 rv = 1;
2369 } else if (temp_state == QLCNIC_TEMP_WARN) {
2370 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2371 dev_err(&netdev->dev,
2372 "Device temperature %d degrees C "
2373 "exceeds operating range."
2374 " Immediate action needed.\n",
2375 temp_val);
2376 }
2377 } else {
2378 if (adapter->temp == QLCNIC_TEMP_WARN) {
2379 dev_info(&netdev->dev,
2380 "Device temperature is now %d degrees C"
2381 " in normal range.\n", temp_val);
2382 }
2383 }
2384 adapter->temp = temp_state;
2385 return rv;
2386}
2387
2388void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2389{
2390 struct net_device *netdev = adapter->netdev;
2391
b1fc6d3c 2392 if (adapter->ahw->linkup && !linkup) {
69324275 2393 netdev_info(netdev, "NIC Link is down\n");
b1fc6d3c 2394 adapter->ahw->linkup = 0;
af19b491
AKS
2395 if (netif_running(netdev)) {
2396 netif_carrier_off(netdev);
2397 netif_stop_queue(netdev);
2398 }
b1fc6d3c 2399 } else if (!adapter->ahw->linkup && linkup) {
69324275 2400 netdev_info(netdev, "NIC Link is up\n");
b1fc6d3c 2401 adapter->ahw->linkup = 1;
af19b491
AKS
2402 if (netif_running(netdev)) {
2403 netif_carrier_on(netdev);
2404 netif_wake_queue(netdev);
2405 }
2406 }
2407}
2408
2409static void qlcnic_tx_timeout(struct net_device *netdev)
2410{
2411 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2412
2413 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2414 return;
2415
2416 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2417
2418 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2419 adapter->need_fw_reset = 1;
2420 else
2421 adapter->reset_context = 1;
af19b491
AKS
2422}
2423
2424static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2425{
2426 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2427 struct net_device_stats *stats = &netdev->stats;
2428
af19b491
AKS
2429 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2430 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2431 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2432 stats->tx_bytes = adapter->stats.txbytes;
2433 stats->rx_dropped = adapter->stats.rxdropped;
2434 stats->tx_dropped = adapter->stats.txdropped;
2435
2436 return stats;
2437}
2438
7eb9855d 2439static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2440{
af19b491
AKS
2441 u32 status;
2442
2443 status = readl(adapter->isr_int_vec);
2444
2445 if (!(status & adapter->int_vec_bit))
2446 return IRQ_NONE;
2447
2448 /* check interrupt state machine, to be sure */
2449 status = readl(adapter->crb_int_state_reg);
2450 if (!ISR_LEGACY_INT_TRIGGERED(status))
2451 return IRQ_NONE;
2452
2453 writel(0xffffffff, adapter->tgt_status_reg);
2454 /* read twice to ensure write is flushed */
2455 readl(adapter->isr_int_vec);
2456 readl(adapter->isr_int_vec);
2457
7eb9855d
AKS
2458 return IRQ_HANDLED;
2459}
2460
2461static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2462{
2463 struct qlcnic_host_sds_ring *sds_ring = data;
2464 struct qlcnic_adapter *adapter = sds_ring->adapter;
2465
2466 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2467 goto done;
2468 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2469 writel(0xffffffff, adapter->tgt_status_reg);
2470 goto done;
2471 }
2472
2473 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2474 return IRQ_NONE;
2475
2476done:
2477 adapter->diag_cnt++;
2478 qlcnic_enable_int(sds_ring);
2479 return IRQ_HANDLED;
2480}
2481
2482static irqreturn_t qlcnic_intr(int irq, void *data)
2483{
2484 struct qlcnic_host_sds_ring *sds_ring = data;
2485 struct qlcnic_adapter *adapter = sds_ring->adapter;
2486
2487 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2488 return IRQ_NONE;
2489
af19b491
AKS
2490 napi_schedule(&sds_ring->napi);
2491
2492 return IRQ_HANDLED;
2493}
2494
2495static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2496{
2497 struct qlcnic_host_sds_ring *sds_ring = data;
2498 struct qlcnic_adapter *adapter = sds_ring->adapter;
2499
2500 /* clear interrupt */
2501 writel(0xffffffff, adapter->tgt_status_reg);
2502
2503 napi_schedule(&sds_ring->napi);
2504 return IRQ_HANDLED;
2505}
2506
2507static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2508{
2509 struct qlcnic_host_sds_ring *sds_ring = data;
2510
2511 napi_schedule(&sds_ring->napi);
2512 return IRQ_HANDLED;
2513}
2514
2515static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2516{
2517 u32 sw_consumer, hw_consumer;
2518 int count = 0, i;
2519 struct qlcnic_cmd_buffer *buffer;
2520 struct pci_dev *pdev = adapter->pdev;
2521 struct net_device *netdev = adapter->netdev;
2522 struct qlcnic_skb_frag *frag;
2523 int done;
2524 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2525
2526 if (!spin_trylock(&adapter->tx_clean_lock))
2527 return 1;
2528
2529 sw_consumer = tx_ring->sw_consumer;
2530 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2531
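/* Reclaim completed TX buffers, at most MAX_STATUS_HANDLE per invocation */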
2532 while (sw_consumer != hw_consumer) {
2533 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2534 if (buffer->skb) {
2535 frag = &buffer->frag_array[0];
2536 pci_unmap_single(pdev, frag->dma, frag->length,
2537 PCI_DMA_TODEVICE);
2538 frag->dma = 0ULL;
2539 for (i = 1; i < buffer->frag_count; i++) {
2540 frag++;
2541 pci_unmap_page(pdev, frag->dma, frag->length,
2542 PCI_DMA_TODEVICE);
2543 frag->dma = 0ULL;
2544 }
2545
2546 adapter->stats.xmitfinished++;
2547 dev_kfree_skb_any(buffer->skb);
2548 buffer->skb = NULL;
2549 }
2550
2551 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2552 if (++count >= MAX_STATUS_HANDLE)
2553 break;
2554 }
2555
2556 if (count && netif_running(netdev)) {
2557 tx_ring->sw_consumer = sw_consumer;
2558
2559 smp_mb();
2560
2561 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2562 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2563 netif_wake_queue(netdev);
8bfe8b91 2564 adapter->stats.xmit_on++;
af19b491 2565 }
af19b491 2566 }
ef71ff83 2567 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2568 }
2569 /*
2570 * If everything is freed up to the consumer then check if the ring is full.
2571 * If the ring is full then check if more needs to be freed and
2572 * schedule the callback again.
2573 *
2574 * This happens when there are 2 CPUs. One could be freeing and the
2575 * other filling it. If the ring is full when we get out of here and
2576 * the card has already interrupted the host then the host can miss the
2577 * interrupt.
2578 *
2579 * There is still a possible race condition and the host could miss an
2580 * interrupt. The card has to take care of this.
2581 */
2582 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2583 done = (sw_consumer == hw_consumer);
2584 spin_unlock(&adapter->tx_clean_lock);
2585
2586 return done;
2587}
2588
2589static int qlcnic_poll(struct napi_struct *napi, int budget)
2590{
2591 struct qlcnic_host_sds_ring *sds_ring =
2592 container_of(napi, struct qlcnic_host_sds_ring, napi);
2593
2594 struct qlcnic_adapter *adapter = sds_ring->adapter;
2595
2596 int tx_complete;
2597 int work_done;
2598
2599 tx_complete = qlcnic_process_cmd_ring(adapter);
2600
2601 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2602
2603 if ((work_done < budget) && tx_complete) {
2604 napi_complete(&sds_ring->napi);
2605 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2606 qlcnic_enable_int(sds_ring);
2607 }
2608
2609 return work_done;
2610}
2611
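/*
 * RX-only NAPI handler: unlike qlcnic_poll() above, it does not reap
 * TX completions before completing the poll.
 */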
8f891387 2612static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2613{
2614 struct qlcnic_host_sds_ring *sds_ring =
2615 container_of(napi, struct qlcnic_host_sds_ring, napi);
2616
2617 struct qlcnic_adapter *adapter = sds_ring->adapter;
2618 int work_done;
2619
2620 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2621
2622 if (work_done < budget) {
2623 napi_complete(&sds_ring->napi);
2624 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2625 qlcnic_enable_int(sds_ring);
2626 }
2627
2628 return work_done;
2629}
2630
af19b491
AKS
2631#ifdef CONFIG_NET_POLL_CONTROLLER
2632static void qlcnic_poll_controller(struct net_device *netdev)
2633{
bf82791e
YL
2634 int ring;
2635 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2636 struct qlcnic_adapter *adapter = netdev_priv(netdev);
b1fc6d3c 2637 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
bf82791e 2638
af19b491 2639 disable_irq(adapter->irq);
bf82791e
YL
2640 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2641 sds_ring = &recv_ctx->sds_rings[ring];
2642 qlcnic_intr(adapter->irq, sds_ring);
2643 }
af19b491
AKS
2644 enable_irq(adapter->irq);
2645}
2646#endif
2647
6df900e9
SC
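/*
 * Stash the port number, an encoding tag and the time since the last
 * device reset in the IDC scratch register for post-mortem debugging.
 */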
2648static void
2649qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2650{
2651 u32 val;
2652
2653 val = adapter->portnum & 0xf;
2654 val |= encoding << 7;
2655 val |= (jiffies - adapter->dev_rst_time) << 8;
2656
2657 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2658 adapter->dev_rst_time = jiffies;
2659}
2660
ade91f8e
AKS
2661static int
2662qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2663{
2664 u32 val;
2665
2666 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2667 state != QLCNIC_DEV_NEED_QUISCENT);
2668
2669 if (qlcnic_api_lock(adapter))
ade91f8e 2670 return -EIO;
af19b491
AKS
2671
2672 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2673
2674 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2675 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2676 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2677 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2678
2679 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2680
2681 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2682
2683 return 0;
af19b491
AKS
2684}
2685
1b95a839
AKS
2686static int
2687qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2688{
2689 u32 val;
2690
2691 if (qlcnic_api_lock(adapter))
2692 return -EBUSY;
2693
2694 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2695 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2696 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2697
2698 qlcnic_api_unlock(adapter);
2699
2700 return 0;
2701}
2702
af19b491 2703static void
21854f02 2704qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2705{
2706 u32 val;
2707
2708 if (qlcnic_api_lock(adapter))
2709 goto err;
2710
31018e06 2711 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2712 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2713 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2714
21854f02
AKS
2715 if (failed) {
2716 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2717 dev_info(&adapter->pdev->dev,
2718 "Device state set to Failed. Please Reboot\n");
2719 } else if (!(val & 0x11111111))
af19b491
AKS
2720 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2721
2722 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2723 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2724 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2725
2726 qlcnic_api_unlock(adapter);
2727err:
2728 adapter->fw_fail_cnt = 0;
032a13c7 2729 adapter->flags &= ~QLCNIC_FW_HANG;
af19b491
AKS
2730 clear_bit(__QLCNIC_START_FW, &adapter->state);
2731 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2732}
2733
f73dfc50 2734/* Grab api lock, before checking state */
af19b491
AKS
2735static int
2736qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2737{
602ca6f0 2738 int act, state, active_mask;
af19b491
AKS
2739
2740 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2741 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491 2742
602ca6f0
SV
2743 if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
2744 active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
2745 act = act & active_mask;
2746 }
2747
af19b491
AKS
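/*
 * Each PCI function owns one nibble in the DRV_ACTIVE/DRV_STATE
 * registers.  Return 0 once every active function (the reset owner
 * excludes itself) has acked the reset request, or the quiescent
 * request via the bit one position above it.
 */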
2748 if (((state & 0x11111111) == (act & 0x11111111)) ||
2749 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2750 return 0;
2751 else
2752 return 1;
2753}
2754
96f8118c
SC
2755static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2756{
2757 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2758
2759 if (val != QLCNIC_DRV_IDC_VER) {
2760 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2761 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2762 }
2763
2764 return 0;
2765}
2766
af19b491
AKS
2767static int
2768qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2769{
2770 u32 val, prev_state;
aa5e18c0 2771 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2772 u8 portnum = adapter->portnum;
96f8118c 2773 u8 ret;
af19b491 2774
f73dfc50
AKS
2775 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2776 return 1;
2777
af19b491
AKS
2778 if (qlcnic_api_lock(adapter))
2779 return -1;
2780
31018e06 2781 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2782 if (!(val & (1 << (portnum * 4)))) {
2783 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2784 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2785 }
2786
2787 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2788 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2789
2790 switch (prev_state) {
2791 case QLCNIC_DEV_COLD:
bbd8c6a4 2792 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2793 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2794 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2795 qlcnic_api_unlock(adapter);
2796 return 1;
2797
2798 case QLCNIC_DEV_READY:
96f8118c 2799 ret = qlcnic_check_idc_ver(adapter);
af19b491 2800 qlcnic_api_unlock(adapter);
96f8118c 2801 return ret;
af19b491
AKS
2802
2803 case QLCNIC_DEV_NEED_RESET:
2804 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2805 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2806 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2807 break;
2808
2809 case QLCNIC_DEV_NEED_QUISCENT:
2810 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2811 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2812 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2813 break;
2814
2815 case QLCNIC_DEV_FAILED:
a7fc948f 2816 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2817 qlcnic_api_unlock(adapter);
2818 return -1;
bbd8c6a4
AKS
2819
2820 case QLCNIC_DEV_INITIALIZING:
2821 case QLCNIC_DEV_QUISCENT:
2822 break;
af19b491
AKS
2823 }
2824
2825 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2826
2827 do {
af19b491 2828 msleep(1000);
a5e463d0
SC
2829 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2830
2831 if (prev_state == QLCNIC_DEV_QUISCENT)
2832 continue;
2833 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2834
65b5b420
AKS
2835 if (!dev_init_timeo) {
2836 dev_err(&adapter->pdev->dev,
2837 "Waiting for device to initialize timeout\n");
af19b491 2838 return -1;
65b5b420 2839 }
af19b491
AKS
2840
2841 if (qlcnic_api_lock(adapter))
2842 return -1;
2843
2844 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2845 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2846 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2847
96f8118c 2848 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2849 qlcnic_api_unlock(adapter);
2850
96f8118c 2851 return ret;
af19b491
AKS
2852}
2853
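/*
 * Delayed work driving firmware re-initialization.  The reset owner
 * waits for the other active functions to ack (or times out),
 * optionally captures a firmware dump, restarts the firmware and then
 * schedules qlcnic_attach_work(); a non-privileged function simply
 * waits for the device to reach the READY state.
 */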
2854static void
2855qlcnic_fwinit_work(struct work_struct *work)
2856{
2857 struct qlcnic_adapter *adapter = container_of(work,
2858 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2859 u32 dev_state = 0xf;
7b749ff4 2860 u32 val;
af19b491 2861
f73dfc50
AKS
2862 if (qlcnic_api_lock(adapter))
2863 goto err_ret;
af19b491 2864
a5e463d0 2865 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620
AKS
2866 if (dev_state == QLCNIC_DEV_QUISCENT ||
2867 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
a5e463d0
SC
2868 qlcnic_api_unlock(adapter);
2869 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2870 FW_POLL_DELAY * 2);
2871 return;
2872 }
2873
9f26f547 2874 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2875 qlcnic_api_unlock(adapter);
2876 goto wait_npar;
9f26f547
AC
2877 }
2878
16e3cf73
SV
2879 if (dev_state == QLCNIC_DEV_INITIALIZING ||
2880 dev_state == QLCNIC_DEV_READY) {
2881 dev_info(&adapter->pdev->dev, "Detected state change from "
2882 "DEV_NEED_RESET, skipping ack check\n");
2883 goto skip_ack_check;
2884 }
2885
f73dfc50 2886 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
16e3cf73 2887 dev_info(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
f73dfc50
AKS
2888 adapter->reset_ack_timeo);
2889 goto skip_ack_check;
2890 }
2891
2892 if (!qlcnic_check_drv_state(adapter)) {
2893skip_ack_check:
2894 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0 2895
f73dfc50
AKS
2896 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2897 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2898 QLCNIC_DEV_INITIALIZING);
2899 set_bit(__QLCNIC_START_FW, &adapter->state);
2900 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2901 qlcnic_idc_debug_info(adapter, 0);
7b749ff4
SV
2902 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2903 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2904 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
af19b491
AKS
2905 }
2906
f73dfc50
AKS
2907 qlcnic_api_unlock(adapter);
2908
287e38aa 2909 rtnl_lock();
7b749ff4
SV
2910 if (adapter->ahw->fw_dump.enable &&
2911 (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
9d6a6440
AC
2912 QLCDB(adapter, DRV, "Take FW dump\n");
2913 qlcnic_dump_fw(adapter);
032a13c7 2914 adapter->flags |= QLCNIC_FW_HANG;
9d6a6440 2915 }
287e38aa 2916 rtnl_unlock();
7b749ff4
SV
2917
2918 adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
9f26f547 2919 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2920 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2921 adapter->fw_wait_cnt = 0;
af19b491
AKS
2922 return;
2923 }
af19b491
AKS
2924 goto err_ret;
2925 }
2926
f73dfc50 2927 qlcnic_api_unlock(adapter);
aa5e18c0 2928
9f26f547 2929wait_npar:
af19b491 2930 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2931 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2932
af19b491 2933 switch (dev_state) {
3c4b23b1 2934 case QLCNIC_DEV_READY:
9f26f547 2935 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2936 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2937 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2938 return;
2939 }
3c4b23b1
AKS
2940 case QLCNIC_DEV_FAILED:
2941 break;
2942 default:
2943 qlcnic_schedule_work(adapter,
2944 qlcnic_fwinit_work, FW_POLL_DELAY);
2945 return;
af19b491
AKS
2946 }
2947
2948err_ret:
f73dfc50
AKS
2949 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2950 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2951 netif_device_attach(adapter->netdev);
21854f02 2952 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2953}
2954
2955static void
2956qlcnic_detach_work(struct work_struct *work)
2957{
2958 struct qlcnic_adapter *adapter = container_of(work,
2959 struct qlcnic_adapter, fw_work.work);
2960 struct net_device *netdev = adapter->netdev;
2961 u32 status;
2962
2963 netif_device_detach(netdev);
2964
b8c17620
AKS
2965 /* Don't grab the rtnl lock during quiescent mode */
2966 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2967 if (netif_running(netdev))
2968 __qlcnic_down(adapter, netdev);
2969 } else
2970 qlcnic_down(adapter, netdev);
af19b491 2971
af19b491
AKS
2972 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2973
44f65b29
SC
2974 if (status & QLCNIC_RCODE_FATAL_ERROR) {
2975 dev_err(&adapter->pdev->dev,
2976 "Detaching the device: peg halt status1=0x%x\n",
2977 status);
2978
2979 if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
2980 dev_err(&adapter->pdev->dev,
2981 "On board active cooling fan failed. "
2982 "Device has been halted.\n");
2983 dev_err(&adapter->pdev->dev,
2984 "Replace the adapter.\n");
2985 }
2986
af19b491 2987 goto err_ret;
44f65b29 2988 }
af19b491 2989
44f65b29
SC
2990 if (adapter->temp == QLCNIC_TEMP_PANIC) {
2991 dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n",
2992 adapter->temp);
af19b491 2993 goto err_ret;
44f65b29
SC
2994 }
2995
602ca6f0
SV
2996 /* Don't ack if this instance is the reset owner */
2997 if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
44f65b29
SC
2998 if (qlcnic_set_drv_state(adapter, adapter->dev_state)) {
2999 dev_err(&adapter->pdev->dev,
3000 "Failed to set driver state,"
3001 "detaching the device.\n");
602ca6f0 3002 goto err_ret;
44f65b29 3003 }
602ca6f0 3004 }
af19b491
AKS
3005
3006 adapter->fw_wait_cnt = 0;
3007
3008 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
3009
3010 return;
3011
3012err_ret:
34ce3626 3013 netif_device_attach(netdev);
21854f02 3014 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
3015}
3016
3c4b23b1
AKS
3017 /* Transition NPAR state to NON operational */
3018static void
3019qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
3020{
3021 u32 state;
3022
3023 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
3024 if (state == QLCNIC_DEV_NPAR_NON_OPER)
3025 return;
3026
3027 if (qlcnic_api_lock(adapter))
3028 return;
3029 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
3030 qlcnic_api_unlock(adapter);
3031}
3032
f73dfc50 3033/*Transit to RESET state from READY state only */
18f2f616 3034void
af19b491
AKS
3035qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
3036{
646779f1
SV
3037 u32 state, xg_val = 0, gb_val = 0;
3038
3039 qlcnic_xg_set_xg0_mask(xg_val);
3040 qlcnic_xg_set_xg1_mask(xg_val);
3041 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, xg_val);
3042 qlcnic_gb_set_gb0_mask(gb_val);
3043 qlcnic_gb_set_gb1_mask(gb_val);
3044 qlcnic_gb_set_gb2_mask(gb_val);
3045 qlcnic_gb_set_gb3_mask(gb_val);
3046 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, gb_val);
3047 dev_info(&adapter->pdev->dev, "Pause control frames disabled"
3048 " on all ports\n");
cea8975e 3049 adapter->need_fw_reset = 1;
af19b491
AKS
3050 if (qlcnic_api_lock(adapter))
3051 return;
3052
3053 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b43e5ee7
SC
3054 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
3055 netdev_err(adapter->netdev,
3056 "Device is in FAILED state, Please Reboot\n");
3057 qlcnic_api_unlock(adapter);
3058 return;
3059 }
af19b491 3060
f73dfc50 3061 if (state == QLCNIC_DEV_READY) {
af19b491 3062 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
602ca6f0 3063 adapter->flags |= QLCNIC_FW_RESET_OWNER;
65b5b420 3064 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 3065 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
3066 }
3067
3c4b23b1 3068 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
3069 qlcnic_api_unlock(adapter);
3070}
3071
9f26f547
AC
3072 /* Transition to NPAR READY state from NPAR NOT READY state */
3073static void
3074qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
3075{
9f26f547
AC
3076 if (qlcnic_api_lock(adapter))
3077 return;
3078
3c4b23b1
AKS
3079 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
3080 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
3081
3082 qlcnic_api_unlock(adapter);
3083}
3084
af19b491
AKS
3085static void
3086qlcnic_schedule_work(struct qlcnic_adapter *adapter,
3087 work_func_t func, int delay)
3088{
451724c8
SC
3089 if (test_bit(__QLCNIC_AER, &adapter->state))
3090 return;
3091
af19b491 3092 INIT_DELAYED_WORK(&adapter->fw_work, func);
f7ec804a
AKS
3093 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
3094 round_jiffies_relative(delay));
af19b491
AKS
3095}
3096
3097static void
3098qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
3099{
3100 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3101 msleep(10);
3102
b43e5ee7
SC
3103 if (!adapter->fw_work.work.func)
3104 return;
3105
af19b491
AKS
3106 cancel_delayed_work_sync(&adapter->fw_work);
3107}
3108
3109static void
3110qlcnic_attach_work(struct work_struct *work)
3111{
3112 struct qlcnic_adapter *adapter = container_of(work,
3113 struct qlcnic_adapter, fw_work.work);
3114 struct net_device *netdev = adapter->netdev;
b18971d1 3115 u32 npar_state;
af19b491 3116
b18971d1
AKS
3117 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
3118 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
3119 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
3120 qlcnic_clr_all_drv_state(adapter, 0);
3121 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
3122 qlcnic_schedule_work(adapter, qlcnic_attach_work,
3123 FW_POLL_DELAY);
3124 else
3125 goto attach;
3126 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
3127 return;
3128 }
3129attach:
af19b491 3130 if (netif_running(netdev)) {
52486a3a 3131 if (qlcnic_up(adapter, netdev))
af19b491 3132 goto done;
af19b491 3133
aec1e845 3134 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
3135 }
3136
af19b491 3137done:
34ce3626 3138 netif_device_attach(netdev);
af19b491 3139 adapter->fw_fail_cnt = 0;
032a13c7 3140 adapter->flags &= ~QLCNIC_FW_HANG;
af19b491 3141 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
3142
3143 if (!qlcnic_clr_drv_state(adapter))
3144 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3145 FW_POLL_DELAY);
af19b491
AKS
3146}
3147
3148static int
3149qlcnic_check_health(struct qlcnic_adapter *adapter)
3150{
4e70812b 3151 u32 state = 0, heartbeat;
853d4bca 3152 u32 peg_status;
af19b491
AKS
3153
3154 if (qlcnic_check_temp(adapter))
3155 goto detach;
3156
2372a5f1 3157 if (adapter->need_fw_reset)
af19b491 3158 qlcnic_dev_request_reset(adapter);
af19b491
AKS
3159
3160 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620 3161 if (state == QLCNIC_DEV_NEED_RESET) {
3c4b23b1 3162 qlcnic_set_npar_non_operational(adapter);
af19b491 3163 adapter->need_fw_reset = 1;
b8c17620
AKS
3164 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
3165 goto detach;
af19b491 3166
4e70812b
SC
3167 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
3168 if (heartbeat != adapter->heartbeat) {
3169 adapter->heartbeat = heartbeat;
af19b491
AKS
3170 adapter->fw_fail_cnt = 0;
3171 if (adapter->need_fw_reset)
3172 goto detach;
68bf1c68 3173
9ce13ca8 3174 if (adapter->reset_context && auto_fw_reset) {
68bf1c68
AKS
3175 qlcnic_reset_hw_context(adapter);
3176 adapter->netdev->trans_start = jiffies;
3177 }
3178
af19b491
AKS
3179 return 0;
3180 }
3181
3182 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
3183 return 0;
3184
032a13c7
SV
3185 adapter->flags |= QLCNIC_FW_HANG;
3186
af19b491
AKS
3187 qlcnic_dev_request_reset(adapter);
3188
9ce13ca8 3189 if (auto_fw_reset)
0df170b6 3190 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491 3191
853d4bca
AR
3192 dev_err(&adapter->pdev->dev, "firmware hang detected\n");
3193 dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
c76ecb7a
SV
3194 "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
3195 "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
3196 "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
3197 "PEG_NET_4_PC: 0x%x\n",
3198 QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1),
3199 QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2),
3200 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
3201 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
3202 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
3203 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
3204 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
853d4bca 3205 peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
97048a1f 3206 if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
853d4bca
AR
3207 dev_err(&adapter->pdev->dev,
3208 "Firmware aborted with error code 0x00006700. "
3209 "Device is being reset.\n");
af19b491
AKS
3210detach:
3211 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3212 QLCNIC_DEV_NEED_RESET;
3213
9ce13ca8 3214 if (auto_fw_reset &&
65b5b420
AKS
3215 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3216
af19b491 3217 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
3218 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3219 }
af19b491
AKS
3220
3221 return 1;
3222}
3223
3224static void
3225qlcnic_fw_poll_work(struct work_struct *work)
3226{
3227 struct qlcnic_adapter *adapter = container_of(work,
3228 struct qlcnic_adapter, fw_work.work);
3229
3230 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3231 goto reschedule;
3232
3233
3234 if (qlcnic_check_health(adapter))
3235 return;
3236
b5e5492c
AKS
3237 if (adapter->fhash.fnum)
3238 qlcnic_prune_lb_filters(adapter);
3239
af19b491
AKS
3240reschedule:
3241 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3242}
3243
451724c8
SC
3244static int qlcnic_is_first_func(struct pci_dev *pdev)
3245{
3246 struct pci_dev *oth_pdev;
3247 int val = pdev->devfn;
3248
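/*
 * Scan the lower-numbered PCI functions of this device; if any of
 * them is already out of D3cold, another function resumed first.
 */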
3249 while (val-- > 0) {
3250 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3251 (pdev->bus), pdev->bus->number,
3252 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3253 if (!oth_pdev)
3254 continue;
451724c8 3255
bfc978fa
AKS
3256 if (oth_pdev->current_state != PCI_D3cold) {
3257 pci_dev_put(oth_pdev);
451724c8 3258 return 0;
bfc978fa
AKS
3259 }
3260 pci_dev_put(oth_pdev);
451724c8
SC
3261 }
3262 return 1;
3263}
3264
3265static int qlcnic_attach_func(struct pci_dev *pdev)
3266{
3267 int err, first_func;
3268 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3269 struct net_device *netdev = adapter->netdev;
3270
3271 pdev->error_state = pci_channel_io_normal;
3272
3273 err = pci_enable_device(pdev);
3274 if (err)
3275 return err;
3276
3277 pci_set_power_state(pdev, PCI_D0);
3278 pci_set_master(pdev);
3279 pci_restore_state(pdev);
3280
3281 first_func = qlcnic_is_first_func(pdev);
3282
3283 if (qlcnic_api_lock(adapter))
3284 return -EINVAL;
3285
933fce12 3286 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
3287 adapter->need_fw_reset = 1;
3288 set_bit(__QLCNIC_START_FW, &adapter->state);
3289 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3290 QLCDB(adapter, DRV, "Restarting fw\n");
3291 }
3292 qlcnic_api_unlock(adapter);
3293
3294 err = adapter->nic_ops->start_firmware(adapter);
3295 if (err)
3296 return err;
3297
3298 qlcnic_clr_drv_state(adapter);
3299 qlcnic_setup_intr(adapter);
3300
3301 if (netif_running(netdev)) {
3302 err = qlcnic_attach(adapter);
3303 if (err) {
21854f02 3304 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3305 clear_bit(__QLCNIC_AER, &adapter->state);
3306 netif_device_attach(netdev);
3307 return err;
3308 }
3309
3310 err = qlcnic_up(adapter, netdev);
3311 if (err)
3312 goto done;
3313
aec1e845 3314 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3315 }
3316 done:
3317 netif_device_attach(netdev);
3318 return err;
3319}
3320
3321static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3322 pci_channel_state_t state)
3323{
3324 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3325 struct net_device *netdev = adapter->netdev;
3326
3327 if (state == pci_channel_io_perm_failure)
3328 return PCI_ERS_RESULT_DISCONNECT;
3329
3330 if (state == pci_channel_io_normal)
3331 return PCI_ERS_RESULT_RECOVERED;
3332
3333 set_bit(__QLCNIC_AER, &adapter->state);
3334 netif_device_detach(netdev);
3335
3336 cancel_delayed_work_sync(&adapter->fw_work);
3337
3338 if (netif_running(netdev))
3339 qlcnic_down(adapter, netdev);
3340
3341 qlcnic_detach(adapter);
3342 qlcnic_teardown_intr(adapter);
3343
3344 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3345
3346 pci_save_state(pdev);
3347 pci_disable_device(pdev);
3348
3349 return PCI_ERS_RESULT_NEED_RESET;
3350}
3351
3352static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3353{
3354 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3355 PCI_ERS_RESULT_RECOVERED;
3356}
3357
3358static void qlcnic_io_resume(struct pci_dev *pdev)
3359{
3360 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3361
3362 pci_cleanup_aer_uncorrect_error_status(pdev);
3363
3364 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3365 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3366 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3367 FW_POLL_DELAY);
3368}
3369
87eb743b
AC
3370static int
3371qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3372{
3373 int err;
3374
3375 err = qlcnic_can_start_firmware(adapter);
3376 if (err)
3377 return err;
3378
78f84e1a
AKS
3379 err = qlcnic_check_npar_opertional(adapter);
3380 if (err)
3381 return err;
3c4b23b1 3382
174240a8
RB
3383 err = qlcnic_initialize_nic(adapter);
3384 if (err)
3385 return err;
3386
87eb743b
AC
3387 qlcnic_check_options(adapter);
3388
7373373d
RB
3389 err = qlcnic_set_eswitch_port_config(adapter);
3390 if (err)
3391 return err;
3392
87eb743b
AC
3393 adapter->need_fw_reset = 0;
3394
3395 return err;
3396}
3397
3398static int
3399qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3400{
3401 return -EOPNOTSUPP;
3402}
3403
3404static int
3405qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3406{
3407 return -EOPNOTSUPP;
3408}
3409
af19b491
AKS
3410static ssize_t
3411qlcnic_store_bridged_mode(struct device *dev,
3412 struct device_attribute *attr, const char *buf, size_t len)
3413{
3414 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3415 unsigned long new;
3416 int ret = -EINVAL;
3417
3418 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3419 goto err_out;
3420
8a15ad1f 3421 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3422 goto err_out;
3423
3424 if (strict_strtoul(buf, 2, &new))
3425 goto err_out;
3426
2e9d722d 3427 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3428 ret = len;
3429
3430err_out:
3431 return ret;
3432}
3433
3434static ssize_t
3435qlcnic_show_bridged_mode(struct device *dev,
3436 struct device_attribute *attr, char *buf)
3437{
3438 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3439 int bridged_mode = 0;
3440
3441 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3442 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3443
3444 return sprintf(buf, "%d\n", bridged_mode);
3445}
3446
3447static struct device_attribute dev_attr_bridged_mode = {
3448 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3449 .show = qlcnic_show_bridged_mode,
3450 .store = qlcnic_store_bridged_mode,
3451};
3452
3453static ssize_t
3454qlcnic_store_diag_mode(struct device *dev,
3455 struct device_attribute *attr, const char *buf, size_t len)
3456{
3457 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3458 unsigned long new;
3459
3460 if (strict_strtoul(buf, 2, &new))
3461 return -EINVAL;
3462
3463 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3464 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3465
3466 return len;
3467}
3468
3469static ssize_t
3470qlcnic_show_diag_mode(struct device *dev,
3471 struct device_attribute *attr, char *buf)
3472{
3473 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3474
3475 return sprintf(buf, "%d\n",
3476 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3477}
3478
3479static struct device_attribute dev_attr_diag_mode = {
3480 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3481 .show = qlcnic_show_diag_mode,
3482 .store = qlcnic_store_diag_mode,
3483};
3484
f94bc1e7
SC
3485int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
3486{
3487 if (!use_msi_x && !use_msi) {
3488 netdev_info(netdev, "no msix or msi support, hence no rss\n");
3489 return -EINVAL;
3490 }
3491
3492 if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
3493 netdev_info(netdev, "rss_ring valid range [2 - %x] in "
3494 "powers of 2\n", max_hw);
3495 return -EINVAL;
3496 }
3497 return 0;
3498
3499}
3500
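/*
 * Changing the RSS ring count requires a full reconfigure: detach the
 * interface, tear down interrupts, re-enable MSI-X with the new vector
 * count (falling back to MSI/legacy on failure) and attach again.
 */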
3501int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
3502{
3503 struct net_device *netdev = adapter->netdev;
3504 int err = 0;
3505
3506 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3507 return -EBUSY;
3508
3509 netif_device_detach(netdev);
3510 if (netif_running(netdev))
3511 __qlcnic_down(adapter, netdev);
3512 qlcnic_detach(adapter);
3513 qlcnic_teardown_intr(adapter);
3514
3515 if (qlcnic_enable_msix(adapter, data)) {
3516 netdev_info(netdev, "failed setting max_rss; rss disabled\n");
3517 qlcnic_enable_msi_legacy(adapter);
3518 }
3519
3520 if (netif_running(netdev)) {
3521 err = qlcnic_attach(adapter);
3522 if (err)
3523 goto done;
3524 err = __qlcnic_up(adapter, netdev);
3525 if (err)
3526 goto done;
3527 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3528 }
3529 done:
3530 netif_device_attach(netdev);
3531 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3532 return err;
3533}
3534
728a98b8
SC
3535static int
3536qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, u8 *state,
3537 u8 *rate)
3538{
3539 *rate = LSB(beacon);
3540 *state = MSB(beacon);
3541
3542 QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
3543
3544 if (!*state) {
3545 *rate = __QLCNIC_MAX_LED_RATE;
3546 return 0;
3547 } else if (*state > __QLCNIC_MAX_LED_STATE)
3548 return -EINVAL;
3549
3550 if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
3551 return -EINVAL;
3552
3553 return 0;
3554}
3555
3556static ssize_t
3557qlcnic_store_beacon(struct device *dev,
3558 struct device_attribute *attr, const char *buf, size_t len)
3559{
3560 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3561 int max_sds_rings = adapter->max_sds_rings;
728a98b8
SC
3562 u16 beacon;
3563 u8 b_state, b_rate;
3564 int err;
3565
10ee0fae
SC
3566 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3567 dev_warn(dev, "LED test not supported for "
3568 "non-privileged function\n");
3569 return -EOPNOTSUPP;
3570 }
3571
728a98b8
SC
3572 if (len != sizeof(u16))
3573 return QL_STATUS_INVALID_PARAM;
3574
3575 memcpy(&beacon, buf, sizeof(u16));
3576 err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
3577 if (err)
3578 return err;
3579
3580 if (adapter->ahw->beacon_state == b_state)
3581 return len;
3582
10ee0fae
SC
3583 rtnl_lock();
3584
728a98b8 3585 if (!adapter->ahw->beacon_state)
10ee0fae
SC
3586 if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
3587 rtnl_unlock();
728a98b8 3588 return -EBUSY;
10ee0fae
SC
3589 }
3590
3591 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
3592 err = -EIO;
3593 goto out;
3594 }
728a98b8
SC
3595
3596 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
728a98b8 3597 err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
10ee0fae
SC
3598 if (err)
3599 goto out;
3600 set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
728a98b8
SC
3601 }
3602
3603 err = qlcnic_config_led(adapter, b_state, b_rate);
3604
3605 if (!err) {
728a98b8 3606 err = len;
10ee0fae 3607 adapter->ahw->beacon_state = b_state;
728a98b8
SC
3608 }
3609
10ee0fae 3610 if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
728a98b8 3611 qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
728a98b8 3612
10ee0fae
SC
3613 out:
3614 if (!adapter->ahw->beacon_state)
728a98b8 3615 clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
10ee0fae 3616 rtnl_unlock();
728a98b8
SC
3617
3618 return err;
3619}
3620
3621static ssize_t
3622qlcnic_show_beacon(struct device *dev,
3623 struct device_attribute *attr, char *buf)
3624{
3625 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3626
3627 return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
3628}
3629
3630static struct device_attribute dev_attr_beacon = {
3631 .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
3632 .show = qlcnic_show_beacon,
3633 .store = qlcnic_store_beacon,
3634};
3635
af19b491
AKS
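/*
 * sysfs "crb" attribute helpers: accesses must match the register
 * width (8 bytes for the CAMQM window below CRB space, 4 bytes
 * otherwise) and be naturally aligned.
 */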
3636static int
3637qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3638 loff_t offset, size_t size)
3639{
897e8c7c
DP
3640 size_t crb_size = 4;
3641
af19b491
AKS
3642 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3643 return -EIO;
3644
897e8c7c
DP
3645 if (offset < QLCNIC_PCI_CRBSPACE) {
3646 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3647 QLCNIC_PCI_CAMQM_END))
3648 crb_size = 8;
3649 else
3650 return -EINVAL;
3651 }
af19b491 3652
897e8c7c
DP
3653 if ((size != crb_size) || (offset & (crb_size-1)))
3654 return -EINVAL;
af19b491
AKS
3655
3656 return 0;
3657}
3658
3659static ssize_t
2c3c8bea
CW
3660qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3661 struct bin_attribute *attr,
af19b491
AKS
3662 char *buf, loff_t offset, size_t size)
3663{
3664 struct device *dev = container_of(kobj, struct device, kobj);
3665 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3666 u32 data;
897e8c7c 3667 u64 qmdata;
af19b491
AKS
3668 int ret;
3669
3670 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3671 if (ret != 0)
3672 return ret;
3673
897e8c7c
DP
3674 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3675 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3676 memcpy(buf, &qmdata, size);
3677 } else {
3678 data = QLCRD32(adapter, offset);
3679 memcpy(buf, &data, size);
3680 }
af19b491
AKS
3681 return size;
3682}
3683
3684static ssize_t
2c3c8bea
CW
3685qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3686 struct bin_attribute *attr,
af19b491
AKS
3687 char *buf, loff_t offset, size_t size)
3688{
3689 struct device *dev = container_of(kobj, struct device, kobj);
3690 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3691 u32 data;
897e8c7c 3692 u64 qmdata;
af19b491
AKS
3693 int ret;
3694
3695 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3696 if (ret != 0)
3697 return ret;
3698
897e8c7c
DP
3699 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3700 memcpy(&qmdata, buf, size);
3701 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3702 } else {
3703 memcpy(&data, buf, size);
3704 QLCWR32(adapter, offset, data);
3705 }
af19b491
AKS
3706 return size;
3707}
3708
3709static int
3710qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3711 loff_t offset, size_t size)
3712{
3713 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3714 return -EIO;
3715
3716 if ((size != 8) || (offset & 0x7))
3717 return -EIO;
3718
3719 return 0;
3720}
3721
3722static ssize_t
2c3c8bea
CW
3723qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3724 struct bin_attribute *attr,
af19b491
AKS
3725 char *buf, loff_t offset, size_t size)
3726{
3727 struct device *dev = container_of(kobj, struct device, kobj);
3728 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3729 u64 data;
3730 int ret;
3731
3732 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3733 if (ret != 0)
3734 return ret;
3735
3736 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3737 return -EIO;
3738
3739 memcpy(buf, &data, size);
3740
3741 return size;
3742}
3743
3744static ssize_t
2c3c8bea
CW
3745qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3746 struct bin_attribute *attr,
af19b491
AKS
3747 char *buf, loff_t offset, size_t size)
3748{
3749 struct device *dev = container_of(kobj, struct device, kobj);
3750 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3751 u64 data;
3752 int ret;
3753
3754 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3755 if (ret != 0)
3756 return ret;
3757
3758 memcpy(&data, buf, size);
3759
3760 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3761 return -EIO;
3762
3763 return size;
3764}
3765
af19b491
AKS
3766static struct bin_attribute bin_attr_crb = {
3767 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3768 .size = 0,
3769 .read = qlcnic_sysfs_read_crb,
3770 .write = qlcnic_sysfs_write_crb,
3771};
3772
3773static struct bin_attribute bin_attr_mem = {
3774 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3775 .size = 0,
3776 .read = qlcnic_sysfs_read_mem,
3777 .write = qlcnic_sysfs_write_mem,
3778};
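/*
 * The "crb" and "mem" binary attributes above give diagnostic tools raw
 * access to CRB registers and adapter memory. Both refuse I/O unless
 * QLCNIC_DIAG_ENABLED is set, and only accept naturally aligned transfers:
 * 4 bytes for CRB (8 bytes inside the CAMQM window), 8 bytes for memory.
 */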
3779
cea8975e 3780static int
346fe763
RB
3781validate_pm_config(struct qlcnic_adapter *adapter,
3782 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3783{
3784
3785 u8 src_pci_func, s_esw_id, d_esw_id;
3786 u8 dest_pci_func;
3787 int i;
3788
3789 for (i = 0; i < count; i++) {
3790 src_pci_func = pm_cfg[i].pci_func;
3791 dest_pci_func = pm_cfg[i].dest_npar;
3792 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC ||
3793 dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3794 return QL_STATUS_INVALID_PARAM;
3795
3796 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3797 return QL_STATUS_INVALID_PARAM;
3798
3799 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3800 return QL_STATUS_INVALID_PARAM;
3801
346fe763
RB
3802 s_esw_id = adapter->npars[src_pci_func].phy_port;
3803 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3804
3805 if (s_esw_id != d_esw_id)
3806 return QL_STATUS_INVALID_PARAM;
3807
3808 }
3809 return 0;
3810
3811}
3812
3813static ssize_t
3814qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3815 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3816{
3817 struct device *dev = container_of(kobj, struct device, kobj);
3818 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3819 struct qlcnic_pm_func_cfg *pm_cfg;
3820 u32 id, action, pci_func;
3821 int count, rem, i, ret;
3822
3823 count = size / sizeof(struct qlcnic_pm_func_cfg);
3824 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3825 if (rem)
3826 return QL_STATUS_INVALID_PARAM;
3827
3828 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3829
3830 ret = validate_pm_config(adapter, pm_cfg, count);
3831 if (ret)
3832 return ret;
3833 for (i = 0; i < count; i++) {
3834 pci_func = pm_cfg[i].pci_func;
4e8acb01 3835 action = !!pm_cfg[i].action;
346fe763
RB
3836 id = adapter->npars[pci_func].phy_port;
3837 ret = qlcnic_config_port_mirroring(adapter, id,
3838 action, pci_func);
3839 if (ret)
3840 return ret;
3841 }
3842
3843 for (i = 0; i < count; i++) {
3844 pci_func = pm_cfg[i].pci_func;
3845 id = adapter->npars[pci_func].phy_port;
4e8acb01 3846 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3847 adapter->npars[pci_func].dest_npar = id;
3848 }
3849 return size;
3850}
3851
3852static ssize_t
3853qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3854 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3855{
3856 struct device *dev = container_of(kobj, struct device, kobj);
3857 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3858 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3859 int i;
3860
3861 if (size != sizeof(pm_cfg))
3862 return QL_STATUS_INVALID_PARAM;
3863
3864 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3865 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3866 continue;
3867 pm_cfg[i].action = adapter->npars[i].enable_pm;
3868 pm_cfg[i].dest_npar = 0;
3869 pm_cfg[i].pci_func = i;
3870 }
3871 memcpy(buf, &pm_cfg, size);
3872
3873 return size;
3874}
3875
cea8975e 3876static int
346fe763 3877validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3878 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3879{
7613c87b 3880 u32 op_mode;
346fe763
RB
3881 u8 pci_func;
3882 int i;
7613c87b 3883
b1fc6d3c 3884 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
7613c87b 3885
346fe763
RB
3886 for (i = 0; i < count; i++) {
3887 pci_func = esw_cfg[i].pci_func;
3888 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3889 return QL_STATUS_INVALID_PARAM;
3890
4e8acb01
RB
3891 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3892 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3893 return QL_STATUS_INVALID_PARAM;
346fe763 3894
4e8acb01
RB
3895 switch (esw_cfg[i].op_mode) {
3896 case QLCNIC_PORT_DEFAULTS:
7613c87b 3897 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3898 QLCNIC_NON_PRIV_FUNC) {
091056b2
AKS
3899 if (esw_cfg[i].mac_anti_spoof != 0)
3900 return QL_STATUS_INVALID_PARAM;
3901 if (esw_cfg[i].mac_override != 1)
3902 return QL_STATUS_INVALID_PARAM;
3903 if (esw_cfg[i].promisc_mode != 1)
3904 return QL_STATUS_INVALID_PARAM;
7373373d 3905 }
4e8acb01
RB
3906 break;
3907 case QLCNIC_ADD_VLAN:
346fe763
RB
3908 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3909 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3910 if (!esw_cfg[i].op_type)
3911 return QL_STATUS_INVALID_PARAM;
3912 break;
3913 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3914 if (!esw_cfg[i].op_type)
3915 return QL_STATUS_INVALID_PARAM;
3916 break;
3917 default:
346fe763 3918 return QL_STATUS_INVALID_PARAM;
4e8acb01 3919 }
346fe763 3920 }
346fe763
RB
3921 return 0;
3922}
3923
3924static ssize_t
3925qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3926 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3927{
3928 struct device *dev = container_of(kobj, struct device, kobj);
3929 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3930 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3931 struct qlcnic_npar_info *npar;
346fe763 3932 int count, rem, i, ret;
0325d69b 3933 u8 pci_func, op_mode = 0;
346fe763
RB
3934
3935 count = size / sizeof(struct qlcnic_esw_func_cfg);
3936 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3937 if (rem)
3938 return QL_STATUS_INVALID_PARAM;
3939
3940 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3941 ret = validate_esw_config(adapter, esw_cfg, count);
3942 if (ret)
3943 return ret;
3944
3945 for (i = 0; i < count; i++) {
0325d69b
RB
3946 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3947 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3948 return QL_STATUS_INVALID_PARAM;
e9a47700 3949
b1fc6d3c 3950 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
e9a47700
RB
3951 continue;
3952
3953 op_mode = esw_cfg[i].op_mode;
3954 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3955 esw_cfg[i].op_mode = op_mode;
b1fc6d3c 3956 esw_cfg[i].pci_func = adapter->ahw->pci_func;
e9a47700
RB
3957
3958 switch (esw_cfg[i].op_mode) {
3959 case QLCNIC_PORT_DEFAULTS:
3960 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3961 break;
8cf61f89
AKS
3962 case QLCNIC_ADD_VLAN:
3963 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3964 break;
3965 case QLCNIC_DEL_VLAN:
3966 esw_cfg[i].vlan_id = 0;
3967 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3968 break;
0325d69b 3969 }
346fe763
RB
3970 }
3971
0325d69b
RB
3972 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3973 goto out;
e9a47700 3974
346fe763
RB
3975 for (i = 0; i < count; i++) {
3976 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3977 npar = &adapter->npars[pci_func];
3978 switch (esw_cfg[i].op_mode) {
3979 case QLCNIC_PORT_DEFAULTS:
3980 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3981 npar->mac_override = esw_cfg[i].mac_override;
4e8acb01
RB
3982 npar->offload_flags = esw_cfg[i].offload_flags;
3983 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3984 npar->discard_tagged = esw_cfg[i].discard_tagged;
3985 break;
3986 case QLCNIC_ADD_VLAN:
3987 npar->pvid = esw_cfg[i].vlan_id;
3988 break;
3989 case QLCNIC_DEL_VLAN:
3990 npar->pvid = 0;
3991 break;
3992 }
346fe763 3993 }
0325d69b 3994out:
346fe763
RB
3995 return size;
3996}
3997
3998static ssize_t
3999qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
4000 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4001{
4002 struct device *dev = container_of(kobj, struct device, kobj);
4003 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4004 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 4005 u8 i;
346fe763
RB
4006
4007 if (size != sizeof(esw_cfg))
4008 return QL_STATUS_INVALID_PARAM;
4009
4010 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
4011 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
4012 continue;
4e8acb01
RB
4013 esw_cfg[i].pci_func = i;
4014 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
4015 return QL_STATUS_INVALID_PARAM;
346fe763
RB
4016 }
4017 memcpy(buf, &esw_cfg, size);
4018
4019 return size;
4020}
4021
cea8975e 4022static int
346fe763
RB
4023validate_npar_config(struct qlcnic_adapter *adapter,
4024 struct qlcnic_npar_func_cfg *np_cfg, int count)
4025{
4026 u8 pci_func, i;
4027
4028 for (i = 0; i < count; i++) {
4029 pci_func = np_cfg[i].pci_func;
4030 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
4031 return QL_STATUS_INVALID_PARAM;
4032
4033 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
4034 return QL_STATUS_INVALID_PARAM;
4035
d12b0d9a
RB
4036 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
4037 !IS_VALID_BW(np_cfg[i].max_bw))
346fe763
RB
4038 return QL_STATUS_INVALID_PARAM;
4039 }
4040 return 0;
4041}
4042
4043static ssize_t
4044qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
4045 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4046{
4047 struct device *dev = container_of(kobj, struct device, kobj);
4048 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4049 struct qlcnic_info nic_info;
4050 struct qlcnic_npar_func_cfg *np_cfg;
4051 int i, count, rem, ret;
4052 u8 pci_func;
4053
4054 count = size / sizeof(struct qlcnic_npar_func_cfg);
4055 rem = size % sizeof(struct qlcnic_npar_func_cfg);
4056 if (rem)
4057 return QL_STATUS_INVALID_PARAM;
4058
4059 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
4060 ret = validate_npar_config(adapter, np_cfg, count);
4061 if (ret)
4062 return ret;
4063
4064 for (i = 0; i < count; i++) {
4065 pci_func = np_cfg[i].pci_func;
4066 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
4067 if (ret)
4068 return ret;
4069 nic_info.pci_func = pci_func;
4070 nic_info.min_tx_bw = np_cfg[i].min_bw;
4071 nic_info.max_tx_bw = np_cfg[i].max_bw;
4072 ret = qlcnic_set_nic_info(adapter, &nic_info);
4073 if (ret)
4074 return ret;
cea8975e
AC
4075 adapter->npars[i].min_bw = nic_info.min_tx_bw;
4076 adapter->npars[i].max_bw = nic_info.max_tx_bw;
346fe763
RB
4077 }
4078
4079 return size;
4080}
4081
4082static ssize_t
4083qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
4084 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4085{
4086 struct device *dev = container_of(kobj, struct device, kobj);
4087 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4088 struct qlcnic_info nic_info;
4089 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
4090 int i, ret;
4091
4092 if (size != sizeof(np_cfg))
4093 return QL_STATUS_INVALID_PARAM;
4094
4095 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
4096 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
4097 continue;
4098 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
4099 if (ret)
4100 return ret;
4101
4102 np_cfg[i].pci_func = i;
a1c0c459 4103 np_cfg[i].op_mode = (u8)nic_info.op_mode;
346fe763
RB
4104 np_cfg[i].port_num = nic_info.phys_port;
4105 np_cfg[i].fw_capab = nic_info.capabilities;
4106 np_cfg[i].min_bw = nic_info.min_tx_bw;
4107 np_cfg[i].max_bw = nic_info.max_tx_bw;
4108 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
4109 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
4110 }
4111 memcpy(buf, &np_cfg, size);
4112 return size;
4113}
4114
b6021212
AKS
4115static ssize_t
4116qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
4117 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4118{
4119 struct device *dev = container_of(kobj, struct device, kobj);
4120 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4121 struct qlcnic_esw_statistics port_stats;
4122 int ret;
4123
4124 if (size != sizeof(struct qlcnic_esw_statistics))
4125 return QL_STATUS_INVALID_PARAM;
4126
4127 if (offset >= QLCNIC_MAX_PCI_FUNC)
4128 return QL_STATUS_INVALID_PARAM;
4129
4130 memset(&port_stats, 0, size);
4131 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
4132 &port_stats.rx);
4133 if (ret)
4134 return ret;
4135
4136 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
4137 &port_stats.tx);
4138 if (ret)
4139 return ret;
4140
4141 memcpy(buf, &port_stats, size);
4142 return size;
4143}
4144
4145static ssize_t
4146qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
4147 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4148{
4149 struct device *dev = container_of(kobj, struct device, kobj);
4150 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4151 struct qlcnic_esw_statistics esw_stats;
4152 int ret;
4153
4154 if (size != sizeof(struct qlcnic_esw_statistics))
4155 return QL_STATUS_INVALID_PARAM;
4156
4157 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
4158 return QL_STATUS_INVALID_PARAM;
4159
4160 memset(&esw_stats, 0, size);
4161 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
4162 &esw_stats.rx);
4163 if (ret)
4164 return ret;
4165
4166 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
4167 &esw_stats.tx);
4168 if (ret)
4169 return ret;
4170
4171 memcpy(buf, &esw_stats, size);
4172 return size;
4173}
4174
4175static ssize_t
4176qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
4177 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4178{
4179 struct device *dev = container_of(kobj, struct device, kobj);
4180 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4181 int ret;
4182
4183 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
4184 return QL_STATUS_INVALID_PARAM;
4185
4186 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
4187 QLCNIC_QUERY_RX_COUNTER);
4188 if (ret)
4189 return ret;
4190
4191 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
4192 QLCNIC_QUERY_TX_COUNTER);
4193 if (ret)
4194 return ret;
4195
4196 return size;
4197}
4198
4199static ssize_t
4200qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
4201 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4202{
4203
4204 struct device *dev = container_of(kobj, struct device, kobj);
4205 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4206 int ret;
4207
4208 if (offset >= QLCNIC_MAX_PCI_FUNC)
4209 return QL_STATUS_INVALID_PARAM;
4210
4211 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4212 QLCNIC_QUERY_RX_COUNTER);
4213 if (ret)
4214 return ret;
4215
4216 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4217 QLCNIC_QUERY_TX_COUNTER);
4218 if (ret)
4219 return ret;
4220
4221 return size;
4222}
4223
346fe763
RB
4224static ssize_t
4225qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
4226 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4227{
4228 struct device *dev = container_of(kobj, struct device, kobj);
4229 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4230 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 4231 struct qlcnic_pci_info *pci_info;
346fe763
RB
4232 int i, ret;
4233
4234 if (size != sizeof(pci_cfg))
4235 return QL_STATUS_INVALID_PARAM;
4236
e88db3bd
DC
4237 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
4238 if (!pci_info)
4239 return -ENOMEM;
4240
346fe763 4241 ret = qlcnic_get_pci_info(adapter, pci_info);
e88db3bd
DC
4242 if (ret) {
4243 kfree(pci_info);
346fe763 4244 return ret;
e88db3bd 4245 }
346fe763
RB
4246
4247 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
4248 pci_cfg[i].pci_func = pci_info[i].id;
4249 pci_cfg[i].func_type = pci_info[i].type;
4250 pci_cfg[i].port_num = pci_info[i].default_port;
4251 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
4252 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
4253 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
4254 }
4255 memcpy(buf, &pci_cfg, size);
e88db3bd 4256 kfree(pci_info);
346fe763 4257 return size;
346fe763
RB
4258}
4259static struct bin_attribute bin_attr_npar_config = {
4260 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
4261 .size = 0,
4262 .read = qlcnic_sysfs_read_npar_config,
4263 .write = qlcnic_sysfs_write_npar_config,
4264};
4265
4266static struct bin_attribute bin_attr_pci_config = {
4267 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
4268 .size = 0,
4269 .read = qlcnic_sysfs_read_pci_config,
4270 .write = NULL,
4271};
4272
b6021212
AKS
4273static struct bin_attribute bin_attr_port_stats = {
4274 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
4275 .size = 0,
4276 .read = qlcnic_sysfs_get_port_stats,
4277 .write = qlcnic_sysfs_clear_port_stats,
4278};
4279
4280static struct bin_attribute bin_attr_esw_stats = {
4281 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
4282 .size = 0,
4283 .read = qlcnic_sysfs_get_esw_stats,
4284 .write = qlcnic_sysfs_clear_esw_stats,
4285};
4286
346fe763
RB
4287static struct bin_attribute bin_attr_esw_config = {
4288 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
4289 .size = 0,
4290 .read = qlcnic_sysfs_read_esw_config,
4291 .write = qlcnic_sysfs_write_esw_config,
4292};
4293
4294static struct bin_attribute bin_attr_pm_config = {
4295 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
4296 .size = 0,
4297 .read = qlcnic_sysfs_read_pm_config,
4298 .write = qlcnic_sysfs_write_pm_config,
4299};
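/*
 * Userspace contract for the config attributes above: reads must transfer
 * the full QLCNIC_MAX_PCI_FUNC-entry array in one call, while writes must be
 * a whole multiple of the per-function structure size. The port_stats and
 * esw_stats attributes instead use the file offset as the PCI function or
 * eswitch port index being queried or cleared.
 */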
4300
af19b491
AKS
4301static void
4302qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
4303{
4304 struct device *dev = &adapter->pdev->dev;
4305
4306 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4307 if (device_create_file(dev, &dev_attr_bridged_mode))
4308 dev_warn(dev,
4309 "failed to create bridged_mode sysfs entry\n");
4310}
4311
4312static void
4313qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
4314{
4315 struct device *dev = &adapter->pdev->dev;
4316
4317 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4318 device_remove_file(dev, &dev_attr_bridged_mode);
4319}
4320
4321static void
4322qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4323{
4324 struct device *dev = &adapter->pdev->dev;
b43e5ee7 4325 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
af19b491 4326
b6021212
AKS
4327 if (device_create_bin_file(dev, &bin_attr_port_stats))
4328 dev_info(dev, "failed to create port stats sysfs entry\n");
4329
132ff00a
AC
4330 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4331 return;
af19b491
AKS
4332 if (device_create_file(dev, &dev_attr_diag_mode))
4333 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4334 if (device_create_bin_file(dev, &bin_attr_crb))
4335 dev_info(dev, "failed to create crb sysfs entry\n");
4336 if (device_create_bin_file(dev, &bin_attr_mem))
4337 dev_info(dev, "failed to create mem sysfs entry\n");
b43e5ee7
SC
4338
4339 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
4340 return;
4341
53478fef
SC
4342 if (device_create_bin_file(dev, &bin_attr_pci_config))
4343 dev_info(dev, "failed to create pci config sysfs entry\n");
b43e5ee7
SC
4344 if (device_create_file(dev, &dev_attr_beacon))
4345 dev_info(dev, "failed to create beacon sysfs entry\n");
4346
4e8acb01
RB
4347 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4348 return;
4349 if (device_create_bin_file(dev, &bin_attr_esw_config))
4350 dev_info(dev, "failed to create esw config sysfs entry\n");
4351 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4352 return;
346fe763
RB
4353 if (device_create_bin_file(dev, &bin_attr_npar_config))
4354 dev_info(dev, "failed to create npar config sysfs entry\n");
346fe763
RB
4355 if (device_create_bin_file(dev, &bin_attr_pm_config))
4356 dev_info(dev, "failed to create pm config sysfs entry\n");
b6021212
AKS
4357 if (device_create_bin_file(dev, &bin_attr_esw_stats))
4358 dev_info(dev, "failed to create eswitch stats sysfs entry\n");
af19b491
AKS
4359}
4360
af19b491
AKS
4361static void
4362qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4363{
4364 struct device *dev = &adapter->pdev->dev;
b43e5ee7 4365 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
af19b491 4366
b6021212
AKS
4367 device_remove_bin_file(dev, &bin_attr_port_stats);
4368
132ff00a
AC
4369 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4370 return;
af19b491
AKS
4371 device_remove_file(dev, &dev_attr_diag_mode);
4372 device_remove_bin_file(dev, &bin_attr_crb);
4373 device_remove_bin_file(dev, &bin_attr_mem);
b43e5ee7
SC
4374 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
4375 return;
53478fef 4376 device_remove_bin_file(dev, &bin_attr_pci_config);
b43e5ee7 4377 device_remove_file(dev, &dev_attr_beacon);
4e8acb01
RB
4378 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4379 return;
4380 device_remove_bin_file(dev, &bin_attr_esw_config);
4381 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4382 return;
346fe763 4383 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 4384 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 4385 device_remove_bin_file(dev, &bin_attr_esw_stats);
af19b491
AKS
4386}
4387
4388#ifdef CONFIG_INET
4389
4390#define is_qlcnic_netdev(dev) ((dev)->netdev_ops == &qlcnic_netdev_ops)
4391
af19b491 4392static void
aec1e845
AKS
4393qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4394 struct net_device *dev, unsigned long event)
af19b491
AKS
4395{
4396 struct in_device *indev;
af19b491 4397
af19b491
AKS
4398 indev = in_dev_get(dev);
4399 if (!indev)
4400 return;
4401
4402 for_ifa(indev) {
4403 switch (event) {
4404 case NETDEV_UP:
4405 qlcnic_config_ipaddr(adapter,
4406 ifa->ifa_address, QLCNIC_IP_UP);
4407 break;
4408 case NETDEV_DOWN:
4409 qlcnic_config_ipaddr(adapter,
4410 ifa->ifa_address, QLCNIC_IP_DOWN);
4411 break;
4412 default:
4413 break;
4414 }
4415 } endfor_ifa(indev);
4416
4417 in_dev_put(indev);
af19b491
AKS
4418}
4419
aec1e845
AKS
4420static void
4421qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4422{
4423 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4424 struct net_device *dev;
4425 u16 vid;
4426
4427 qlcnic_config_indev_addr(adapter, netdev, event);
4428
b9796a14 4429 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
223bb15e 4430 dev = __vlan_find_dev_deep(netdev, vid);
aec1e845
AKS
4431 if (!dev)
4432 continue;
aec1e845
AKS
4433 qlcnic_config_indev_addr(adapter, dev, event);
4434 }
4435}
4436
af19b491
AKS
4437static int qlcnic_netdev_event(struct notifier_block *this,
4438 unsigned long event, void *ptr)
4439{
4440 struct qlcnic_adapter *adapter;
4441 struct net_device *dev = (struct net_device *)ptr;
4442
4443recheck:
4444 if (dev == NULL)
4445 goto done;
4446
4447 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4448 dev = vlan_dev_real_dev(dev);
4449 goto recheck;
4450 }
4451
4452 if (!is_qlcnic_netdev(dev))
4453 goto done;
4454
4455 adapter = netdev_priv(dev);
4456
4457 if (!adapter)
4458 goto done;
4459
8a15ad1f 4460 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4461 goto done;
4462
aec1e845 4463 qlcnic_config_indev_addr(adapter, dev, event);
af19b491
AKS
4464done:
4465 return NOTIFY_DONE;
4466}
4467
4468static int
4469qlcnic_inetaddr_event(struct notifier_block *this,
4470 unsigned long event, void *ptr)
4471{
4472 struct qlcnic_adapter *adapter;
4473 struct net_device *dev;
4474
4475 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4476
4477 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4478
4479recheck:
aec1e845 4480 if (dev == NULL)
af19b491
AKS
4481 goto done;
4482
4483 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4484 dev = vlan_dev_real_dev(dev);
4485 goto recheck;
4486 }
4487
4488 if (!is_qlcnic_netdev(dev))
4489 goto done;
4490
4491 adapter = netdev_priv(dev);
4492
251a84c9 4493 if (!adapter)
af19b491
AKS
4494 goto done;
4495
8a15ad1f 4496 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4497 goto done;
4498
4499 switch (event) {
4500 case NETDEV_UP:
4501 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4502 break;
4503 case NETDEV_DOWN:
4504 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4505 break;
4506 default:
4507 break;
4508 }
4509
4510done:
4511 return NOTIFY_DONE;
4512}
4513
4514static struct notifier_block qlcnic_netdev_cb = {
4515 .notifier_call = qlcnic_netdev_event,
4516};
4517
4518static struct notifier_block qlcnic_inetaddr_cb = {
4519 .notifier_call = qlcnic_inetaddr_event,
4520};
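/*
 * The notifier blocks above track NETDEV_UP/NETDEV_DOWN and inetaddr events
 * on qlcnic interfaces (including VLAN devices stacked on them) and call
 * qlcnic_config_ipaddr() with QLCNIC_IP_UP/QLCNIC_IP_DOWN so the firmware is
 * kept in sync with the IPv4 addresses configured on the device.
 */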
4521#else
4522static void
aec1e845 4523qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
af19b491
AKS
4524{ }
4525#endif
3646f0e5 4526static const struct pci_error_handlers qlcnic_err_handler = {
451724c8
SC
4527 .error_detected = qlcnic_io_error_detected,
4528 .slot_reset = qlcnic_io_slot_reset,
4529 .resume = qlcnic_io_resume,
4530};
af19b491
AKS
4531
4532static struct pci_driver qlcnic_driver = {
4533 .name = qlcnic_driver_name,
4534 .id_table = qlcnic_pci_tbl,
4535 .probe = qlcnic_probe,
4536 .remove = __devexit_p(qlcnic_remove),
4537#ifdef CONFIG_PM
4538 .suspend = qlcnic_suspend,
4539 .resume = qlcnic_resume,
4540#endif
451724c8
SC
4541 .shutdown = qlcnic_shutdown,
4542 .err_handler = &qlcnic_err_handler
4543
af19b491
AKS
4544};
4545
4546static int __init qlcnic_init_module(void)
4547{
0cf3a14c 4548 int ret;
af19b491
AKS
4549
4550 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4551
f7ec804a
AKS
4552 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4553 if (qlcnic_wq == NULL) {
4554 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4555 return -ENOMEM;
4556 }
4557
af19b491
AKS
4558#ifdef CONFIG_INET
4559 register_netdevice_notifier(&qlcnic_netdev_cb);
4560 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4561#endif
4562
0cf3a14c
AKS
4563 ret = pci_register_driver(&qlcnic_driver);
4564 if (ret) {
4565#ifdef CONFIG_INET
4566 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4567 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4568#endif
f7ec804a 4569 destroy_workqueue(qlcnic_wq);
0cf3a14c 4570 }
af19b491 4571
0cf3a14c 4572 return ret;
af19b491
AKS
4573}
4574
4575module_init(qlcnic_init_module);
4576
4577static void __exit qlcnic_exit_module(void)
4578{
4579
4580 pci_unregister_driver(&qlcnic_driver);
4581
4582#ifdef CONFIG_INET
4583 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4584 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4585#endif
f7ec804a 4586 destroy_workqueue(qlcnic_wq);
af19b491
AKS
4587}
4588
4589module_exit(qlcnic_exit_module);