drivers/net/qlcnic/qlcnic_main.c
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
31#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h>
33#include <net/ip.h>
34#include <linux/ipv6.h>
35#include <linux/inetdevice.h>
36#include <linux/sysfs.h>
37#include <linux/aer.h>
38
39MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
40MODULE_LICENSE("GPL");
41MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
42MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
43
44char qlcnic_driver_name[] = "qlcnic";
45static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
46 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
47
48static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
49
50/* Default to restricted 1G auto-neg mode */
51static int wol_port_mode = 5;
52
53static int use_msi = 1;
54module_param(use_msi, int, 0644);
55MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
56
57static int use_msi_x = 1;
58module_param(use_msi_x, int, 0644);
59MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
60
61static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
62module_param(auto_fw_reset, int, 0644);
63MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
64
65static int load_fw_file;
66module_param(load_fw_file, int, 0644);
67MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
68
69static int qlcnic_config_npars;
70module_param(qlcnic_config_npars, int, 0644);
71MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
72
73static int __devinit qlcnic_probe(struct pci_dev *pdev,
74 const struct pci_device_id *ent);
75static void __devexit qlcnic_remove(struct pci_dev *pdev);
76static int qlcnic_open(struct net_device *netdev);
77static int qlcnic_close(struct net_device *netdev);
78static void qlcnic_tx_timeout(struct net_device *netdev);
79static void qlcnic_attach_work(struct work_struct *work);
80static void qlcnic_fwinit_work(struct work_struct *work);
81static void qlcnic_fw_poll_work(struct work_struct *work);
82static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
83 work_func_t func, int delay);
84static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
85static int qlcnic_poll(struct napi_struct *napi, int budget);
86static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
87#ifdef CONFIG_NET_POLL_CONTROLLER
88static void qlcnic_poll_controller(struct net_device *netdev);
89#endif
90
91static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
92static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
93static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
94static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
95
96static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
97static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
98static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
99
100static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
101static irqreturn_t qlcnic_intr(int irq, void *data);
102static irqreturn_t qlcnic_msi_intr(int irq, void *data);
103static irqreturn_t qlcnic_msix_intr(int irq, void *data);
104
105static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
106static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
107static int qlcnic_start_firmware(struct qlcnic_adapter *);
108
109static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
110static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
111static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
112static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
113/* PCI Device ID Table */
114#define ENTRY(device) \
115 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
116 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
117
118#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
119
120static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
121 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
122 {0,}
123};
124
125MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
126
127
128void
129qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
130 struct qlcnic_host_tx_ring *tx_ring)
131{
132 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
133}
134
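/* Per-PCI-function interrupt target status registers used in MSI mode. */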
135static const u32 msi_tgt_status[8] = {
136 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
137 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
138 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
139 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
140};
141
142static const
143struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
144
145static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
146{
147 writel(0, sds_ring->crb_intr_mask);
148}
149
150static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
151{
152 struct qlcnic_adapter *adapter = sds_ring->adapter;
153
154 writel(0x1, sds_ring->crb_intr_mask);
155
156 if (!QLCNIC_IS_MSI_FAMILY(adapter))
157 writel(0xfbff, adapter->tgt_mask_reg);
158}
159
160static int
161qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
162{
163 int size = sizeof(struct qlcnic_host_sds_ring) * count;
164
165 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
166
167 return (recv_ctx->sds_rings == NULL);
168}
169
170static void
171qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
172{
173 if (recv_ctx->sds_rings != NULL)
174 kfree(recv_ctx->sds_rings);
175
176 recv_ctx->sds_rings = NULL;
177}
178
179static int
180qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
181{
182 int ring;
183 struct qlcnic_host_sds_ring *sds_ring;
184 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
185
186 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
187 return -ENOMEM;
188
189 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
190 sds_ring = &recv_ctx->sds_rings[ring];
191
192 if (ring == adapter->max_sds_rings - 1)
193 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
194 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
195 else
196 netif_napi_add(netdev, &sds_ring->napi,
197 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
198 }
199
200 return 0;
201}
202
203static void
204qlcnic_napi_del(struct qlcnic_adapter *adapter)
205{
206 int ring;
207 struct qlcnic_host_sds_ring *sds_ring;
208 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
209
210 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
211 sds_ring = &recv_ctx->sds_rings[ring];
212 netif_napi_del(&sds_ring->napi);
213 }
214
215 qlcnic_free_sds_rings(&adapter->recv_ctx);
216}
217
218static void
219qlcnic_napi_enable(struct qlcnic_adapter *adapter)
220{
221 int ring;
222 struct qlcnic_host_sds_ring *sds_ring;
223 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
224
225 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
226 return;
227
228 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
229 sds_ring = &recv_ctx->sds_rings[ring];
230 napi_enable(&sds_ring->napi);
231 qlcnic_enable_int(sds_ring);
232 }
233}
234
235static void
236qlcnic_napi_disable(struct qlcnic_adapter *adapter)
237{
238 int ring;
239 struct qlcnic_host_sds_ring *sds_ring;
240 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
241
242 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
243 return;
244
245 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
246 sds_ring = &recv_ctx->sds_rings[ring];
247 qlcnic_disable_int(sds_ring);
248 napi_synchronize(&sds_ring->napi);
249 napi_disable(&sds_ring->napi);
250 }
251}
252
253static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
254{
255 memset(&adapter->stats, 0, sizeof(adapter->stats));
256}
257
258static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
259{
260 u32 val, data;
261
262 val = adapter->ahw.board_type;
263 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
264 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
265 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
266 data = QLCNIC_PORT_MODE_802_3_AP;
267 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
268 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
269 data = QLCNIC_PORT_MODE_XG;
270 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
271 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
272 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
273 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
274 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
275 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
276 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
277 } else {
278 data = QLCNIC_PORT_MODE_AUTO_NEG;
279 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
280 }
281
282 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
283 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
284 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
285 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
286 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
287 }
288 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
289 }
290}
291
292static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
293{
294 u32 control;
295 int pos;
296
297 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
298 if (pos) {
299 pci_read_config_dword(pdev, pos, &control);
300 if (enable)
301 control |= PCI_MSIX_FLAGS_ENABLE;
302 else
303 control = 0;
304 pci_write_config_dword(pdev, pos, control);
305 }
306}
307
308static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
309{
310 int i;
311
312 for (i = 0; i < count; i++)
313 adapter->msix_entries[i].entry = i;
314}
315
316static int
317qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
318{
319 u8 mac_addr[ETH_ALEN];
320 struct net_device *netdev = adapter->netdev;
321 struct pci_dev *pdev = adapter->pdev;
322
323 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
324 return -EIO;
325
326 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
327 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
328 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
329
330 /* set station address */
331
332 if (!is_valid_ether_addr(netdev->perm_addr))
333 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
334 netdev->dev_addr);
335
336 return 0;
337}
338
339static int qlcnic_set_mac(struct net_device *netdev, void *p)
340{
341 struct qlcnic_adapter *adapter = netdev_priv(netdev);
342 struct sockaddr *addr = p;
343
344 if (!is_valid_ether_addr(addr->sa_data))
345 return -EINVAL;
346
347 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
348 netif_device_detach(netdev);
349 qlcnic_napi_disable(adapter);
350 }
351
352 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
353 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
354 qlcnic_set_multi(adapter->netdev);
355
356 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
357 netif_device_attach(netdev);
358 qlcnic_napi_enable(adapter);
359 }
360 return 0;
361}
362
363static const struct net_device_ops qlcnic_netdev_ops = {
364 .ndo_open = qlcnic_open,
365 .ndo_stop = qlcnic_close,
366 .ndo_start_xmit = qlcnic_xmit_frame,
367 .ndo_get_stats = qlcnic_get_stats,
368 .ndo_validate_addr = eth_validate_addr,
369 .ndo_set_multicast_list = qlcnic_set_multi,
370 .ndo_set_mac_address = qlcnic_set_mac,
371 .ndo_change_mtu = qlcnic_change_mtu,
372 .ndo_tx_timeout = qlcnic_tx_timeout,
373#ifdef CONFIG_NET_POLL_CONTROLLER
374 .ndo_poll_controller = qlcnic_poll_controller,
375#endif
376};
377
378static struct qlcnic_nic_template qlcnic_ops = {
379 .get_mac_addr = qlcnic_get_mac_address,
380 .config_bridged_mode = qlcnic_config_bridged_mode,
381 .config_led = qlcnic_config_led,
382 .start_firmware = qlcnic_start_firmware
383};
384
385static struct qlcnic_nic_template qlcnic_vf_ops = {
386 .get_mac_addr = qlcnic_get_mac_address,
387 .config_bridged_mode = qlcnicvf_config_bridged_mode,
388 .config_led = qlcnicvf_config_led,
389 .start_firmware = qlcnicvf_start_firmware
390};
391
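/* Pick an interrupt mode for this function: MSI-X when supported and
 * pci_enable_msix() succeeds, otherwise MSI, otherwise legacy INTx. */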
392static void
393qlcnic_setup_intr(struct qlcnic_adapter *adapter)
394{
395 const struct qlcnic_legacy_intr_set *legacy_intrp;
396 struct pci_dev *pdev = adapter->pdev;
397 int err, num_msix;
398
399 if (adapter->rss_supported) {
400 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
401 MSIX_ENTRIES_PER_ADAPTER : 2;
402 } else
403 num_msix = 1;
404
405 adapter->max_sds_rings = 1;
406
407 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
408
409 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
410
411 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
412 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
413 legacy_intrp->tgt_status_reg);
414 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
415 legacy_intrp->tgt_mask_reg);
416 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
417
418 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
419 ISR_INT_STATE_REG);
420
421 qlcnic_set_msix_bit(pdev, 0);
422
423 if (adapter->msix_supported) {
424
425 qlcnic_init_msix_entries(adapter, num_msix);
426 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
427 if (err == 0) {
428 adapter->flags |= QLCNIC_MSIX_ENABLED;
429 qlcnic_set_msix_bit(pdev, 1);
430
431 if (adapter->rss_supported)
432 adapter->max_sds_rings = num_msix;
433
434 dev_info(&pdev->dev, "using msi-x interrupts\n");
435 return;
436 }
437
438 if (err > 0)
439 pci_disable_msix(pdev);
440
441 /* fall through for msi */
442 }
443
444 if (use_msi && !pci_enable_msi(pdev)) {
445 adapter->flags |= QLCNIC_MSI_ENABLED;
446 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
447 msi_tgt_status[adapter->ahw.pci_func]);
448 dev_info(&pdev->dev, "using msi interrupts\n");
449 adapter->msix_entries[0].vector = pdev->irq;
450 return;
451 }
452
453 dev_info(&pdev->dev, "using legacy interrupts\n");
454 adapter->msix_entries[0].vector = pdev->irq;
455}
456
457static void
458qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
459{
460 if (adapter->flags & QLCNIC_MSIX_ENABLED)
461 pci_disable_msix(adapter->pdev);
462 if (adapter->flags & QLCNIC_MSI_ENABLED)
463 pci_disable_msi(adapter->pdev);
464}
465
466static void
467qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
468{
469 if (adapter->ahw.pci_base0 != NULL)
470 iounmap(adapter->ahw.pci_base0);
471}
472
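/* Allocate per-function NPAR and eswitch tables and seed them from the
 * PCI info reported by firmware. */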
473static int
474qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
475{
476 struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
477 int i, ret = 0, err;
478 u8 pfn;
479
480 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
481 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
482 if (!adapter->npars)
483 return -ENOMEM;
484
485 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
486 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
487 if (!adapter->eswitch) {
488 err = -ENOMEM;
489 goto err_npars;
490 }
491
492 ret = qlcnic_get_pci_info(adapter, pci_info);
493 if (ret)
494 goto err_eswitch;
495
496 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
497 pfn = pci_info[i].id;
498 if (pfn > QLCNIC_MAX_PCI_FUNC)
499 return QL_STATUS_INVALID_PARAM;
500 adapter->npars[pfn].active = pci_info[i].active;
501 adapter->npars[pfn].type = pci_info[i].type;
502 adapter->npars[pfn].phy_port = pci_info[i].default_port;
503 adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
504 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
505 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
506 }
507
508 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
509 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
510
511 return 0;
512
513err_eswitch:
514 kfree(adapter->eswitch);
515 adapter->eswitch = NULL;
516err_npars:
517 kfree(adapter->npars);
518 adapter->npars = NULL;
519
520 return ret;
521}
522
523static int
524qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
525{
526 u8 id;
527 u32 ref_count;
528 int i, ret = 1;
529 u32 data = QLCNIC_MGMT_FUNC;
530 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
531
532 /* If other drivers are not in use set their privilege level */
533 ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
534 ret = qlcnic_api_lock(adapter);
535 if (ret)
536 goto err_lock;
537 if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
538 goto err_npar;
539
540 if (qlcnic_config_npars) {
541 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
542 id = i;
543 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
544 id == adapter->ahw.pci_func)
545 continue;
546 data |= (qlcnic_config_npars &
547 QLC_DEV_SET_DRV(0xf, id));
548 }
549 } else {
550 data = readl(priv_op);
551 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
552 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
553 adapter->ahw.pci_func));
554 }
555 writel(data, priv_op);
556err_npar:
557 qlcnic_api_unlock(adapter);
558err_lock:
559 return ret;
560}
561
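/* Read the firmware HAL version and this function's privilege level and
 * select the management, privileged or non-privileged ops accordingly. */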
562static u32
563qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
564{
565 void __iomem *msix_base_addr;
566 void __iomem *priv_op;
567 struct qlcnic_info nic_info;
568 u32 func;
569 u32 msix_base;
570 u32 op_mode, priv_level;
571
572 /* Determine FW API version */
573 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
574
575 /* Find PCI function number */
576 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
577 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
578 msix_base = readl(msix_base_addr);
579 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
580 adapter->ahw.pci_func = func;
581
582 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
583 adapter->capabilities = nic_info.capabilities;
584
585 if (adapter->capabilities & BIT_6)
586 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
587 else
588 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
589 }
590
591 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
592 adapter->nic_ops = &qlcnic_ops;
593 return adapter->fw_hal_version;
594 }
595
596 /* Determine function privilege level */
597 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
598 op_mode = readl(priv_op);
599 if (op_mode == QLC_DEV_DRV_DEFAULT)
600 priv_level = QLCNIC_MGMT_FUNC;
601 else
602 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
603
604 switch (priv_level) {
605 case QLCNIC_MGMT_FUNC:
606 adapter->op_mode = QLCNIC_MGMT_FUNC;
607 adapter->nic_ops = &qlcnic_ops;
608 qlcnic_init_pci_info(adapter);
609 /* Set privilege level for other functions */
610 qlcnic_set_function_modes(adapter);
611 dev_info(&adapter->pdev->dev,
612 "HAL Version: %d, Management function\n",
613 adapter->fw_hal_version);
614 break;
615 case QLCNIC_PRIV_FUNC:
616 adapter->op_mode = QLCNIC_PRIV_FUNC;
617 dev_info(&adapter->pdev->dev,
618 "HAL Version: %d, Privileged function\n",
619 adapter->fw_hal_version);
620 adapter->nic_ops = &qlcnic_ops;
621 break;
622 case QLCNIC_NON_PRIV_FUNC:
623 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
624 dev_info(&adapter->pdev->dev,
625 "HAL Version: %d Non Privileged function\n",
626 adapter->fw_hal_version);
627 adapter->nic_ops = &qlcnic_vf_ops;
628 break;
629 default:
630 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
631 priv_level);
632 return 0;
633 }
634 return adapter->fw_hal_version;
635}
636
637static int
638qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
639{
640 void __iomem *mem_ptr0 = NULL;
641 resource_size_t mem_base;
642 unsigned long mem_len, pci_len0 = 0;
643
644 struct pci_dev *pdev = adapter->pdev;
645
646 /* remap phys address */
647 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
648 mem_len = pci_resource_len(pdev, 0);
649
650 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
651
652 mem_ptr0 = pci_ioremap_bar(pdev, 0);
653 if (mem_ptr0 == NULL) {
654 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
655 return -EIO;
656 }
657 pci_len0 = mem_len;
658 } else {
659 return -EIO;
660 }
661
662 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
663
664 adapter->ahw.pci_base0 = mem_ptr0;
665 adapter->ahw.pci_len0 = pci_len0;
666
667 if (!qlcnic_get_driver_mode(adapter)) {
668 iounmap(adapter->ahw.pci_base0);
669 return -EIO;
670 }
671
672 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
673 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
674
675 return 0;
676}
677
678static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
679{
680 struct pci_dev *pdev = adapter->pdev;
681 int i, found = 0;
682
683 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
684 if (qlcnic_boards[i].vendor == pdev->vendor &&
685 qlcnic_boards[i].device == pdev->device &&
686 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
687 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
688 sprintf(name, "%pM: %s" ,
689 adapter->mac_addr,
690 qlcnic_boards[i].short_name);
691 found = 1;
692 break;
693 }
694
695 }
696
697 if (!found)
698 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
699}
700
701static void
702qlcnic_check_options(struct qlcnic_adapter *adapter)
703{
704 u32 fw_major, fw_minor, fw_build;
705 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
706 char serial_num[32];
707 int i, offset, val;
708 int *ptr32;
709 struct pci_dev *pdev = adapter->pdev;
710 struct qlcnic_info nic_info;
711 adapter->driver_mismatch = 0;
712
713 ptr32 = (int *)&serial_num;
714 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
715 for (i = 0; i < 8; i++) {
716 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
717 dev_err(&pdev->dev, "error reading board info\n");
718 adapter->driver_mismatch = 1;
719 return;
720 }
721 ptr32[i] = cpu_to_le32(val);
722 offset += sizeof(u32);
723 }
724
725 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
726 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
727 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
728
729 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
730
731 if (adapter->portnum == 0) {
732 get_brd_name(adapter, brd_name);
733
734 pr_info("%s: %s Board Chip rev 0x%x\n",
735 module_name(THIS_MODULE),
736 brd_name, adapter->ahw.revision_id);
737 }
738
739 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
740 fw_major, fw_minor, fw_build);
741
742 adapter->flags &= ~QLCNIC_LRO_ENABLED;
743
744 if (adapter->ahw.port_type == QLCNIC_XGBE) {
745 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
746 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
747 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
748 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
749 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
750 }
751
752 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
753 adapter->physical_port = nic_info.phys_port;
754 adapter->switch_mode = nic_info.switch_mode;
755 adapter->max_tx_ques = nic_info.max_tx_ques;
756 adapter->max_rx_ques = nic_info.max_rx_ques;
757 adapter->capabilities = nic_info.capabilities;
758 adapter->max_mac_filters = nic_info.max_mac_filters;
759 adapter->max_mtu = nic_info.max_mtu;
760 }
761
762 adapter->msix_supported = !!use_msi_x;
763 adapter->rss_supported = !!use_msi_x;
764
765 adapter->num_txd = MAX_CMD_DESCRIPTORS;
766
767 adapter->max_rds_rings = 2;
768}
769
770static int
771qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
772{
773 int i, err = 0;
774 struct qlcnic_npar_info *npar;
775 struct qlcnic_info nic_info;
776
777 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
778 !adapter->need_fw_reset)
779 return 0;
780
781 if (adapter->op_mode == QLCNIC_MGMT_FUNC) {
782 /* Set the NPAR config data after FW reset */
783 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
784 npar = &adapter->npars[i];
785 if (npar->type != QLCNIC_TYPE_NIC)
786 continue;
787 err = qlcnic_get_nic_info(adapter, &nic_info, i);
788 if (err)
789 goto err_out;
790 nic_info.min_tx_bw = npar->min_bw;
791 nic_info.max_tx_bw = npar->max_bw;
792 err = qlcnic_set_nic_info(adapter, &nic_info);
793 if (err)
794 goto err_out;
795
796 if (npar->enable_pm) {
797 err = qlcnic_config_port_mirroring(adapter,
798 npar->dest_npar, 1, i);
799 if (err)
800 goto err_out;
801
802 }
803 npar->mac_learning = DEFAULT_MAC_LEARN;
804 npar->host_vlan_tag = 0;
805 npar->promisc_mode = 0;
806 npar->discard_tagged = 0;
807 npar->vlan_id = 0;
808 }
809 }
810err_out:
811 return err;
812}
813
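/* Firmware bring-up: if this function owns device initialization, load the
 * image from flash or a file; otherwise just wait for the handshake. */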
814static int
815qlcnic_start_firmware(struct qlcnic_adapter *adapter)
816{
817 int val, err, first_boot;
818
819 err = qlcnic_can_start_firmware(adapter);
820 if (err < 0)
821 return err;
822 else if (!err)
823 goto wait_init;
824
825 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
826 if (first_boot == 0x55555555)
827 /* This is the first boot after power up */
828 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
829
830 if (load_fw_file)
831 qlcnic_request_firmware(adapter);
832 else {
833 if (qlcnic_check_flash_fw_ver(adapter))
834 goto err_out;
835
836 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
837 }
838
839 err = qlcnic_need_fw_reset(adapter);
840 if (err < 0)
841 goto err_out;
842 if (err == 0)
843 goto wait_init;
844
845 if (first_boot != 0x55555555) {
846 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
847 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
848 qlcnic_pinit_from_rom(adapter);
849 msleep(1);
850 }
851
852 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
853 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
854
855 qlcnic_set_port_mode(adapter);
856
857 err = qlcnic_load_firmware(adapter);
858 if (err)
859 goto err_out;
860
861 qlcnic_release_firmware(adapter);
862
863 val = (_QLCNIC_LINUX_MAJOR << 16)
864 | ((_QLCNIC_LINUX_MINOR << 8))
865 | (_QLCNIC_LINUX_SUBVERSION);
866 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
867
868wait_init:
869 /* Handshake with the card before we register the devices. */
870 err = qlcnic_init_firmware(adapter);
871 if (err)
872 goto err_out;
873
874 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
875 qlcnic_idc_debug_info(adapter, 1);
876
877 qlcnic_check_options(adapter);
878 if (qlcnic_reset_npar_config(adapter))
879 goto err_out;
880 qlcnic_dev_set_npar_ready(adapter);
881
882 adapter->need_fw_reset = 0;
883
884 qlcnic_release_firmware(adapter);
885 return 0;
886
887err_out:
888 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
889 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
890 qlcnic_release_firmware(adapter);
891 return err;
892}
893
894static int
895qlcnic_request_irq(struct qlcnic_adapter *adapter)
896{
897 irq_handler_t handler;
898 struct qlcnic_host_sds_ring *sds_ring;
899 int err, ring;
900
901 unsigned long flags = 0;
902 struct net_device *netdev = adapter->netdev;
903 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
904
905 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
906 handler = qlcnic_tmp_intr;
907 if (!QLCNIC_IS_MSI_FAMILY(adapter))
908 flags |= IRQF_SHARED;
909
910 } else {
911 if (adapter->flags & QLCNIC_MSIX_ENABLED)
912 handler = qlcnic_msix_intr;
913 else if (adapter->flags & QLCNIC_MSI_ENABLED)
914 handler = qlcnic_msi_intr;
915 else {
916 flags |= IRQF_SHARED;
917 handler = qlcnic_intr;
918 }
919 }
920 adapter->irq = netdev->irq;
921
922 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
923 sds_ring = &recv_ctx->sds_rings[ring];
924 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
925 err = request_irq(sds_ring->irq, handler,
926 flags, sds_ring->name, sds_ring);
927 if (err)
928 return err;
929 }
930
931 return 0;
932}
933
934static void
935qlcnic_free_irq(struct qlcnic_adapter *adapter)
936{
937 int ring;
938 struct qlcnic_host_sds_ring *sds_ring;
939
940 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
941
942 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
943 sds_ring = &recv_ctx->sds_rings[ring];
944 free_irq(sds_ring->irq, sds_ring);
945 }
946}
947
948static void
949qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
950{
951 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
952 adapter->coal.normal.data.rx_time_us =
953 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
954 adapter->coal.normal.data.rx_packets =
955 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
956 adapter->coal.normal.data.tx_time_us =
957 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
958 adapter->coal.normal.data.tx_packets =
959 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
960}
961
962static int
963__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
964{
965 int ring;
966 struct qlcnic_host_rds_ring *rds_ring;
967
968 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
969 return -EIO;
970
971 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
972 return 0;
973
974 if (qlcnic_fw_create_ctx(adapter))
975 return -EIO;
976
977 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
978 rds_ring = &adapter->recv_ctx.rds_rings[ring];
979 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
980 }
981
982 qlcnic_set_multi(netdev);
983 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
984
985 adapter->ahw.linkup = 0;
986
987 if (adapter->max_sds_rings > 1)
988 qlcnic_config_rss(adapter, 1);
989
990 qlcnic_config_intr_coalesce(adapter);
991
992 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
993 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
994
995 qlcnic_napi_enable(adapter);
996
997 qlcnic_linkevent_request(adapter, 1);
998
999 adapter->reset_context = 0;
1000 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1001 return 0;
1002}
1003
1004/* Usage: During resume and firmware recovery module.*/
1005
1006static int
1007qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1008{
1009 int err = 0;
1010
1011 rtnl_lock();
1012 if (netif_running(netdev))
1013 err = __qlcnic_up(adapter, netdev);
1014 rtnl_unlock();
1015
1016 return err;
1017}
1018
1019static void
1020__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1021{
1022 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1023 return;
1024
1025 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1026 return;
1027
1028 smp_mb();
1029 spin_lock(&adapter->tx_clean_lock);
1030 netif_carrier_off(netdev);
1031 netif_tx_disable(netdev);
1032
1033 qlcnic_free_mac_list(adapter);
1034
1035 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1036
1037 qlcnic_napi_disable(adapter);
1038
1039 qlcnic_fw_destroy_ctx(adapter);
1040
1041 qlcnic_reset_rx_buffers_list(adapter);
1042 qlcnic_release_tx_buffers(adapter);
1043 spin_unlock(&adapter->tx_clean_lock);
1044}
1045
1046/* Usage: During suspend and firmware recovery module */
1047
1048static void
1049qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1050{
1051 rtnl_lock();
1052 if (netif_running(netdev))
1053 __qlcnic_down(adapter, netdev);
1054 rtnl_unlock();
1055
1056}
1057
1058static int
1059qlcnic_attach(struct qlcnic_adapter *adapter)
1060{
1061 struct net_device *netdev = adapter->netdev;
1062 struct pci_dev *pdev = adapter->pdev;
1063 int err;
1064
1065 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1066 return 0;
1067
1068 err = qlcnic_napi_add(adapter, netdev);
1069 if (err)
1070 return err;
1071
1072 err = qlcnic_alloc_sw_resources(adapter);
1073 if (err) {
1074 dev_err(&pdev->dev, "Error in setting sw resources\n");
1075 goto err_out_napi_del;
1076 }
1077
1078 err = qlcnic_alloc_hw_resources(adapter);
1079 if (err) {
1080 dev_err(&pdev->dev, "Error in setting hw resources\n");
1081 goto err_out_free_sw;
1082 }
1083
1084 err = qlcnic_request_irq(adapter);
1085 if (err) {
1086 dev_err(&pdev->dev, "failed to setup interrupt\n");
1087 goto err_out_free_hw;
1088 }
1089
1090 qlcnic_init_coalesce_defaults(adapter);
1091
1092 qlcnic_create_sysfs_entries(adapter);
1093
1094 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1095 return 0;
1096
1097err_out_free_hw:
1098 qlcnic_free_hw_resources(adapter);
1099err_out_free_sw:
1100 qlcnic_free_sw_resources(adapter);
1101err_out_napi_del:
1102 qlcnic_napi_del(adapter);
1103 return err;
1104}
1105
1106static void
1107qlcnic_detach(struct qlcnic_adapter *adapter)
1108{
1109 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1110 return;
1111
1112 qlcnic_remove_sysfs_entries(adapter);
1113
1114 qlcnic_free_hw_resources(adapter);
1115 qlcnic_release_rx_buffers(adapter);
1116 qlcnic_free_irq(adapter);
1117 qlcnic_napi_del(adapter);
1118 qlcnic_free_sw_resources(adapter);
1119
1120 adapter->is_up = 0;
1121}
1122
1123void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1124{
1125 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1126 struct qlcnic_host_sds_ring *sds_ring;
1127 int ring;
1128
1129 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1130 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1131 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1132 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1133 qlcnic_disable_int(sds_ring);
1134 }
1135 }
1136
1137 qlcnic_fw_destroy_ctx(adapter);
1138
1139 qlcnic_detach(adapter);
1140
1141 adapter->diag_test = 0;
1142 adapter->max_sds_rings = max_sds_rings;
1143
1144 if (qlcnic_attach(adapter))
1145 goto out;
1146
1147 if (netif_running(netdev))
1148 __qlcnic_up(adapter, netdev);
1149out:
1150 netif_device_attach(netdev);
1151}
1152
1153int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1154{
1155 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1156 struct qlcnic_host_sds_ring *sds_ring;
1157 struct qlcnic_host_rds_ring *rds_ring;
1158 int ring;
1159 int ret;
1160
1161 netif_device_detach(netdev);
1162
1163 if (netif_running(netdev))
1164 __qlcnic_down(adapter, netdev);
1165
1166 qlcnic_detach(adapter);
1167
1168 adapter->max_sds_rings = 1;
1169 adapter->diag_test = test;
1170
1171 ret = qlcnic_attach(adapter);
1172 if (ret) {
1173 netif_device_attach(netdev);
1174 return ret;
1175 }
1176
1177 ret = qlcnic_fw_create_ctx(adapter);
1178 if (ret) {
1179 qlcnic_detach(adapter);
1180 netif_device_attach(netdev);
1181 return ret;
1182 }
1183
1184 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1185 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1186 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1187 }
1188
1189 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1190 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1191 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1192 qlcnic_enable_int(sds_ring);
1193 }
1194 }
1195 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1196
1197 return 0;
1198}
1199
1200/* Reset context in hardware only */
1201static int
1202qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1203{
1204 struct net_device *netdev = adapter->netdev;
1205
1206 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1207 return -EBUSY;
1208
1209 netif_device_detach(netdev);
1210
1211 qlcnic_down(adapter, netdev);
1212
1213 qlcnic_up(adapter, netdev);
1214
1215 netif_device_attach(netdev);
1216
1217 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1218 return 0;
1219}
1220
1221int
1222qlcnic_reset_context(struct qlcnic_adapter *adapter)
1223{
1224 int err = 0;
1225 struct net_device *netdev = adapter->netdev;
1226
1227 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1228 return -EBUSY;
1229
1230 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1231
1232 netif_device_detach(netdev);
1233
1234 if (netif_running(netdev))
1235 __qlcnic_down(adapter, netdev);
1236
1237 qlcnic_detach(adapter);
1238
1239 if (netif_running(netdev)) {
1240 err = qlcnic_attach(adapter);
1241 if (!err)
1242 __qlcnic_up(adapter, netdev);
1243 }
1244
1245 netif_device_attach(netdev);
1246 }
1247
1248 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1249 return err;
1250}
1251
1252static int
1253qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1254 struct net_device *netdev, u8 pci_using_dac)
1255{
1256 int err;
1257 struct pci_dev *pdev = adapter->pdev;
1258
1259 adapter->rx_csum = 1;
1260 adapter->mc_enabled = 0;
1261 adapter->max_mc_count = 38;
1262
1263 netdev->netdev_ops = &qlcnic_netdev_ops;
1264 netdev->watchdog_timeo = 5*HZ;
1265
1266 qlcnic_change_mtu(netdev, netdev->mtu);
1267
1268 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1269
1270 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1271 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
1272 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1273 NETIF_F_IPV6_CSUM);
1274
1275 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1276 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1277 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1278 }
1279
1280 if (pci_using_dac) {
1281 netdev->features |= NETIF_F_HIGHDMA;
1282 netdev->vlan_features |= NETIF_F_HIGHDMA;
1283 }
1284
1285 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1286 netdev->features |= (NETIF_F_HW_VLAN_TX);
1287
1288 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1289 netdev->features |= NETIF_F_LRO;
1290
1291 netdev->irq = adapter->msix_entries[0].vector;
1292
1293 if (qlcnic_read_mac_addr(adapter))
1294 dev_warn(&pdev->dev, "failed to read mac addr\n");
1295
1296 netif_carrier_off(netdev);
1297 netif_stop_queue(netdev);
1298
1299 err = register_netdev(netdev);
1300 if (err) {
1301 dev_err(&pdev->dev, "failed to register net device\n");
1302 return err;
1303 }
1304
1305 return 0;
1306}
1307
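/* Prefer 64-bit DMA masks and fall back to 32-bit if the device or host
 * cannot use them. */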
1308static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1309{
1310 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1311 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1312 *pci_using_dac = 1;
1313 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1314 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1315 *pci_using_dac = 0;
1316 else {
1317 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1318 return -EIO;
1319 }
1320
1321 return 0;
1322}
1323
1324static int __devinit
1325qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1326{
1327 struct net_device *netdev = NULL;
1328 struct qlcnic_adapter *adapter = NULL;
1329 int err;
1330 uint8_t revision_id;
1331 uint8_t pci_using_dac;
1332
1333 err = pci_enable_device(pdev);
1334 if (err)
1335 return err;
1336
1337 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1338 err = -ENODEV;
1339 goto err_out_disable_pdev;
1340 }
1341
1342 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1343 if (err)
1344 goto err_out_disable_pdev;
1345
1346 err = pci_request_regions(pdev, qlcnic_driver_name);
1347 if (err)
1348 goto err_out_disable_pdev;
1349
1350 pci_set_master(pdev);
1351 pci_enable_pcie_error_reporting(pdev);
1352
1353 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1354 if (!netdev) {
1355 dev_err(&pdev->dev, "failed to allocate net_device\n");
1356 err = -ENOMEM;
1357 goto err_out_free_res;
1358 }
1359
1360 SET_NETDEV_DEV(netdev, &pdev->dev);
1361
1362 adapter = netdev_priv(netdev);
1363 adapter->netdev = netdev;
1364 adapter->pdev = pdev;
1365 adapter->dev_rst_time = jiffies;
1366
1367 revision_id = pdev->revision;
1368 adapter->ahw.revision_id = revision_id;
1369
1370 rwlock_init(&adapter->ahw.crb_lock);
1371 mutex_init(&adapter->ahw.mem_lock);
1372
1373 spin_lock_init(&adapter->tx_clean_lock);
1374 INIT_LIST_HEAD(&adapter->mac_list);
1375
1376 err = qlcnic_setup_pci_map(adapter);
1377 if (err)
1378 goto err_out_free_netdev;
1379
1380 /* This will be reset for mezz cards */
1381 adapter->portnum = adapter->ahw.pci_func;
1382
1383 err = qlcnic_get_board_info(adapter);
1384 if (err) {
1385 dev_err(&pdev->dev, "Error getting board config info.\n");
1386 goto err_out_iounmap;
1387 }
1388
1389 if (qlcnic_read_mac_addr(adapter))
1390 dev_warn(&pdev->dev, "failed to read mac addr\n");
1391
1392 if (qlcnic_setup_idc_param(adapter))
1393 goto err_out_iounmap;
1394
1395 err = adapter->nic_ops->start_firmware(adapter);
1396 if (err) {
1397 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
1398 goto err_out_decr_ref;
1399 }
1400
1401 qlcnic_clear_stats(adapter);
1402
1403 qlcnic_setup_intr(adapter);
1404
1405 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1406 if (err)
1407 goto err_out_disable_msi;
1408
1409 pci_set_drvdata(pdev, adapter);
1410
1411 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1412
1413 switch (adapter->ahw.port_type) {
1414 case QLCNIC_GBE:
1415 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1416 adapter->netdev->name);
1417 break;
1418 case QLCNIC_XGBE:
1419 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1420 adapter->netdev->name);
1421 break;
1422 }
1423
1424 qlcnic_create_diag_entries(adapter);
1425
1426 return 0;
1427
1428err_out_disable_msi:
1429 qlcnic_teardown_intr(adapter);
1430
1431err_out_decr_ref:
1432 qlcnic_clr_all_drv_state(adapter);
1433
1434err_out_iounmap:
1435 qlcnic_cleanup_pci_map(adapter);
1436
1437err_out_free_netdev:
1438 free_netdev(netdev);
1439
1440err_out_free_res:
1441 pci_release_regions(pdev);
1442
1443err_out_disable_pdev:
1444 pci_set_drvdata(pdev, NULL);
1445 pci_disable_device(pdev);
1446 return err;
1447}
1448
1449static void __devexit qlcnic_remove(struct pci_dev *pdev)
1450{
1451 struct qlcnic_adapter *adapter;
1452 struct net_device *netdev;
1453
1454 adapter = pci_get_drvdata(pdev);
1455 if (adapter == NULL)
1456 return;
1457
1458 netdev = adapter->netdev;
1459
1460 qlcnic_cancel_fw_work(adapter);
1461
1462 unregister_netdev(netdev);
1463
1464 qlcnic_detach(adapter);
1465
1466 if (adapter->npars != NULL)
1467 kfree(adapter->npars);
1468 if (adapter->eswitch != NULL)
1469 kfree(adapter->eswitch);
1470
1471 qlcnic_clr_all_drv_state(adapter);
1472
1473 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1474
1475 qlcnic_teardown_intr(adapter);
1476
1477 qlcnic_remove_diag_entries(adapter);
1478
1479 qlcnic_cleanup_pci_map(adapter);
1480
1481 qlcnic_release_firmware(adapter);
1482
1483 pci_disable_pcie_error_reporting(pdev);
1484 pci_release_regions(pdev);
1485 pci_disable_device(pdev);
1486 pci_set_drvdata(pdev, NULL);
1487
1488 free_netdev(netdev);
1489}
1490static int __qlcnic_shutdown(struct pci_dev *pdev)
1491{
1492 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1493 struct net_device *netdev = adapter->netdev;
1494 int retval;
1495
1496 netif_device_detach(netdev);
1497
1498 qlcnic_cancel_fw_work(adapter);
1499
1500 if (netif_running(netdev))
1501 qlcnic_down(adapter, netdev);
1502
1503 qlcnic_clr_all_drv_state(adapter);
1504
1505 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1506
1507 retval = pci_save_state(pdev);
1508 if (retval)
1509 return retval;
1510
1511 if (qlcnic_wol_supported(adapter)) {
1512 pci_enable_wake(pdev, PCI_D3cold, 1);
1513 pci_enable_wake(pdev, PCI_D3hot, 1);
1514 }
1515
1516 return 0;
1517}
1518
1519static void qlcnic_shutdown(struct pci_dev *pdev)
1520{
1521 if (__qlcnic_shutdown(pdev))
1522 return;
1523
1524 pci_disable_device(pdev);
1525}
1526
1527#ifdef CONFIG_PM
1528static int
1529qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1530{
1531 int retval;
1532
1533 retval = __qlcnic_shutdown(pdev);
1534 if (retval)
1535 return retval;
1536
1537 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1538 return 0;
1539}
1540
1541static int
1542qlcnic_resume(struct pci_dev *pdev)
1543{
1544 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1545 struct net_device *netdev = adapter->netdev;
1546 int err;
1547
1548 err = pci_enable_device(pdev);
1549 if (err)
1550 return err;
1551
1552 pci_set_power_state(pdev, PCI_D0);
1553 pci_set_master(pdev);
1554 pci_restore_state(pdev);
1555
1556 err = adapter->nic_ops->start_firmware(adapter);
1557 if (err) {
1558 dev_err(&pdev->dev, "failed to start firmware\n");
1559 return err;
1560 }
1561
1562 if (netif_running(netdev)) {
1563 err = qlcnic_up(adapter, netdev);
1564 if (err)
1565 goto done;
1566
1567 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1568 }
1569done:
1570 netif_device_attach(netdev);
1571 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1572 return 0;
1573}
1574#endif
1575
1576static int qlcnic_open(struct net_device *netdev)
1577{
1578 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1579 int err;
1580
1581 if (adapter->driver_mismatch)
1582 return -EIO;
1583
1584 err = qlcnic_attach(adapter);
1585 if (err)
1586 return err;
1587
1588 err = __qlcnic_up(adapter, netdev);
1589 if (err)
1590 goto err_out;
1591
1592 netif_start_queue(netdev);
1593
1594 return 0;
1595
1596err_out:
1597 qlcnic_detach(adapter);
1598 return err;
1599}
1600
1601/*
1602 * qlcnic_close - Disables a network interface entry point
1603 */
1604static int qlcnic_close(struct net_device *netdev)
1605{
1606 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1607
1608 __qlcnic_down(adapter, netdev);
1609 return 0;
1610}
1611
1612static void
1613qlcnic_tso_check(struct net_device *netdev,
1614 struct qlcnic_host_tx_ring *tx_ring,
1615 struct cmd_desc_type0 *first_desc,
1616 struct sk_buff *skb)
1617{
1618 u8 opcode = TX_ETHER_PKT;
1619 __be16 protocol = skb->protocol;
1620 u16 flags = 0, vid = 0;
1621 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1622 struct cmd_desc_type0 *hwdesc;
1623 struct vlan_ethhdr *vh;
1624 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1625 u32 producer = tx_ring->producer;
1626
1627 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1628
1629 vh = (struct vlan_ethhdr *)skb->data;
1630 protocol = vh->h_vlan_encapsulated_proto;
1631 flags = FLAGS_VLAN_TAGGED;
1632
1633 } else if (vlan_tx_tag_present(skb)) {
1634
1635 flags = FLAGS_VLAN_OOB;
1636 vid = vlan_tx_tag_get(skb);
1637 qlcnic_set_tx_vlan_tci(first_desc, vid);
1638 vlan_oob = 1;
1639 }
1640
1641 if (*(skb->data) & BIT_0) {
1642 flags |= BIT_0;
1643 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1644 }
1645
1646 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1647 skb_shinfo(skb)->gso_size > 0) {
1648
1649 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1650
1651 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1652 first_desc->total_hdr_length = hdr_len;
1653 if (vlan_oob) {
1654 first_desc->total_hdr_length += VLAN_HLEN;
1655 first_desc->tcp_hdr_offset = VLAN_HLEN;
1656 first_desc->ip_hdr_offset = VLAN_HLEN;
1657 /* Only in case of TSO on vlan device */
1658 flags |= FLAGS_VLAN_TAGGED;
1659 }
1660
1661 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1662 TX_TCP_LSO6 : TX_TCP_LSO;
1663 tso = 1;
1664
1665 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1666 u8 l4proto;
1667
1668 if (protocol == cpu_to_be16(ETH_P_IP)) {
1669 l4proto = ip_hdr(skb)->protocol;
1670
1671 if (l4proto == IPPROTO_TCP)
1672 opcode = TX_TCP_PKT;
1673 else if (l4proto == IPPROTO_UDP)
1674 opcode = TX_UDP_PKT;
1675 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1676 l4proto = ipv6_hdr(skb)->nexthdr;
1677
1678 if (l4proto == IPPROTO_TCP)
1679 opcode = TX_TCPV6_PKT;
1680 else if (l4proto == IPPROTO_UDP)
1681 opcode = TX_UDPV6_PKT;
1682 }
1683 }
1684
1685 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1686 first_desc->ip_hdr_offset += skb_network_offset(skb);
1687 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1688
1689 if (!tso)
1690 return;
1691
1692 /* For LSO, we need to copy the MAC/IP/TCP headers into
1693 * the descriptor ring
1694 */
1695 copied = 0;
1696 offset = 2;
1697
1698 if (vlan_oob) {
1699 /* Create a TSO vlan header template for firmware */
1700
1701 hwdesc = &tx_ring->desc_head[producer];
1702 tx_ring->cmd_buf_arr[producer].skb = NULL;
1703
1704 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1705 hdr_len + VLAN_HLEN);
1706
1707 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1708 skb_copy_from_linear_data(skb, vh, 12);
1709 vh->h_vlan_proto = htons(ETH_P_8021Q);
1710 vh->h_vlan_TCI = htons(vid);
1711 skb_copy_from_linear_data_offset(skb, 12,
1712 (char *)vh + 16, copy_len - 16);
1713
1714 copied = copy_len - VLAN_HLEN;
1715 offset = 0;
1716
1717 producer = get_next_index(producer, tx_ring->num_desc);
1718 }
1719
1720 while (copied < hdr_len) {
1721
1722 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1723 (hdr_len - copied));
1724
1725 hwdesc = &tx_ring->desc_head[producer];
1726 tx_ring->cmd_buf_arr[producer].skb = NULL;
1727
1728 skb_copy_from_linear_data_offset(skb, copied,
1729 (char *)hwdesc + offset, copy_len);
1730
1731 copied += copy_len;
1732 offset = 0;
1733
1734 producer = get_next_index(producer, tx_ring->num_desc);
1735 }
1736
1737 tx_ring->producer = producer;
1738 barrier();
1739 adapter->stats.lso_frames++;
1740}
1741
1742static int
1743qlcnic_map_tx_skb(struct pci_dev *pdev,
1744 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1745{
1746 struct qlcnic_skb_frag *nf;
1747 struct skb_frag_struct *frag;
1748 int i, nr_frags;
1749 dma_addr_t map;
1750
1751 nr_frags = skb_shinfo(skb)->nr_frags;
1752 nf = &pbuf->frag_array[0];
1753
1754 map = pci_map_single(pdev, skb->data,
1755 skb_headlen(skb), PCI_DMA_TODEVICE);
1756 if (pci_dma_mapping_error(pdev, map))
1757 goto out_err;
1758
1759 nf->dma = map;
1760 nf->length = skb_headlen(skb);
1761
1762 for (i = 0; i < nr_frags; i++) {
1763 frag = &skb_shinfo(skb)->frags[i];
1764 nf = &pbuf->frag_array[i+1];
1765
1766 map = pci_map_page(pdev, frag->page, frag->page_offset,
1767 frag->size, PCI_DMA_TODEVICE);
1768 if (pci_dma_mapping_error(pdev, map))
1769 goto unwind;
1770
1771 nf->dma = map;
1772 nf->length = frag->size;
1773 }
1774
1775 return 0;
1776
1777unwind:
1778 while (--i >= 0) {
1779 nf = &pbuf->frag_array[i+1];
1780 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1781 }
1782
1783 nf = &pbuf->frag_array[0];
1784 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1785
1786out_err:
1787 return -ENOMEM;
1788}
1789
1790static inline void
1791qlcnic_clear_cmddesc(u64 *desc)
1792{
1793 desc[0] = 0ULL;
1794 desc[2] = 0ULL;
1795}
1796
1797netdev_tx_t
1798qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1799{
1800 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1801 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1802 struct qlcnic_cmd_buffer *pbuf;
1803 struct qlcnic_skb_frag *buffrag;
1804 struct cmd_desc_type0 *hwdesc, *first_desc;
1805 struct pci_dev *pdev;
1806 int i, k;
1807
1808 u32 producer;
1809 int frag_count, no_of_desc;
1810 u32 num_txd = tx_ring->num_desc;
1811
1812 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1813 netif_stop_queue(netdev);
1814 return NETDEV_TX_BUSY;
1815 }
1816
1817 frag_count = skb_shinfo(skb)->nr_frags + 1;
1818
1819 /* 4 fragments per cmd des */
1820 no_of_desc = (frag_count + 3) >> 2;
1821
1822 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
1823 netif_stop_queue(netdev);
1824 smp_mb();
1825 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
1826 netif_start_queue(netdev);
1827 else {
1828 adapter->stats.xmit_off++;
1829 return NETDEV_TX_BUSY;
1830 }
1831 }
1832
1833 producer = tx_ring->producer;
1834 pbuf = &tx_ring->cmd_buf_arr[producer];
1835
1836 pdev = adapter->pdev;
1837
1838 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1839 adapter->stats.tx_dma_map_error++;
1840 goto drop_packet;
1841 }
1842
1843 pbuf->skb = skb;
1844 pbuf->frag_count = frag_count;
1845
1846 first_desc = hwdesc = &tx_ring->desc_head[producer];
1847 qlcnic_clear_cmddesc((u64 *)hwdesc);
1848
1849 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1850 qlcnic_set_tx_port(first_desc, adapter->portnum);
1851
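/* Each command descriptor carries up to four buffer address/length pairs;
 * k selects the slot within the current descriptor. */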
1852 for (i = 0; i < frag_count; i++) {
1853
1854 k = i % 4;
1855
1856 if ((k == 0) && (i > 0)) {
1857 /* move to next desc.*/
1858 producer = get_next_index(producer, num_txd);
1859 hwdesc = &tx_ring->desc_head[producer];
1860 qlcnic_clear_cmddesc((u64 *)hwdesc);
1861 tx_ring->cmd_buf_arr[producer].skb = NULL;
1862 }
1863
1864 buffrag = &pbuf->frag_array[i];
1865
1866 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1867 switch (k) {
1868 case 0:
1869 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1870 break;
1871 case 1:
1872 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1873 break;
1874 case 2:
1875 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1876 break;
1877 case 3:
1878 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1879 break;
1880 }
1881 }
1882
1883 tx_ring->producer = get_next_index(producer, num_txd);
1884
1885 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1886
1887 qlcnic_update_cmd_producer(adapter, tx_ring);
1888
1889 adapter->stats.txbytes += skb->len;
1890 adapter->stats.xmitcalled++;
1891
1892 return NETDEV_TX_OK;
1893
1894drop_packet:
1895 adapter->stats.txdropped++;
1896 dev_kfree_skb_any(skb);
1897 return NETDEV_TX_OK;
1898}
1899
1900static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1901{
1902 struct net_device *netdev = adapter->netdev;
1903 u32 temp, temp_state, temp_val;
1904 int rv = 0;
1905
1906 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1907
1908 temp_state = qlcnic_get_temp_state(temp);
1909 temp_val = qlcnic_get_temp_val(temp);
1910
1911 if (temp_state == QLCNIC_TEMP_PANIC) {
1912 dev_err(&netdev->dev,
1913 "Device temperature %d degrees C exceeds"
1914 " maximum allowed. Hardware has been shut down.\n",
1915 temp_val);
1916 rv = 1;
1917 } else if (temp_state == QLCNIC_TEMP_WARN) {
1918 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1919 dev_err(&netdev->dev,
1920 "Device temperature %d degrees C "
1921 "exceeds operating range."
1922 " Immediate action needed.\n",
1923 temp_val);
1924 }
1925 } else {
1926 if (adapter->temp == QLCNIC_TEMP_WARN) {
1927 dev_info(&netdev->dev,
1928 "Device temperature is now %d degrees C"
1929 " in normal range.\n", temp_val);
1930 }
1931 }
1932 adapter->temp = temp_state;
1933 return rv;
1934}
1935
1936void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1937{
1938 struct net_device *netdev = adapter->netdev;
1939
1940 if (adapter->ahw.linkup && !linkup) {
1941 dev_info(&netdev->dev, "NIC Link is down\n");
1942 adapter->ahw.linkup = 0;
1943 if (netif_running(netdev)) {
1944 netif_carrier_off(netdev);
1945 netif_stop_queue(netdev);
1946 }
1947 } else if (!adapter->ahw.linkup && linkup) {
1948 dev_info(&netdev->dev, "NIC Link is up\n");
1949 adapter->ahw.linkup = 1;
1950 if (netif_running(netdev)) {
1951 netif_carrier_on(netdev);
1952 netif_wake_queue(netdev);
1953 }
1954 }
1955}
1956
1957static void qlcnic_tx_timeout(struct net_device *netdev)
1958{
1959 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1960
1961 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1962 return;
1963
1964 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
1965
1966 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
1967 adapter->need_fw_reset = 1;
1968 else
1969 adapter->reset_context = 1;
af19b491
AKS
1970}
1971
1972static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1973{
1974 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1975 struct net_device_stats *stats = &netdev->stats;
1976
1977 memset(stats, 0, sizeof(*stats));
1978
1979 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1980 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 1981 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
1982 stats->tx_bytes = adapter->stats.txbytes;
1983 stats->rx_dropped = adapter->stats.rxdropped;
1984 stats->tx_dropped = adapter->stats.txdropped;
1985
1986 return stats;
1987}
1988
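/*
 * Legacy INTx handling: the line may be shared, so first check the
 * vector bit in the ISR source register and then the interrupt state
 * machine before claiming the interrupt.  The ack is a write to the
 * target status register, followed by two reads to flush the posted
 * write.
 */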
7eb9855d 1989static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 1990{
af19b491
AKS
1991 u32 status;
1992
1993 status = readl(adapter->isr_int_vec);
1994
1995 if (!(status & adapter->int_vec_bit))
1996 return IRQ_NONE;
1997
1998 /* check interrupt state machine, to be sure */
1999 status = readl(adapter->crb_int_state_reg);
2000 if (!ISR_LEGACY_INT_TRIGGERED(status))
2001 return IRQ_NONE;
2002
2003 writel(0xffffffff, adapter->tgt_status_reg);
2004 /* read twice to ensure write is flushed */
2005 readl(adapter->isr_int_vec);
2006 readl(adapter->isr_int_vec);
2007
7eb9855d
AKS
2008 return IRQ_HANDLED;
2009}
2010
2011static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2012{
2013 struct qlcnic_host_sds_ring *sds_ring = data;
2014 struct qlcnic_adapter *adapter = sds_ring->adapter;
2015
2016 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2017 goto done;
2018 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2019 writel(0xffffffff, adapter->tgt_status_reg);
2020 goto done;
2021 }
2022
2023 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2024 return IRQ_NONE;
2025
2026done:
2027 adapter->diag_cnt++;
2028 qlcnic_enable_int(sds_ring);
2029 return IRQ_HANDLED;
2030}
2031
2032static irqreturn_t qlcnic_intr(int irq, void *data)
2033{
2034 struct qlcnic_host_sds_ring *sds_ring = data;
2035 struct qlcnic_adapter *adapter = sds_ring->adapter;
2036
2037 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2038 return IRQ_NONE;
2039
af19b491
AKS
2040 napi_schedule(&sds_ring->napi);
2041
2042 return IRQ_HANDLED;
2043}
2044
2045static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2046{
2047 struct qlcnic_host_sds_ring *sds_ring = data;
2048 struct qlcnic_adapter *adapter = sds_ring->adapter;
2049
2050 /* clear interrupt */
2051 writel(0xffffffff, adapter->tgt_status_reg);
2052
2053 napi_schedule(&sds_ring->napi);
2054 return IRQ_HANDLED;
2055}
2056
2057static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2058{
2059 struct qlcnic_host_sds_ring *sds_ring = data;
2060
2061 napi_schedule(&sds_ring->napi);
2062 return IRQ_HANDLED;
2063}
2064
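/*
 * Reclaim completed Tx descriptors.  Walks the command ring from the
 * driver's sw_consumer up to the hw_consumer index written back by the
 * firmware, unmapping DMA buffers and freeing skbs along the way (at
 * most MAX_STATUS_HANDLE entries per call).  The Tx queue is woken once
 * the number of free descriptors rises above TX_STOP_THRESH.  Returns
 * non-zero when the ring is fully drained or the clean lock is busy.
 */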
2065static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2066{
2067 u32 sw_consumer, hw_consumer;
2068 int count = 0, i;
2069 struct qlcnic_cmd_buffer *buffer;
2070 struct pci_dev *pdev = adapter->pdev;
2071 struct net_device *netdev = adapter->netdev;
2072 struct qlcnic_skb_frag *frag;
2073 int done;
2074 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2075
2076 if (!spin_trylock(&adapter->tx_clean_lock))
2077 return 1;
2078
2079 sw_consumer = tx_ring->sw_consumer;
2080 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2081
2082 while (sw_consumer != hw_consumer) {
2083 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2084 if (buffer->skb) {
2085 frag = &buffer->frag_array[0];
2086 pci_unmap_single(pdev, frag->dma, frag->length,
2087 PCI_DMA_TODEVICE);
2088 frag->dma = 0ULL;
2089 for (i = 1; i < buffer->frag_count; i++) {
2090 frag++;
2091 pci_unmap_page(pdev, frag->dma, frag->length,
2092 PCI_DMA_TODEVICE);
2093 frag->dma = 0ULL;
2094 }
2095
2096 adapter->stats.xmitfinished++;
2097 dev_kfree_skb_any(buffer->skb);
2098 buffer->skb = NULL;
2099 }
2100
2101 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2102 if (++count >= MAX_STATUS_HANDLE)
2103 break;
2104 }
2105
2106 if (count && netif_running(netdev)) {
2107 tx_ring->sw_consumer = sw_consumer;
2108
2109 smp_mb();
2110
2111 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2112 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2113 netif_wake_queue(netdev);
8bfe8b91 2114 adapter->stats.xmit_on++;
af19b491 2115 }
af19b491 2116 }
ef71ff83 2117 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2118 }
2119 /*
2120  * If everything is freed up to the consumer, check whether the ring is
2121  * full.  If it is, more entries may still need to be freed, so schedule
2122  * the callback again.
2123  *
2124  * This matters when two CPUs are involved: one could be freeing entries
2125  * while the other fills the ring.  If the ring is full when we leave
2126  * here and the card has already interrupted the host, the host can miss
2127  * that interrupt.
2128  *
2129  * There is still a possible race condition in which the host misses an
2130  * interrupt; the card has to take care of that case.
2131  */
2132 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2133 done = (sw_consumer == hw_consumer);
2134 spin_unlock(&adapter->tx_clean_lock);
2135
2136 return done;
2137}
2138
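/*
 * Default NAPI poll: Tx completions are reclaimed and the Rx/status
 * ring is processed under the given budget.  Interrupts are re-enabled
 * only when the budget was not exhausted, the Tx ring is fully cleaned,
 * and the device is still marked up.  qlcnic_rx_poll below is the
 * Rx-only variant.
 */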
2139static int qlcnic_poll(struct napi_struct *napi, int budget)
2140{
2141 struct qlcnic_host_sds_ring *sds_ring =
2142 container_of(napi, struct qlcnic_host_sds_ring, napi);
2143
2144 struct qlcnic_adapter *adapter = sds_ring->adapter;
2145
2146 int tx_complete;
2147 int work_done;
2148
2149 tx_complete = qlcnic_process_cmd_ring(adapter);
2150
2151 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2152
2153 if ((work_done < budget) && tx_complete) {
2154 napi_complete(&sds_ring->napi);
2155 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2156 qlcnic_enable_int(sds_ring);
2157 }
2158
2159 return work_done;
2160}
2161
8f891387 2162static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2163{
2164 struct qlcnic_host_sds_ring *sds_ring =
2165 container_of(napi, struct qlcnic_host_sds_ring, napi);
2166
2167 struct qlcnic_adapter *adapter = sds_ring->adapter;
2168 int work_done;
2169
2170 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2171
2172 if (work_done < budget) {
2173 napi_complete(&sds_ring->napi);
2174 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2175 qlcnic_enable_int(sds_ring);
2176 }
2177
2178 return work_done;
2179}
2180
af19b491
AKS
2181#ifdef CONFIG_NET_POLL_CONTROLLER
2182static void qlcnic_poll_controller(struct net_device *netdev)
2183{
2184 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2185 disable_irq(adapter->irq);
2186 qlcnic_intr(adapter->irq, adapter);
2187 enable_irq(adapter->irq);
2188}
2189#endif
2190
6df900e9
SC
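/*
 * Record IDC debug info in the driver scratch register: bits 0-3 hold
 * the function number, the caller-supplied encoding starts at bit 7,
 * and bits 8 and up hold the jiffies elapsed since the last device
 * reset event.
 */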
2191static void
2192qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2193{
2194 u32 val;
2195
2196 val = adapter->portnum & 0xf;
2197 val |= encoding << 7;
2198 val |= (jiffies - adapter->dev_rst_time) << 8;
2199
2200 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2201 adapter->dev_rst_time = jiffies;
2202}
2203
ade91f8e
AKS
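/*
 * The DRV_STATE and DEV_REF_COUNT CRB registers keep one 4-bit field
 * per PCI function, which is why the 0x11111111 masks below test one
 * bit out of every nibble.  The QLC_DEV_SET_xxx / QLC_DEV_CLR_xxx
 * helpers manipulate the nibble belonging to adapter->portnum.
 */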
2204static int
2205qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2206{
2207 u32 val;
2208
2209 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2210 state != QLCNIC_DEV_NEED_QUISCENT);
2211
2212 if (qlcnic_api_lock(adapter))
ade91f8e 2213 return -EIO;
af19b491
AKS
2214
2215 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2216
2217 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2218 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2219 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2220 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2221
2222 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2223
2224 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2225
2226 return 0;
af19b491
AKS
2227}
2228
1b95a839
AKS
2229static int
2230qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2231{
2232 u32 val;
2233
2234 if (qlcnic_api_lock(adapter))
2235 return -EBUSY;
2236
2237 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2238 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2239 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2240
2241 qlcnic_api_unlock(adapter);
2242
2243 return 0;
2244}
2245
af19b491
AKS
2246static void
2247qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
2248{
2249 u32 val;
2250
2251 if (qlcnic_api_lock(adapter))
2252 goto err;
2253
2254 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
6d2a4724 2255 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
af19b491
AKS
2256 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2257
2258 if (!(val & 0x11111111))
2259 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2260
2261 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2262 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2263 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2264
2265 qlcnic_api_unlock(adapter);
2266err:
2267 adapter->fw_fail_cnt = 0;
2268 clear_bit(__QLCNIC_START_FW, &adapter->state);
2269 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2270}
2271
f73dfc50 2272/* Grab the api lock before checking state */
af19b491
AKS
2273static int
2274qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2275{
2276 int act, state;
2277
2278 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2279 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2280
2281 if (((state & 0x11111111) == (act & 0x11111111)) ||
2282 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2283 return 0;
2284 else
2285 return 1;
2286}
2287
96f8118c
SC
2288static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2289{
2290 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2291
2292 if (val != QLCNIC_DRV_IDC_VER) {
2293 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2294 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2295 }
2296
2297 return 0;
2298}
2299
af19b491
AKS
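/*
 * Negotiate firmware bring-up with the other PCI functions.  The caller
 * is first registered in the per-function reference count; the global
 * device state then decides what happens: COLD claims initialization
 * (return 1), READY only verifies the IDC version, NEED_RESET and
 * NEED_QUISCENT are acknowledged in DRV_STATE before polling (up to
 * dev_init_timeo seconds) for the device to return to READY, and
 * FAILED aborts with -1.
 */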
2300static int
2301qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2302{
2303 u32 val, prev_state;
aa5e18c0 2304 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2305 u8 portnum = adapter->portnum;
96f8118c 2306 u8 ret;
af19b491 2307
f73dfc50
AKS
2308 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2309 return 1;
2310
af19b491
AKS
2311 if (qlcnic_api_lock(adapter))
2312 return -1;
2313
2314 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
6d2a4724
AKS
2315 if (!(val & (1 << (portnum * 4)))) {
2316 QLC_DEV_SET_REF_CNT(val, portnum);
af19b491 2317 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
af19b491
AKS
2318 }
2319
2320 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2321 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2322
2323 switch (prev_state) {
2324 case QLCNIC_DEV_COLD:
bbd8c6a4 2325 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2326 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2327 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2328 qlcnic_api_unlock(adapter);
2329 return 1;
2330
2331 case QLCNIC_DEV_READY:
96f8118c 2332 ret = qlcnic_check_idc_ver(adapter);
af19b491 2333 qlcnic_api_unlock(adapter);
96f8118c 2334 return ret;
af19b491
AKS
2335
2336 case QLCNIC_DEV_NEED_RESET:
2337 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2338 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2339 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2340 break;
2341
2342 case QLCNIC_DEV_NEED_QUISCENT:
2343 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2344 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2345 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2346 break;
2347
2348 case QLCNIC_DEV_FAILED:
a7fc948f 2349 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2350 qlcnic_api_unlock(adapter);
2351 return -1;
bbd8c6a4
AKS
2352
2353 case QLCNIC_DEV_INITIALIZING:
2354 case QLCNIC_DEV_QUISCENT:
2355 break;
af19b491
AKS
2356 }
2357
2358 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2359
2360 do {
af19b491 2361 msleep(1000);
a5e463d0
SC
2362 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2363
2364 if (prev_state == QLCNIC_DEV_QUISCENT)
2365 continue;
2366 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2367
65b5b420
AKS
2368 if (!dev_init_timeo) {
2369 dev_err(&adapter->pdev->dev,
2370 "Timed out waiting for device to initialize\n");
af19b491 2371 return -1;
65b5b420 2372 }
af19b491
AKS
2373
2374 if (qlcnic_api_lock(adapter))
2375 return -1;
2376
2377 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2378 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2379 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2380
96f8118c 2381 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2382 qlcnic_api_unlock(adapter);
2383
96f8118c 2384 return ret;
af19b491
AKS
2385}
2386
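/*
 * Delayed work that drives the reset/recovery handshake.  It reschedules
 * itself while the device is QUISCENT, routes non-privileged functions
 * to the wait_npar path, and otherwise waits (up to reset_ack_timeo)
 * for every function to ack the reset before moving the device from
 * NEED_RESET to INITIALIZING and restarting the firmware.  On success
 * the attach work is scheduled; on failure all driver state is cleared.
 */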
2387static void
2388qlcnic_fwinit_work(struct work_struct *work)
2389{
2390 struct qlcnic_adapter *adapter = container_of(work,
2391 struct qlcnic_adapter, fw_work.work);
9f26f547 2392 u32 dev_state = 0xf, npar_state;
af19b491 2393
f73dfc50
AKS
2394 if (qlcnic_api_lock(adapter))
2395 goto err_ret;
af19b491 2396
a5e463d0
SC
2397 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2398 if (dev_state == QLCNIC_DEV_QUISCENT) {
2399 qlcnic_api_unlock(adapter);
2400 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2401 FW_POLL_DELAY * 2);
2402 return;
2403 }
2404
9f26f547
AC
2405 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2406 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2407 if (npar_state == QLCNIC_DEV_NPAR_RDY) {
2408 qlcnic_api_unlock(adapter);
2409 goto wait_npar;
2410 } else {
2411 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2412 FW_POLL_DELAY);
2413 qlcnic_api_unlock(adapter);
2414 return;
2415 }
2416 }
2417
f73dfc50
AKS
2418 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2419 dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
2420 adapter->reset_ack_timeo);
2421 goto skip_ack_check;
2422 }
2423
2424 if (!qlcnic_check_drv_state(adapter)) {
2425skip_ack_check:
2426 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0
SC
2427
2428 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2429 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2430 QLCNIC_DEV_QUISCENT);
2431 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2432 FW_POLL_DELAY * 2);
2433 QLCDB(adapter, DRV, "Quiescing the driver\n");
6df900e9
SC
2434 qlcnic_idc_debug_info(adapter, 0);
2435
a5e463d0
SC
2436 qlcnic_api_unlock(adapter);
2437 return;
2438 }
2439
f73dfc50
AKS
2440 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2441 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2442 QLCNIC_DEV_INITIALIZING);
2443 set_bit(__QLCNIC_START_FW, &adapter->state);
2444 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2445 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2446 }
2447
f73dfc50
AKS
2448 qlcnic_api_unlock(adapter);
2449
9f26f547 2450 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491
AKS
2451 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2452 return;
2453 }
af19b491
AKS
2454 goto err_ret;
2455 }
2456
f73dfc50 2457 qlcnic_api_unlock(adapter);
aa5e18c0 2458
9f26f547 2459wait_npar:
af19b491 2460 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2461 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2462
af19b491 2463 switch (dev_state) {
a5e463d0
SC
2464 case QLCNIC_DEV_QUISCENT:
2465 case QLCNIC_DEV_NEED_QUISCENT:
f73dfc50
AKS
2466 case QLCNIC_DEV_NEED_RESET:
2467 qlcnic_schedule_work(adapter,
2468 qlcnic_fwinit_work, FW_POLL_DELAY);
2469 return;
af19b491
AKS
2470 case QLCNIC_DEV_FAILED:
2471 break;
2472
2473 default:
9f26f547 2474 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50
AKS
2475 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2476 return;
2477 }
af19b491
AKS
2478 }
2479
2480err_ret:
f73dfc50
AKS
2481 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2482 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2483 netif_device_attach(adapter->netdev);
af19b491
AKS
2484 qlcnic_clr_all_drv_state(adapter);
2485}
2486
2487static void
2488qlcnic_detach_work(struct work_struct *work)
2489{
2490 struct qlcnic_adapter *adapter = container_of(work,
2491 struct qlcnic_adapter, fw_work.work);
2492 struct net_device *netdev = adapter->netdev;
2493 u32 status;
2494
2495 netif_device_detach(netdev);
2496
2497 qlcnic_down(adapter, netdev);
2498
af19b491
AKS
2499 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2500
2501 if (status & QLCNIC_RCODE_FATAL_ERROR)
2502 goto err_ret;
2503
2504 if (adapter->temp == QLCNIC_TEMP_PANIC)
2505 goto err_ret;
2506
ade91f8e
AKS
2507 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2508 goto err_ret;
af19b491
AKS
2509
2510 adapter->fw_wait_cnt = 0;
2511
2512 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2513
2514 return;
2515
2516err_ret:
65b5b420
AKS
2517 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2518 status, adapter->temp);
34ce3626 2519 netif_device_attach(netdev);
af19b491
AKS
2520 qlcnic_clr_all_drv_state(adapter);
2521
2522}
2523
f73dfc50 2524/* Transition to RESET state from READY state only */
af19b491
AKS
2525static void
2526qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2527{
2528 u32 state;
2529
cea8975e 2530 adapter->need_fw_reset = 1;
af19b491
AKS
2531 if (qlcnic_api_lock(adapter))
2532 return;
2533
2534 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2535
f73dfc50 2536 if (state == QLCNIC_DEV_READY) {
af19b491 2537 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2538 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2539 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2540 }
2541
2542 qlcnic_api_unlock(adapter);
2543}
2544
9f26f547
AC
2545/* Transition to NPAR READY state from NPAR NOT READY state */
2546static void
2547qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2548{
2549 u32 state;
2550
cea8975e
AC
2551 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
2552 adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
2553 return;
9f26f547
AC
2554 if (qlcnic_api_lock(adapter))
2555 return;
2556
2557 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2558
2559 if (state != QLCNIC_DEV_NPAR_RDY) {
2560 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
2561 QLCNIC_DEV_NPAR_RDY);
2562 QLCDB(adapter, DRV, "NPAR READY state set\n");
2563 }
2564
2565 qlcnic_api_unlock(adapter);
2566}
2567
af19b491
AKS
2568static void
2569qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2570 work_func_t func, int delay)
2571{
451724c8
SC
2572 if (test_bit(__QLCNIC_AER, &adapter->state))
2573 return;
2574
af19b491
AKS
2575 INIT_DELAYED_WORK(&adapter->fw_work, func);
2576 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2577}
2578
2579static void
2580qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2581{
2582 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2583 msleep(10);
2584
2585 cancel_delayed_work_sync(&adapter->fw_work);
2586}
2587
2588static void
2589qlcnic_attach_work(struct work_struct *work)
2590{
2591 struct qlcnic_adapter *adapter = container_of(work,
2592 struct qlcnic_adapter, fw_work.work);
2593 struct net_device *netdev = adapter->netdev;
af19b491
AKS
2594
2595 if (netif_running(netdev)) {
52486a3a 2596 if (qlcnic_up(adapter, netdev))
af19b491 2597 goto done;
af19b491
AKS
2598
2599 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2600 }
2601
af19b491 2602done:
34ce3626 2603 netif_device_attach(netdev);
af19b491
AKS
2604 adapter->fw_fail_cnt = 0;
2605 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
2606
2607 if (!qlcnic_clr_drv_state(adapter))
2608 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2609 FW_POLL_DELAY);
af19b491
AKS
2610}
2611
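/*
 * Health check run from the firmware poll work.  A temperature panic or
 * a stalled firmware heartbeat (QLCNIC_PEG_ALIVE_COUNTER unchanged for
 * FW_FAIL_THRESH consecutive polls) marks the adapter for detach and,
 * when auto firmware reset is enabled, schedules the detach work; a
 * NEED_RESET/NEED_QUISCENT device state sets need_fw_reset so that a
 * reset is requested.  Returns 1 when the adapter must be detached.
 */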
2612static int
2613qlcnic_check_health(struct qlcnic_adapter *adapter)
2614{
2615 u32 state = 0, heartbit;
2616 struct net_device *netdev = adapter->netdev;
2617
2618 if (qlcnic_check_temp(adapter))
2619 goto detach;
2620
2372a5f1 2621 if (adapter->need_fw_reset)
af19b491 2622 qlcnic_dev_request_reset(adapter);
af19b491
AKS
2623
2624 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2625 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
2626 adapter->need_fw_reset = 1;
2627
2628 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2629 if (heartbit != adapter->heartbit) {
2630 adapter->heartbit = heartbit;
2631 adapter->fw_fail_cnt = 0;
2632 if (adapter->need_fw_reset)
2633 goto detach;
68bf1c68 2634
0df170b6
AKS
2635 if (adapter->reset_context &&
2636 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
68bf1c68
AKS
2637 qlcnic_reset_hw_context(adapter);
2638 adapter->netdev->trans_start = jiffies;
2639 }
2640
af19b491
AKS
2641 return 0;
2642 }
2643
2644 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2645 return 0;
2646
2647 qlcnic_dev_request_reset(adapter);
2648
0df170b6
AKS
2649 if (auto_fw_reset == AUTO_FW_RESET_ENABLED)
2650 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
2651
2652 dev_info(&netdev->dev, "firmware hang detected\n");
2653
2654detach:
2655 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2656 QLCNIC_DEV_NEED_RESET;
2657
2658 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
65b5b420
AKS
2659 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2660
af19b491 2661 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
2662 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2663 }
af19b491
AKS
2664
2665 return 1;
2666}
2667
2668static void
2669qlcnic_fw_poll_work(struct work_struct *work)
2670{
2671 struct qlcnic_adapter *adapter = container_of(work,
2672 struct qlcnic_adapter, fw_work.work);
2673
2674 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2675 goto reschedule;
2676
2677
2678 if (qlcnic_check_health(adapter))
2679 return;
2680
2681reschedule:
2682 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2683}
2684
451724c8
SC
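/*
 * AER recovery helper: returns 1 only if every lower-numbered PCI
 * function on this device is either absent or powered down (D3cold),
 * i.e. this is the first function to come back up and should therefore
 * restart the firmware.
 */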
2685static int qlcnic_is_first_func(struct pci_dev *pdev)
2686{
2687 struct pci_dev *oth_pdev;
2688 int val = pdev->devfn;
2689
2690 while (val-- > 0) {
2691 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
2692 (pdev->bus), pdev->bus->number,
2693 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
2694 if (!oth_pdev)
2695 continue;
451724c8 2696
bfc978fa
AKS
2697 if (oth_pdev->current_state != PCI_D3cold) {
2698 pci_dev_put(oth_pdev);
451724c8 2699 return 0;
bfc978fa
AKS
2700 }
2701 pci_dev_put(oth_pdev);
451724c8
SC
2702 }
2703 return 1;
2704}
2705
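/*
 * Bring a function back after an AER slot reset: re-enable and restore
 * the PCI device, let the first active function restart the firmware,
 * clear this function's reset-ready bit, set up interrupts again and,
 * if the interface was running, re-attach and bring it up.
 */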
2706static int qlcnic_attach_func(struct pci_dev *pdev)
2707{
2708 int err, first_func;
2709 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2710 struct net_device *netdev = adapter->netdev;
2711
2712 pdev->error_state = pci_channel_io_normal;
2713
2714 err = pci_enable_device(pdev);
2715 if (err)
2716 return err;
2717
2718 pci_set_power_state(pdev, PCI_D0);
2719 pci_set_master(pdev);
2720 pci_restore_state(pdev);
2721
2722 first_func = qlcnic_is_first_func(pdev);
2723
2724 if (qlcnic_api_lock(adapter))
2725 return -EINVAL;
2726
2727 if (first_func) {
2728 adapter->need_fw_reset = 1;
2729 set_bit(__QLCNIC_START_FW, &adapter->state);
2730 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2731 QLCDB(adapter, DRV, "Restarting fw\n");
2732 }
2733 qlcnic_api_unlock(adapter);
2734
2735 err = adapter->nic_ops->start_firmware(adapter);
2736 if (err)
2737 return err;
2738
2739 qlcnic_clr_drv_state(adapter);
2740 qlcnic_setup_intr(adapter);
2741
2742 if (netif_running(netdev)) {
2743 err = qlcnic_attach(adapter);
2744 if (err) {
2745 qlcnic_clr_all_drv_state(adapter);
2746 clear_bit(__QLCNIC_AER, &adapter->state);
2747 netif_device_attach(netdev);
2748 return err;
2749 }
2750
2751 err = qlcnic_up(adapter, netdev);
2752 if (err)
2753 goto done;
2754
2755 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2756 }
2757 done:
2758 netif_device_attach(netdev);
2759 return err;
2760}
2761
2762static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
2763 pci_channel_state_t state)
2764{
2765 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2766 struct net_device *netdev = adapter->netdev;
2767
2768 if (state == pci_channel_io_perm_failure)
2769 return PCI_ERS_RESULT_DISCONNECT;
2770
2771 if (state == pci_channel_io_normal)
2772 return PCI_ERS_RESULT_RECOVERED;
2773
2774 set_bit(__QLCNIC_AER, &adapter->state);
2775 netif_device_detach(netdev);
2776
2777 cancel_delayed_work_sync(&adapter->fw_work);
2778
2779 if (netif_running(netdev))
2780 qlcnic_down(adapter, netdev);
2781
2782 qlcnic_detach(adapter);
2783 qlcnic_teardown_intr(adapter);
2784
2785 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2786
2787 pci_save_state(pdev);
2788 pci_disable_device(pdev);
2789
2790 return PCI_ERS_RESULT_NEED_RESET;
2791}
2792
2793static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
2794{
2795 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
2796 PCI_ERS_RESULT_RECOVERED;
2797}
2798
2799static void qlcnic_io_resume(struct pci_dev *pdev)
2800{
2801 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2802
2803 pci_cleanup_aer_uncorrect_error_status(pdev);
2804
2805 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
2806 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
2807 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2808 FW_POLL_DELAY);
2809}
2810
2811
87eb743b
AC
2812static int
2813qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2814{
2815 int err;
2816
2817 err = qlcnic_can_start_firmware(adapter);
2818 if (err)
2819 return err;
2820
2821 qlcnic_check_options(adapter);
2822
2823 adapter->need_fw_reset = 0;
2824
2825 return err;
2826}
2827
2828static int
2829qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
2830{
2831 return -EOPNOTSUPP;
2832}
2833
2834static int
2835qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
2836{
2837 return -EOPNOTSUPP;
2838}
2839
af19b491
AKS
2840static ssize_t
2841qlcnic_store_bridged_mode(struct device *dev,
2842 struct device_attribute *attr, const char *buf, size_t len)
2843{
2844 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2845 unsigned long new;
2846 int ret = -EINVAL;
2847
2848 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2849 goto err_out;
2850
8a15ad1f 2851 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
2852 goto err_out;
2853
2854 if (strict_strtoul(buf, 2, &new))
2855 goto err_out;
2856
2e9d722d 2857 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
2858 ret = len;
2859
2860err_out:
2861 return ret;
2862}
2863
2864static ssize_t
2865qlcnic_show_bridged_mode(struct device *dev,
2866 struct device_attribute *attr, char *buf)
2867{
2868 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2869 int bridged_mode = 0;
2870
2871 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2872 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2873
2874 return sprintf(buf, "%d\n", bridged_mode);
2875}
2876
2877static struct device_attribute dev_attr_bridged_mode = {
2878 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
2879 .show = qlcnic_show_bridged_mode,
2880 .store = qlcnic_store_bridged_mode,
2881};
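/*
 * The attribute above is registered against the PCI device (see
 * qlcnic_create_sysfs_entries() below), so it shows up in the device's
 * sysfs directory.  Hypothetical example, with the PCI address chosen
 * purely for illustration:
 *
 *   echo 1 > /sys/bus/pci/devices/0000:03:00.0/bridged_mode
 *   cat /sys/bus/pci/devices/0000:03:00.0/bridged_mode
 *
 * The store handler parses the value as base 2 and coerces it to 0/1;
 * it fails unless the firmware advertises QLCNIC_FW_CAPABILITY_BDG and
 * the interface is up.
 */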
2882
2883static ssize_t
2884qlcnic_store_diag_mode(struct device *dev,
2885 struct device_attribute *attr, const char *buf, size_t len)
2886{
2887 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2888 unsigned long new;
2889
2890 if (strict_strtoul(buf, 2, &new))
2891 return -EINVAL;
2892
2893 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2894 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2895
2896 return len;
2897}
2898
2899static ssize_t
2900qlcnic_show_diag_mode(struct device *dev,
2901 struct device_attribute *attr, char *buf)
2902{
2903 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2904
2905 return sprintf(buf, "%d\n",
2906 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
2907}
2908
2909static struct device_attribute dev_attr_diag_mode = {
2910 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2911 .show = qlcnic_show_diag_mode,
2912 .store = qlcnic_store_diag_mode,
2913};
2914
2915static int
2916qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2917 loff_t offset, size_t size)
2918{
897e8c7c
DP
2919 size_t crb_size = 4;
2920
af19b491
AKS
2921 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2922 return -EIO;
2923
897e8c7c
DP
2924 if (offset < QLCNIC_PCI_CRBSPACE) {
2925 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
2926 QLCNIC_PCI_CAMQM_END))
2927 crb_size = 8;
2928 else
2929 return -EINVAL;
2930 }
af19b491 2931
897e8c7c
DP
2932 if ((size != crb_size) || (offset & (crb_size-1)))
2933 return -EINVAL;
af19b491
AKS
2934
2935 return 0;
2936}
2937
2938static ssize_t
2c3c8bea
CW
2939qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
2940 struct bin_attribute *attr,
af19b491
AKS
2941 char *buf, loff_t offset, size_t size)
2942{
2943 struct device *dev = container_of(kobj, struct device, kobj);
2944 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2945 u32 data;
897e8c7c 2946 u64 qmdata;
af19b491
AKS
2947 int ret;
2948
2949 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2950 if (ret != 0)
2951 return ret;
2952
897e8c7c
DP
2953 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2954 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
2955 memcpy(buf, &qmdata, size);
2956 } else {
2957 data = QLCRD32(adapter, offset);
2958 memcpy(buf, &data, size);
2959 }
af19b491
AKS
2960 return size;
2961}
2962
2963static ssize_t
2c3c8bea
CW
2964qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
2965 struct bin_attribute *attr,
af19b491
AKS
2966 char *buf, loff_t offset, size_t size)
2967{
2968 struct device *dev = container_of(kobj, struct device, kobj);
2969 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2970 u32 data;
897e8c7c 2971 u64 qmdata;
af19b491
AKS
2972 int ret;
2973
2974 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2975 if (ret != 0)
2976 return ret;
2977
897e8c7c
DP
2978 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2979 memcpy(&qmdata, buf, size);
2980 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
2981 } else {
2982 memcpy(&data, buf, size);
2983 QLCWR32(adapter, offset, data);
2984 }
af19b491
AKS
2985 return size;
2986}
2987
2988static int
2989qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2990 loff_t offset, size_t size)
2991{
2992 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2993 return -EIO;
2994
2995 if ((size != 8) || (offset & 0x7))
2996 return -EIO;
2997
2998 return 0;
2999}
3000
3001static ssize_t
2c3c8bea
CW
3002qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3003 struct bin_attribute *attr,
af19b491
AKS
3004 char *buf, loff_t offset, size_t size)
3005{
3006 struct device *dev = container_of(kobj, struct device, kobj);
3007 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3008 u64 data;
3009 int ret;
3010
3011 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3012 if (ret != 0)
3013 return ret;
3014
3015 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3016 return -EIO;
3017
3018 memcpy(buf, &data, size);
3019
3020 return size;
3021}
3022
3023static ssize_t
2c3c8bea
CW
3024qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3025 struct bin_attribute *attr,
af19b491
AKS
3026 char *buf, loff_t offset, size_t size)
3027{
3028 struct device *dev = container_of(kobj, struct device, kobj);
3029 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3030 u64 data;
3031 int ret;
3032
3033 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3034 if (ret != 0)
3035 return ret;
3036
3037 memcpy(&data, buf, size);
3038
3039 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3040 return -EIO;
3041
3042 return size;
3043}
3044
3045
3046static struct bin_attribute bin_attr_crb = {
3047 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3048 .size = 0,
3049 .read = qlcnic_sysfs_read_crb,
3050 .write = qlcnic_sysfs_write_crb,
3051};
3052
3053static struct bin_attribute bin_attr_mem = {
3054 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3055 .size = 0,
3056 .read = qlcnic_sysfs_read_mem,
3057 .write = qlcnic_sysfs_write_mem,
3058};
3059
cea8975e 3060static int
346fe763
RB
3061validate_pm_config(struct qlcnic_adapter *adapter,
3062 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3063{
3064
3065 u8 src_pci_func, s_esw_id, d_esw_id;
3066 u8 dest_pci_func;
3067 int i;
3068
3069 for (i = 0; i < count; i++) {
3070 src_pci_func = pm_cfg[i].pci_func;
3071 dest_pci_func = pm_cfg[i].dest_npar;
3072 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3073 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3074 return QL_STATUS_INVALID_PARAM;
3075
3076 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3077 return QL_STATUS_INVALID_PARAM;
3078
3079 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3080 return QL_STATUS_INVALID_PARAM;
3081
3082 if (!IS_VALID_MODE(pm_cfg[i].action))
3083 return QL_STATUS_INVALID_PARAM;
3084
3085 s_esw_id = adapter->npars[src_pci_func].phy_port;
3086 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3087
3088 if (s_esw_id != d_esw_id)
3089 return QL_STATUS_INVALID_PARAM;
3090
3091 }
3092 return 0;
3093
3094}
3095
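/*
 * The pm_config, esw_config and npar_config binary attributes below
 * exchange raw arrays of the corresponding func_cfg structures with
 * userspace.  Writes must be an exact multiple of the structure size
 * and every entry is validated before any hardware state is touched;
 * reads expect a buffer sized for exactly QLCNIC_MAX_PCI_FUNC entries.
 */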
3096static ssize_t
3097qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3098 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3099{
3100 struct device *dev = container_of(kobj, struct device, kobj);
3101 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3102 struct qlcnic_pm_func_cfg *pm_cfg;
3103 u32 id, action, pci_func;
3104 int count, rem, i, ret;
3105
3106 count = size / sizeof(struct qlcnic_pm_func_cfg);
3107 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3108 if (rem)
3109 return QL_STATUS_INVALID_PARAM;
3110
3111 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3112
3113 ret = validate_pm_config(adapter, pm_cfg, count);
3114 if (ret)
3115 return ret;
3116 for (i = 0; i < count; i++) {
3117 pci_func = pm_cfg[i].pci_func;
3118 action = pm_cfg[i].action;
3119 id = adapter->npars[pci_func].phy_port;
3120 ret = qlcnic_config_port_mirroring(adapter, id,
3121 action, pci_func);
3122 if (ret)
3123 return ret;
3124 }
3125
3126 for (i = 0; i < count; i++) {
3127 pci_func = pm_cfg[i].pci_func;
3128 id = adapter->npars[pci_func].phy_port;
3129 adapter->npars[pci_func].enable_pm = pm_cfg[i].action;
3130 adapter->npars[pci_func].dest_npar = id;
3131 }
3132 return size;
3133}
3134
3135static ssize_t
3136qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3137 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3138{
3139 struct device *dev = container_of(kobj, struct device, kobj);
3140 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3141 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3142 int i;
3143
3144 if (size != sizeof(pm_cfg))
3145 return QL_STATUS_INVALID_PARAM;
3146
3147 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3148 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3149 continue;
3150 pm_cfg[i].action = adapter->npars[i].enable_pm;
3151 pm_cfg[i].dest_npar = 0;
3152 pm_cfg[i].pci_func = i;
3153 }
3154 memcpy(buf, &pm_cfg, size);
3155
3156 return size;
3157}
3158
cea8975e 3159static int
346fe763
RB
3160validate_esw_config(struct qlcnic_adapter *adapter,
3161 struct qlcnic_esw_func_cfg *esw_cfg, int count)
3162{
3163 u8 pci_func;
3164 int i;
3165
3166 for (i = 0; i < count; i++) {
3167 pci_func = esw_cfg[i].pci_func;
3168 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3169 return QL_STATUS_INVALID_PARAM;
3170
3171 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3172 return QL_STATUS_INVALID_PARAM;
3173
3174 if (esw_cfg[i].host_vlan_tag == 1)
3175 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3176 return QL_STATUS_INVALID_PARAM;
3177
3178 if (!IS_VALID_MODE(esw_cfg[i].promisc_mode)
3179 || !IS_VALID_MODE(esw_cfg[i].host_vlan_tag)
3180 || !IS_VALID_MODE(esw_cfg[i].mac_learning)
3181 || !IS_VALID_MODE(esw_cfg[i].discard_tagged))
3182 return QL_STATUS_INVALID_PARAM;
3183 }
3184
3185 return 0;
3186}
3187
3188static ssize_t
3189qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3190 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3191{
3192 struct device *dev = container_of(kobj, struct device, kobj);
3193 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3194 struct qlcnic_esw_func_cfg *esw_cfg;
346fe763 3195 int count, rem, i, ret;
cea8975e 3196 u8 id, pci_func;
346fe763
RB
3197
3198 count = size / sizeof(struct qlcnic_esw_func_cfg);
3199 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3200 if (rem)
3201 return QL_STATUS_INVALID_PARAM;
3202
3203 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3204 ret = validate_esw_config(adapter, esw_cfg, count);
3205 if (ret)
3206 return ret;
3207
3208 for (i = 0; i < count; i++) {
3209 pci_func = esw_cfg[i].pci_func;
3210 id = adapter->npars[pci_func].phy_port;
cea8975e
AC
3211 ret = qlcnic_config_switch_port(adapter, id,
3212 esw_cfg[i].host_vlan_tag,
3213 esw_cfg[i].discard_tagged,
3214 esw_cfg[i].promisc_mode,
3215 esw_cfg[i].mac_learning,
3216 esw_cfg[i].pci_func,
3217 esw_cfg[i].vlan_id);
346fe763
RB
3218 if (ret)
3219 return ret;
3220 }
3221
3222 for (i = 0; i < count; i++) {
3223 pci_func = esw_cfg[i].pci_func;
3224 adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode;
3225 adapter->npars[pci_func].mac_learning = esw_cfg[i].mac_learning;
3226 adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id;
3227 adapter->npars[pci_func].discard_tagged =
3228 esw_cfg[i].discard_tagged;
3229 adapter->npars[pci_func].host_vlan_tag =
3230 esw_cfg[i].host_vlan_tag;
3231 }
3232
3233 return size;
3234}
3235
3236static ssize_t
3237qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3238 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3239{
3240 struct device *dev = container_of(kobj, struct device, kobj);
3241 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3242 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
3243 int i;
3244
3245 if (size != sizeof(esw_cfg))
3246 return QL_STATUS_INVALID_PARAM;
3247
3248 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3249 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3250 continue;
3251
3252 esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag;
3253 esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode;
3254 esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged;
3255 esw_cfg[i].vlan_id = adapter->npars[i].vlan_id;
3256 esw_cfg[i].mac_learning = adapter->npars[i].mac_learning;
3257 }
3258 memcpy(buf, &esw_cfg, size);
3259
3260 return size;
3261}
3262
cea8975e 3263static int
346fe763
RB
3264validate_npar_config(struct qlcnic_adapter *adapter,
3265 struct qlcnic_npar_func_cfg *np_cfg, int count)
3266{
3267 u8 pci_func, i;
3268
3269 for (i = 0; i < count; i++) {
3270 pci_func = np_cfg[i].pci_func;
3271 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3272 return QL_STATUS_INVALID_PARAM;
3273
3274 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3275 return QL_STATUS_INVALID_PARAM;
3276
3277 if (!IS_VALID_BW(np_cfg[i].min_bw)
3278 || !IS_VALID_BW(np_cfg[i].max_bw)
3279 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3280 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3281 return QL_STATUS_INVALID_PARAM;
3282 }
3283 return 0;
3284}
3285
3286static ssize_t
3287qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3288 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3289{
3290 struct device *dev = container_of(kobj, struct device, kobj);
3291 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3292 struct qlcnic_info nic_info;
3293 struct qlcnic_npar_func_cfg *np_cfg;
3294 int i, count, rem, ret;
3295 u8 pci_func;
3296
3297 count = size / sizeof(struct qlcnic_npar_func_cfg);
3298 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3299 if (rem)
3300 return QL_STATUS_INVALID_PARAM;
3301
3302 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3303 ret = validate_npar_config(adapter, np_cfg, count);
3304 if (ret)
3305 return ret;
3306
3307 for (i = 0; i < count ; i++) {
3308 pci_func = np_cfg[i].pci_func;
3309 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3310 if (ret)
3311 return ret;
3312 nic_info.pci_func = pci_func;
3313 nic_info.min_tx_bw = np_cfg[i].min_bw;
3314 nic_info.max_tx_bw = np_cfg[i].max_bw;
3315 ret = qlcnic_set_nic_info(adapter, &nic_info);
3316 if (ret)
3317 return ret;
cea8975e
AC
3318 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3319 adapter->npars[i].max_bw = nic_info.max_tx_bw;
346fe763
RB
3320 }
3321
3322 return size;
3323
3324}
3325static ssize_t
3326qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3327 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3328{
3329 struct device *dev = container_of(kobj, struct device, kobj);
3330 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3331 struct qlcnic_info nic_info;
3332 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3333 int i, ret;
3334
3335 if (size != sizeof(np_cfg))
3336 return QL_STATUS_INVALID_PARAM;
3337
3338 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3339 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3340 continue;
3341 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3342 if (ret)
3343 return ret;
3344
3345 np_cfg[i].pci_func = i;
3346 np_cfg[i].op_mode = nic_info.op_mode;
3347 np_cfg[i].port_num = nic_info.phys_port;
3348 np_cfg[i].fw_capab = nic_info.capabilities;
3349 np_cfg[i].min_bw = nic_info.min_tx_bw ;
3350 np_cfg[i].max_bw = nic_info.max_tx_bw;
3351 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3352 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3353 }
3354 memcpy(buf, &np_cfg, size);
3355 return size;
3356}
3357
3358static ssize_t
3359qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3360 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3361{
3362 struct device *dev = container_of(kobj, struct device, kobj);
3363 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3364 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
3365 struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
3366 int i, ret;
3367
3368 if (size != sizeof(pci_cfg))
3369 return QL_STATUS_INVALID_PARAM;
3370
3371 ret = qlcnic_get_pci_info(adapter, pci_info);
3372 if (ret)
3373 return ret;
3374
3375 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3376 pci_cfg[i].pci_func = pci_info[i].id;
3377 pci_cfg[i].func_type = pci_info[i].type;
3378 pci_cfg[i].port_num = pci_info[i].default_port;
3379 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3380 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3381 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3382 }
3383 memcpy(buf, &pci_cfg, size);
3384 return size;
3385
3386}
3387static struct bin_attribute bin_attr_npar_config = {
3388 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3389 .size = 0,
3390 .read = qlcnic_sysfs_read_npar_config,
3391 .write = qlcnic_sysfs_write_npar_config,
3392};
3393
3394static struct bin_attribute bin_attr_pci_config = {
3395 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3396 .size = 0,
3397 .read = qlcnic_sysfs_read_pci_config,
3398 .write = NULL,
3399};
3400
3401static struct bin_attribute bin_attr_esw_config = {
3402 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3403 .size = 0,
3404 .read = qlcnic_sysfs_read_esw_config,
3405 .write = qlcnic_sysfs_write_esw_config,
3406};
3407
3408static struct bin_attribute bin_attr_pm_config = {
3409 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3410 .size = 0,
3411 .read = qlcnic_sysfs_read_pm_config,
3412 .write = qlcnic_sysfs_write_pm_config,
3413};
3414
af19b491
AKS
3415static void
3416qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3417{
3418 struct device *dev = &adapter->pdev->dev;
3419
3420 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3421 if (device_create_file(dev, &dev_attr_bridged_mode))
3422 dev_warn(dev,
3423 "failed to create bridged_mode sysfs entry\n");
3424}
3425
3426static void
3427qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3428{
3429 struct device *dev = &adapter->pdev->dev;
3430
3431 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3432 device_remove_file(dev, &dev_attr_bridged_mode);
3433}
3434
3435static void
3436qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3437{
3438 struct device *dev = &adapter->pdev->dev;
3439
132ff00a
AC
3440 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3441 return;
af19b491
AKS
3442 if (device_create_file(dev, &dev_attr_diag_mode))
3443 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3444 if (device_create_bin_file(dev, &bin_attr_crb))
3445 dev_info(dev, "failed to create crb sysfs entry\n");
3446 if (device_create_bin_file(dev, &bin_attr_mem))
3447 dev_info(dev, "failed to create mem sysfs entry\n");
346fe763
RB
3448 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3449 adapter->op_mode != QLCNIC_MGMT_FUNC)
3450 return;
3451 if (device_create_bin_file(dev, &bin_attr_pci_config))
3452 dev_info(dev, "failed to create pci config sysfs entry\n");
3453 if (device_create_bin_file(dev, &bin_attr_npar_config))
3454 dev_info(dev, "failed to create npar config sysfs entry\n");
3455 if (device_create_bin_file(dev, &bin_attr_esw_config))
3456 dev_info(dev, "failed to create esw config sysfs entry\n");
3457 if (device_create_bin_file(dev, &bin_attr_pm_config))
3458 dev_info(dev, "failed to create pm config sysfs entry\n");
3459
af19b491
AKS
3460}
3461
af19b491
AKS
3462static void
3463qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3464{
3465 struct device *dev = &adapter->pdev->dev;
3466
132ff00a
AC
3467 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3468 return;
af19b491
AKS
3469 device_remove_file(dev, &dev_attr_diag_mode);
3470 device_remove_bin_file(dev, &bin_attr_crb);
3471 device_remove_bin_file(dev, &bin_attr_mem);
346fe763
RB
3472 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3473 adapter->op_mode != QLCNIC_MGMT_FUNC)
3474 return;
3475 device_remove_bin_file(dev, &bin_attr_pci_config);
3476 device_remove_bin_file(dev, &bin_attr_npar_config);
3477 device_remove_bin_file(dev, &bin_attr_esw_config);
3478 device_remove_bin_file(dev, &bin_attr_pm_config);
af19b491
AKS
3479}
3480
3481#ifdef CONFIG_INET
3482
3483#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
3484
af19b491
AKS
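/*
 * Walk the IPv4 addresses configured on the interface and program each
 * one into the firmware via qlcnic_config_ipaddr().  The netdevice and
 * inetaddr notifiers below use this helper (or qlcnic_config_ipaddr()
 * directly) to keep the firmware in sync as addresses change, including
 * on VLAN devices stacked on top of a qlcnic port, while the device is
 * up.
 */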
3485static void
3486qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3487{
3488 struct in_device *indev;
3489 struct qlcnic_adapter *adapter = netdev_priv(dev);
3490
af19b491
AKS
3491 indev = in_dev_get(dev);
3492 if (!indev)
3493 return;
3494
3495 for_ifa(indev) {
3496 switch (event) {
3497 case NETDEV_UP:
3498 qlcnic_config_ipaddr(adapter,
3499 ifa->ifa_address, QLCNIC_IP_UP);
3500 break;
3501 case NETDEV_DOWN:
3502 qlcnic_config_ipaddr(adapter,
3503 ifa->ifa_address, QLCNIC_IP_DOWN);
3504 break;
3505 default:
3506 break;
3507 }
3508 } endfor_ifa(indev);
3509
3510 in_dev_put(indev);
af19b491
AKS
3511}
3512
3513static int qlcnic_netdev_event(struct notifier_block *this,
3514 unsigned long event, void *ptr)
3515{
3516 struct qlcnic_adapter *adapter;
3517 struct net_device *dev = (struct net_device *)ptr;
3518
3519recheck:
3520 if (dev == NULL)
3521 goto done;
3522
3523 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3524 dev = vlan_dev_real_dev(dev);
3525 goto recheck;
3526 }
3527
3528 if (!is_qlcnic_netdev(dev))
3529 goto done;
3530
3531 adapter = netdev_priv(dev);
3532
3533 if (!adapter)
3534 goto done;
3535
8a15ad1f 3536 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3537 goto done;
3538
3539 qlcnic_config_indev_addr(dev, event);
3540done:
3541 return NOTIFY_DONE;
3542}
3543
3544static int
3545qlcnic_inetaddr_event(struct notifier_block *this,
3546 unsigned long event, void *ptr)
3547{
3548 struct qlcnic_adapter *adapter;
3549 struct net_device *dev;
3550
3551 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3552
3553 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3554
3555recheck:
3556 if (dev == NULL || !netif_running(dev))
3557 goto done;
3558
3559 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3560 dev = vlan_dev_real_dev(dev);
3561 goto recheck;
3562 }
3563
3564 if (!is_qlcnic_netdev(dev))
3565 goto done;
3566
3567 adapter = netdev_priv(dev);
3568
251a84c9 3569 if (!adapter)
af19b491
AKS
3570 goto done;
3571
8a15ad1f 3572 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3573 goto done;
3574
3575 switch (event) {
3576 case NETDEV_UP:
3577 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
3578 break;
3579 case NETDEV_DOWN:
3580 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
3581 break;
3582 default:
3583 break;
3584 }
3585
3586done:
3587 return NOTIFY_DONE;
3588}
3589
3590static struct notifier_block qlcnic_netdev_cb = {
3591 .notifier_call = qlcnic_netdev_event,
3592};
3593
3594static struct notifier_block qlcnic_inetaddr_cb = {
3595 .notifier_call = qlcnic_inetaddr_event,
3596};
3597#else
3598static void
3599qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3600{ }
3601#endif
451724c8
SC
3602static struct pci_error_handlers qlcnic_err_handler = {
3603 .error_detected = qlcnic_io_error_detected,
3604 .slot_reset = qlcnic_io_slot_reset,
3605 .resume = qlcnic_io_resume,
3606};
af19b491
AKS
3607
3608static struct pci_driver qlcnic_driver = {
3609 .name = qlcnic_driver_name,
3610 .id_table = qlcnic_pci_tbl,
3611 .probe = qlcnic_probe,
3612 .remove = __devexit_p(qlcnic_remove),
3613#ifdef CONFIG_PM
3614 .suspend = qlcnic_suspend,
3615 .resume = qlcnic_resume,
3616#endif
451724c8
SC
3617 .shutdown = qlcnic_shutdown,
3618 .err_handler = &qlcnic_err_handler
3619
af19b491
AKS
3620};
3621
3622static int __init qlcnic_init_module(void)
3623{
0cf3a14c 3624 int ret;
af19b491
AKS
3625
3626 printk(KERN_INFO "%s\n", qlcnic_driver_string);
3627
3628#ifdef CONFIG_INET
3629 register_netdevice_notifier(&qlcnic_netdev_cb);
3630 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
3631#endif
3632
0cf3a14c
AKS
3633 ret = pci_register_driver(&qlcnic_driver);
3634 if (ret) {
3635#ifdef CONFIG_INET
3636 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3637 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3638#endif
3639 }
af19b491 3640
0cf3a14c 3641 return ret;
af19b491
AKS
3642}
3643
3644module_init(qlcnic_init_module);
3645
3646static void __exit qlcnic_exit_module(void)
3647{
3648
3649 pci_unregister_driver(&qlcnic_driver);
3650
3651#ifdef CONFIG_INET
3652 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3653 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3654#endif
3655}
3656
3657module_exit(qlcnic_exit_module);