]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/scsi/bfa/bfad.c
[SCSI] bnx2i: Updated copyright and bump version
[mirror_ubuntu-jammy-kernel.git] / drivers / scsi / bfa / bfad.c
CommitLineData
7725ccfd 1/*
a36c61f9 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
7725ccfd
JH
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
5fbe25c7 18/*
7725ccfd
JH
19 * bfad.c Linux driver PCI interface module.
20 */
7725ccfd 21#include <linux/module.h>
e6714324 22#include <linux/kthread.h>
a36c61f9
KG
23#include <linux/errno.h>
24#include <linux/sched.h>
25#include <linux/init.h>
26#include <linux/fs.h>
27#include <linux/pci.h>
28#include <linux/firmware.h>
29#include <asm/uaccess.h>
30#include <asm/fcntl.h>
31
7725ccfd
JH
32#include "bfad_drv.h"
33#include "bfad_im.h"
a36c61f9 34#include "bfa_fcs.h"
a36c61f9
KG
35#include "bfa_defs.h"
36#include "bfa.h"
7725ccfd
JH
37
38BFA_TRC_FILE(LDRV, BFAD);
42b426ec 39DEFINE_MUTEX(bfad_mutex);
7725ccfd 40LIST_HEAD(bfad_list);
a36c61f9
KG
41
42static int bfad_inst;
43static int num_sgpgs_parm;
44int supported_fc4s;
45char *host_name, *os_name, *os_patch;
46int num_rports, num_ios, num_tms;
47int num_fcxps, num_ufbufs;
48int reqq_size, rspq_size, num_sgpgs;
49int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
50int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
51int bfa_io_max_sge = BFAD_IO_MAX_SGE;
88166242 52int bfa_log_level = 3; /* WARNING log level */
a36c61f9
KG
53int ioc_auto_recover = BFA_TRUE;
54int bfa_linkup_delay = -1;
55int fdmi_enable = BFA_TRUE;
be540a99 56int fc_credit_recovery = BFA_TRUE;
a36c61f9 57int pcie_max_read_reqsz;
ab2a9ba1 58int bfa_debugfs_enable = 1;
a36c61f9
KG
59int msix_disable_cb = 0, msix_disable_ct = 0;
60
61338a0b 61/* Firmware related */
11189208
KG
62u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
63u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
a36c61f9 64
11189208
KG
65#define BFAD_FW_FILE_CB "cbfw.bin"
66#define BFAD_FW_FILE_CT "ctfw.bin"
67#define BFAD_FW_FILE_CT2 "ct2fw.bin"
61338a0b
JH
68
69static u32 *bfad_load_fwimg(struct pci_dev *pdev);
70static void bfad_free_fwimg(void);
71static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
72 u32 *bfi_image_size, char *fw_name);
73
52f94b6f 74static const char *msix_name_ct[] = {
11189208 75 "ctrl",
a36c61f9 76 "cpe0", "cpe1", "cpe2", "cpe3",
11189208 77 "rme0", "rme1", "rme2", "rme3" };
a36c61f9 78
52f94b6f 79static const char *msix_name_cb[] = {
a36c61f9
KG
80 "cpe0", "cpe1", "cpe2", "cpe3",
81 "rme0", "rme1", "rme2", "rme3",
82 "eemc", "elpu0", "elpu1", "epss", "mlpu" };
83
11189208
KG
84MODULE_FIRMWARE(BFAD_FW_FILE_CB);
85MODULE_FIRMWARE(BFAD_FW_FILE_CT);
86MODULE_FIRMWARE(BFAD_FW_FILE_CT2);
7725ccfd
JH
87
88module_param(os_name, charp, S_IRUGO | S_IWUSR);
604158ad 89MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
7725ccfd 90module_param(os_patch, charp, S_IRUGO | S_IWUSR);
604158ad 91MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
7725ccfd 92module_param(host_name, charp, S_IRUGO | S_IWUSR);
604158ad 93MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
7725ccfd 94module_param(num_rports, int, S_IRUGO | S_IWUSR);
a36c61f9
KG
95MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
96 "(physical/logical), default=1024");
7725ccfd 97module_param(num_ios, int, S_IRUGO | S_IWUSR);
604158ad 98MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
7725ccfd 99module_param(num_tms, int, S_IRUGO | S_IWUSR);
604158ad 100MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
7725ccfd 101module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
604158ad 102MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
7725ccfd 103module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
a36c61f9
KG
104MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
105 "buffers, default=64");
7725ccfd 106module_param(reqq_size, int, S_IRUGO | S_IWUSR);
a36c61f9
KG
107MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
108 "default=256");
7725ccfd 109module_param(rspq_size, int, S_IRUGO | S_IWUSR);
a36c61f9
KG
110MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
111 "default=64");
7725ccfd 112module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
604158ad 113MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
7725ccfd 114module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
a36c61f9
KG
115MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
116 "Range[>0]");
7725ccfd 117module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
a36c61f9 118MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
7725ccfd 119module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
604158ad 120MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
88166242
JH
121module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
122MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
a36c61f9 123 "Range[Critical:1|Error:2|Warning:3|Info:4]");
7725ccfd 124module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
a36c61f9
KG
125MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
126 "Range[off:0|on:1]");
7725ccfd 127module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
a36c61f9
KG
128MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
129 "boot port. Otherwise 10 secs in RHEL4 & 0 for "
130 "[RHEL5, SLES10, ESX40] Range[>0]");
131module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
132MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
133 "for Brocade-415/425/815/825 cards, default=0, "
134 " Range[false:0|true:1]");
135module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
136MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
137 "if possible for Brocade-1010/1020/804/1007/902/1741 "
138 "cards, default=0, Range[false:0|true:1]");
604158ad 139module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
a36c61f9
KG
140MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
141 "Range[false:0|true:1]");
be540a99
KG
142module_param(fc_credit_recovery, int, S_IRUGO | S_IWUSR);
143MODULE_PARM_DESC(fc_credit_recovery, "Enables FC Credit Recovery, default=1, "
144 "Range[false:0|true:1]");
a36c61f9
KG
145module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
146MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
147 "(use system setting), Range[128|256|512|1024|2048|4096]");
ab2a9ba1
JH
148module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
149MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
150 " Range[false:0|true:1]");
7725ccfd 151
a36c61f9
KG
152static void
153bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
154static void
155bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
156static void
157bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
158static void
159bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
160static void
161bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
162static void
163bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
164static void
165bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
166
5fbe25c7 167/*
a36c61f9 168 * Beginning state for the driver instance, awaiting the pci_probe event
7725ccfd 169 */
a36c61f9
KG
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_CREATE:
		/* pci_probe happened: move to "created" and spawn the
		 * driver worker thread before kicking off init. */
		bfa_sm_set_state(bfad, bfad_sm_created);
		bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
						"%s", "bfad_worker");
		if (IS_ERR(bfad->bfad_tsk)) {
			printk(KERN_INFO "bfad[%d]: Kernel thread "
				"creation failed!\n", bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
		}
		/* NOTE(review): BFAD_E_INIT is sent even after the
		 * kthread-create failure event above (which moves the SM
		 * back to uninit) — confirm this is intentional. */
		bfa_sm_send_event(bfad, BFAD_E_INIT);
		break;

	case BFAD_E_STOP:
		/* Ignore stop; already in uninit */
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
7725ccfd 196
5fbe25c7 197/*
a36c61f9
KG
198 * Driver Instance is created, awaiting event INIT to initialize the bfad
199 */
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
{
	unsigned long flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT:
		bfa_sm_set_state(bfad, bfad_sm_initializing);

		/* bfa_cb_init() completes this once HAL init finishes. */
		init_completion(&bfad->comp);

		/* Enable Interrupt and wait bfa_init completion */
		if (bfad_setup_intr(bfad)) {
			printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
					bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
			break;
		}

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_iocfc_init(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		/* Set up interrupt handler for each vectors */
		if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
			bfad_install_msix_handler(bfad)) {
			/* Non-fatal: only warn; init proceeds. */
			printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
				__func__, bfad->inst_no);
		}

		bfad_init_timer(bfad);

		/* Block until bfa_cb_init() signals HAL init done/failed. */
		wait_for_completion(&bfad->comp);

		if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
			bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
		} else {
			printk(KERN_WARNING
				"bfa %s: bfa init failed\n",
				bfad->pci_name);
			bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
			bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
		}

		break;

	case BFAD_E_KTHREAD_CREATE_FAILED:
		/* Worker thread never started; fall back to uninit. */
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
256
/*
 * HAL init is in flight; handle its success/failure outcomes and the
 * interrupt-setup failure reported before init could even start.
 */
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int retval;
	unsigned long flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		/* The worker thread is no longer needed once init is done. */
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_INTR_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		break;

	case BFAD_E_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_failed);
		break;
	default:
		bfa_sm_fault(bfad, event);
	}
}
293
/*
 * Init failed earlier; from here the driver can still be retried into
 * operational (e.g. after IOC recovery) or torn down on rmmod.
 */
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int retval;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		/* A later (recovered) init succeeded: try to go operational. */
		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_STOP:
		/* Undo whatever configuration stages had completed. */
		if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
			bfad_uncfg_pport(bfad);
		if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
			bfad_im_probe_undo(bfad);
			bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		}
		bfad_stop(bfad);
		break;

	case BFAD_E_EXIT_COMP:
		/* IOC stop completed: release interrupts and the timer. */
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
329
a36c61f9
KG
330static void
331bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
7725ccfd 332{
a36c61f9 333 bfa_trc(bfad, event);
7725ccfd 334
a36c61f9
KG
335 switch (event) {
336 case BFAD_E_STOP:
337 bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
338 bfad_fcs_stop(bfad);
339 break;
7725ccfd 340
a36c61f9
KG
341 default:
342 bfa_sm_fault(bfad, event);
343 }
344}
7725ccfd 345
a36c61f9
KG
346static void
347bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
348{
349 bfa_trc(bfad, event);
350
351 switch (event) {
352 case BFAD_E_FCS_EXIT_COMP:
353 bfa_sm_set_state(bfad, bfad_sm_stopping);
354 bfad_stop(bfad);
355 break;
356
357 default:
358 bfa_sm_fault(bfad, event);
359 }
7725ccfd
JH
360}
361
/*
 * IOC stop in progress: on completion, release interrupts, the timer,
 * the IM probe state and the base-port configuration, then return to
 * the uninit state.
 */
static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		bfad_im_probe_undo(bfad);
		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		bfad_uncfg_pport(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
		break;
	}
}
382
5fbe25c7 383/*
7725ccfd
JH
384 * BFA callbacks
385 */
386void
387bfad_hcb_comp(void *arg, bfa_status_t status)
388{
389 struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;
390
391 fcomp->status = status;
392 complete(&fcomp->comp);
393}
394
5fbe25c7 395/*
7725ccfd
JH
396 * bfa_init callback
397 */
void
bfa_cb_init(void *drv, bfa_status_t init_status)
{
	struct bfad_s *bfad = drv;

	if (init_status == BFA_STATUS_OK) {
		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;

		/*
		 * If BFAD_HAL_INIT_FAIL flag is set:
		 * Wake up the kernel thread to start
		 * the bfad operations after HAL init done
		 */
		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
			wake_up_process(bfad->bfad_tsk);
		}
	}

	/* Unblock bfad_sm_created(), which waits on bfad->comp. */
	complete(&bfad->comp);
}
419
5fbe25c7 420/*
7725ccfd
JH
421 * BFA_FCS callbacks
422 */
7725ccfd 423struct bfad_port_s *
a36c61f9
KG
424bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
425 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
7725ccfd
JH
426 struct bfad_vport_s *vp_drv)
427{
a36c61f9
KG
428 bfa_status_t rc;
429 struct bfad_port_s *port_drv;
7725ccfd
JH
430
431 if (!vp_drv && !vf_drv) {
432 port_drv = &bfad->pport;
433 port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
434 } else if (!vp_drv && vf_drv) {
435 port_drv = &vf_drv->base_port;
436 port_drv->pvb_type = BFAD_PORT_VF_BASE;
437 } else if (vp_drv && !vf_drv) {
438 port_drv = &vp_drv->drv_port;
439 port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
440 } else {
441 port_drv = &vp_drv->drv_port;
442 port_drv->pvb_type = BFAD_PORT_VF_VPORT;
443 }
444
445 port_drv->fcs_port = port;
446 port_drv->roles = roles;
a36c61f9
KG
447
448 if (roles & BFA_LPORT_ROLE_FCP_IM) {
449 rc = bfad_im_port_new(bfad, port_drv);
450 if (rc != BFA_STATUS_OK) {
451 bfad_im_port_delete(bfad, port_drv);
452 port_drv = NULL;
453 }
7725ccfd
JH
454 }
455
456 return port_drv;
457}
458
void
bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
		    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s *port_drv;

	/* this will be only called from rmmod context */
	if (vp_drv && !vp_drv->comp_del) {
		/* NOTE(review): the outer condition guarantees vp_drv is
		 * non-NULL, so the vf_drv/bfad arms of this ternary are
		 * dead code; port_drv is always &vp_drv->drv_port here. */
		port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
				((vf_drv) ? (&(vf_drv)->base_port) :
				(&(bfad)->pport));
		bfa_trc(bfad, roles);
		if (roles & BFA_LPORT_ROLE_FCP_IM)
			bfad_im_port_delete(bfad, port_drv);
	}
}
475
5fbe25c7 476/*
7725ccfd
JH
477 * FCS RPORT alloc callback, after successful PLOGI by FCS
478 */
479bfa_status_t
480bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
481 struct bfad_rport_s **rport_drv)
482{
a36c61f9 483 bfa_status_t rc = BFA_STATUS_OK;
7725ccfd
JH
484
485 *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
486 if (*rport_drv == NULL) {
487 rc = BFA_STATUS_ENOMEM;
488 goto ext;
489 }
490
491 *rport = &(*rport_drv)->fcs_rport;
492
493ext:
494 return rc;
495}
496
5fbe25c7 497/*
d9883548
JH
498 * FCS PBC VPORT Create
499 */
500void
501bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
502{
503
a36c61f9
KG
504 struct bfa_lport_cfg_s port_cfg = {0};
505 struct bfad_vport_s *vport;
506 int rc;
d9883548 507
a36c61f9
KG
508 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
509 if (!vport) {
d9883548
JH
510 bfa_trc(bfad, 0);
511 return;
512 }
7725ccfd 513
a36c61f9
KG
514 vport->drv_port.bfad = bfad;
515 port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
516 port_cfg.pwwn = pbc_vport.vp_pwwn;
517 port_cfg.nwwn = pbc_vport.vp_nwwn;
518 port_cfg.preboot_vp = BFA_TRUE;
519
520 rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
521 &port_cfg, vport);
d9883548 522
a36c61f9
KG
523 if (rc != BFA_STATUS_OK) {
524 bfa_trc(bfad, 0);
525 return;
526 }
d9883548 527
a36c61f9 528 list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
d9883548 529}
7725ccfd
JH
530
/*
 * Free every memory element previously obtained by bfad_hal_mem_alloc()
 * and zero the meminfo bookkeeping so a retry starts clean.
 */
void
bfad_hal_mem_release(struct bfad_s *bfad)
{
	int i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		if (meminfo_elem->kva != NULL) {
			/* Free with the allocator that matches the type
			 * used in bfad_hal_mem_alloc(). */
			switch (meminfo_elem->mem_type) {
			case BFA_MEM_TYPE_KVA:
				vfree(meminfo_elem->kva);
				break;
			case BFA_MEM_TYPE_DMA:
				dma_free_coherent(&bfad->pcidev->dev,
					meminfo_elem->mem_len,
					meminfo_elem->kva,
					(dma_addr_t) meminfo_elem->dma);
				break;
			default:
				WARN_ON(1);
				break;
			}
		}
	}

	memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
}
560
561void
562bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
563{
564 if (num_rports > 0)
565 bfa_cfg->fwcfg.num_rports = num_rports;
566 if (num_ios > 0)
567 bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
568 if (num_tms > 0)
569 bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
570 if (num_fcxps > 0)
571 bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
572 if (num_ufbufs > 0)
573 bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
574 if (reqq_size > 0)
575 bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
576 if (rspq_size > 0)
577 bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
578 if (num_sgpgs > 0)
579 bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;
580
581 /*
582 * populate the hal values back to the driver for sysfs use.
583 * otherwise, the default values will be shown as 0 in sysfs
584 */
585 num_rports = bfa_cfg->fwcfg.num_rports;
a36c61f9
KG
586 num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
587 num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
588 num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
7725ccfd 589 num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
a36c61f9
KG
590 reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
591 rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
592 num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
7725ccfd
JH
593}
594
/*
 * Allocate all HAL memory elements described by the IOC configuration.
 * KVA elements come from vmalloc, DMA elements from dma_alloc_coherent.
 * If a DMA allocation fails, the whole set is released and the attempt
 * is retried with a smaller num_sgpgs (down to min_num_sgpgs) before
 * giving up with BFA_STATUS_ENOMEM.
 */
bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
	int i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;
	dma_addr_t phys_addr;
	void *kva;
	bfa_status_t rc = BFA_STATUS_OK;
	int retry_count = 0;
	int reset_value = 1;
	int min_num_sgpgs = 512;	/* floor for the retry back-off */

	bfa_cfg_get_default(&bfad->ioc_cfg);

retry:
	/* Recompute sizes: num_sgpgs may have been lowered by a retry. */
	bfad_update_hal_cfg(&bfad->ioc_cfg);
	bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
	bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		switch (meminfo_elem->mem_type) {
		case BFA_MEM_TYPE_KVA:
			kva = vmalloc(meminfo_elem->mem_len);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				rc = BFA_STATUS_ENOMEM;
				goto ext;
			}
			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			break;
		case BFA_MEM_TYPE_DMA:
			kva = dma_alloc_coherent(&bfad->pcidev->dev,
				meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
			if (kva == NULL) {
				/* Release everything allocated so far; the
				 * retry re-allocates the full set. */
				bfad_hal_mem_release(bfad);
				/*
				 * If we cannot allocate with default
				 * num_sgpages try with half the value.
				 */
				if (num_sgpgs > min_num_sgpgs) {
					printk(KERN_INFO
					"bfad[%d]: memory allocation failed"
					" with num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					nextLowerInt(&num_sgpgs);
					printk(KERN_INFO
					"bfad[%d]: trying to allocate memory"
					" with num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					retry_count++;
					goto retry;
				} else {
					/* Give up: restore num_sgpgs to the
					 * user value or undo the halvings. */
					if (num_sgpgs_parm > 0)
						num_sgpgs = num_sgpgs_parm;
					else {
						reset_value =
							(1 << retry_count);
						num_sgpgs *= reset_value;
					}
					rc = BFA_STATUS_ENOMEM;
					goto ext;
				}
			}

			/* Success: restore num_sgpgs the same way. */
			if (num_sgpgs_parm > 0)
				num_sgpgs = num_sgpgs_parm;
			else {
				reset_value = (1 << retry_count);
				num_sgpgs *= reset_value;
			}

			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			meminfo_elem->dma = phys_addr;
			break;
		default:
			break;

		}
	}
ext:
	return rc;
}
681
5fbe25c7 682/*
7725ccfd
JH
683 * Create a vport under a vf.
684 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
		  struct bfa_lport_cfg_s *port_cfg, struct device *dev)
{
	struct bfad_vport_s *vport;
	int rc = BFA_STATUS_OK;
	unsigned long flags;
	struct completion fcomp;	/* on-stack: only used for sync delete */

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	vport->drv_port.bfad = bfad;
	/* FCS calls must run under bfad_lock. */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
				  port_cfg, vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK)
		goto ext_free_vport;

	if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
							dev);
		if (rc != BFA_STATUS_OK)
			goto ext_free_fcs_vport;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_vport_start(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;

ext_free_fcs_vport:
	/* Delete the FCS vport synchronously: the delete completion is
	 * signalled via vport->comp_del, waited on outside the lock. */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);
	bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(vport->comp_del);
ext_free_vport:
	kfree(vport);
ext:
	return rc;
}
734
7725ccfd
JH
/*
 * Periodic HAL timer callback: drive the BFA timer module, then drain
 * and process any pending completions.  Note the lock choreography:
 * completions are dequeued under bfad_lock, processed without it, and
 * freed under it again.  Re-arms itself every BFA_TIMER_FREQ ms.
 */
void
bfad_bfa_tmo(unsigned long data)
{
	struct bfad_s *bfad = (struct bfad_s *) data;
	unsigned long flags;
	struct list_head doneq;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_timer_beat(&bfad->bfa.timer_mod);

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		/* Process outside the lock; free back under it. */
		bfa_comp_process(&bfad->bfa, &doneq);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	mod_timer(&bfad->hal_tmo,
		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}
759
760void
761bfad_init_timer(struct bfad_s *bfad)
762{
763 init_timer(&bfad->hal_tmo);
764 bfad->hal_tmo.function = bfad_bfa_tmo;
765 bfad->hal_tmo.data = (unsigned long)bfad;
766
a36c61f9
KG
767 mod_timer(&bfad->hal_tmo,
768 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
7725ccfd
JH
769}
770
/*
 * Enable and map the PCI device: claim regions, set bus mastering and
 * the DMA mask (64-bit with 32-bit fallback), iomap BAR0/BAR2, record
 * PCI identity into bfad, and optionally clamp the PCIe max read
 * request size per the pcie_max_read_reqsz module parameter.
 * Returns 0 on success, -ENODEV on any failure.
 */
int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
	int rc = -ENODEV;

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
		goto out;
	}

	if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
		goto out_disable_device;

	pci_set_master(pdev);


	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
			goto out_release_region;
		}

	bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
	/* NOTE(review): bar2 mapping is never NULL-checked, and it is not
	 * unmapped on the bar0 failure path below — confirm intent. */
	bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));

	if (bfad->pci_bar0_kva == NULL) {
		printk(KERN_ERR "Fail to map bar0\n");
		goto out_release_region;
	}

	bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
	bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
	bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
	bfad->hal_pcidev.device_id = pdev->device;
	bfad->pci_name = pci_name(pdev);

	bfad->pci_attr.vendor_id = pdev->vendor;
	bfad->pci_attr.device_id = pdev->device;
	bfad->pci_attr.ssid = pdev->subsystem_device;
	bfad->pci_attr.ssvid = pdev->subsystem_vendor;
	bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);

	bfad->pcidev = pdev;

	/* Adjust PCIe Maximum Read Request Size */
	if (pcie_max_read_reqsz > 0) {
		int pcie_cap_reg;
		u16 pcie_dev_ctl;
		u16 mask = 0xffff;	/* 0xffff = unsupported size, skip */

		/* Map the requested size to the MRRS field (bits 14:12
		 * of the PCIe Device Control register). */
		switch (pcie_max_read_reqsz) {
		case 128:
			mask = 0x0;
			break;
		case 256:
			mask = 0x1000;
			break;
		case 512:
			mask = 0x2000;
			break;
		case 1024:
			mask = 0x3000;
			break;
		case 2048:
			mask = 0x4000;
			break;
		case 4096:
			mask = 0x5000;
			break;
		default:
			break;
		}

		pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
		if (mask != 0xffff && pcie_cap_reg) {
			/* +0x08 = Device Control register offset. */
			pcie_cap_reg += 0x08;
			pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
			if ((pcie_dev_ctl & 0x7000) != mask) {
				printk(KERN_WARNING "BFA[%s]: "
				"pcie_max_read_request_size is %d, "
				"reset to %d\n", bfad->pci_name,
				(1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
				pcie_max_read_reqsz);

				pcie_dev_ctl &= ~0x7000;
				pci_write_config_word(pdev, pcie_cap_reg,
						pcie_dev_ctl | mask);
			}
		}
	}

	return 0;

out_release_region:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return rc;
}
871
/*
 * Undo bfad_pci_init(): unmap both BARs, release the regions and
 * disable the device, then drop the drvdata pointer.
 */
void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
	pci_iounmap(pdev, bfad->pci_bar0_kva);
	pci_iounmap(pdev, bfad->pci_bar2_kva);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
881
7725ccfd
JH
/*
 * One-time driver-instance initialization: allocate HAL memory, attach
 * the BFA and FCS layers, and configure the base port.  On base-port
 * failure the FCS/BFA attach is unwound; returns BFA_STATUS_FAILED on
 * any error, BFA_STATUS_OK otherwise (sets BFAD_DRV_INIT_DONE).
 */
bfa_status_t
bfad_drv_init(struct bfad_s *bfad)
{
	bfa_status_t rc;
	unsigned long flags;

	/* Seed the per-instance config from module parameters. */
	bfad->cfg_data.rport_del_timeout = rport_del_timeout;
	bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
	bfad->cfg_data.io_max_sge = bfa_io_max_sge;
	bfad->cfg_data.binding_method = FCP_PWWN_BINDING;

	rc = bfad_hal_mem_alloc(bfad);
	if (rc != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
		       bfad->inst_no);
		printk(KERN_WARNING
			"Not enough memory to attach all Brocade HBA ports, %s",
			"System may need more memory.\n");
		goto out_hal_mem_alloc_failure;
	}

	bfad->bfa.trcmod = bfad->trcmod;
	bfad->bfa.plog = &bfad->plog_buf;
	bfa_plog_init(&bfad->plog_buf);
	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
		     0, "Driver Attach");

	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
		   &bfad->hal_pcidev);

	/* FCS INIT */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfad->bfa_fcs.trcmod = bfad->trcmod;
	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
	bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
	bfad->bfa_fcs.bbscn_enabled = fc_credit_recovery;
	bfa_fcs_init(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;

	/* configure base port */
	rc = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
	if (rc != BFA_STATUS_OK)
		goto out_cfg_pport_fail;

	return BFA_STATUS_OK;

out_cfg_pport_fail:
	/* fcs exit - on cfg pport failure */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);
	/* bfa detach - free hal memory */
	bfa_detach(&bfad->bfa);
	bfad_hal_mem_release(bfad);
out_hal_mem_alloc_failure:
	return BFA_STATUS_FAILED;
}
944
/*
 * Undo bfad_drv_init(): synchronously stop the IOC, then release the
 * timer, interrupts, BFA attach and HAL memory, and clear the
 * DRV_INIT_DONE flag.
 */
void
bfad_drv_uninit(struct bfad_s *bfad)
{
	unsigned long flags;

	/* Stop the IOC and wait for the stop completion callback. */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	del_timer_sync(&bfad->hal_tmo);
	bfa_isr_disable(&bfad->bfa);
	bfa_detach(&bfad->bfa);
	bfad_remove_intr(bfad);
	bfad_hal_mem_release(bfad);

	bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
}
964
/*
 * Start the IOC and the FCS fabric (including pre-boot vports) under
 * the driver lock, then flush the IM workqueue so queued port events
 * have been handled before returning.  Sets BFAD_HAL_START_DONE.
 */
void
bfad_drv_start(struct bfad_s *bfad)
{
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_iocfc_start(&bfad->bfa);
	bfa_fcs_pbc_vport_init(&bfad->bfa_fcs);
	bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
	bfad->bfad_flags |= BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (bfad->im)
		flush_workqueue(bfad->im->drv_workq);
}
980
/*
 * Synchronously tear down the FCS: mark the base port for delete, run
 * bfa_fcs_exit() under the lock, wait for its completion, then advance
 * the driver state machine with BFAD_E_FCS_EXIT_COMP.
 */
void
bfad_fcs_stop(struct bfad_s *bfad)
{
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
}
995
/*
 * Synchronously stop the IOC, clear BFAD_HAL_START_DONE, and advance
 * the driver state machine with BFAD_E_EXIT_COMP once the stop
 * completion has been signalled.
 */
void
bfad_stop(struct bfad_s *bfad)
{
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);
	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
}
1010
1011bfa_status_t
a36c61f9 1012bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
7725ccfd 1013{
a36c61f9 1014 int rc = BFA_STATUS_OK;
7725ccfd 1015
a36c61f9
KG
1016 /* Allocate scsi_host for the physical port */
1017 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
1018 (role & BFA_LPORT_ROLE_FCP_IM)) {
7725ccfd
JH
1019 if (bfad->pport.im_port == NULL) {
1020 rc = BFA_STATUS_FAILED;
1021 goto out;
1022 }
1023
b504293f
JH
1024 rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
1025 &bfad->pcidev->dev);
7725ccfd
JH
1026 if (rc != BFA_STATUS_OK)
1027 goto out;
1028
a36c61f9 1029 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
7725ccfd
JH
1030 }
1031
1032 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
1033
1034out:
1035 return rc;
1036}
1037
1038void
1039bfad_uncfg_pport(struct bfad_s *bfad)
1040{
a36c61f9
KG
1041 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
1042 (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
7725ccfd
JH
1043 bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
1044 bfad_im_port_clean(bfad->pport.im_port);
1045 kfree(bfad->pport.im_port);
a36c61f9 1046 bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
7725ccfd
JH
1047 }
1048
1049 bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
1050}
1051
e6714324 1052bfa_status_t
a36c61f9
KG
1053bfad_start_ops(struct bfad_s *bfad) {
1054
1055 int retval;
1056 unsigned long flags;
1057 struct bfad_vport_s *vport, *vport_new;
1058 struct bfa_fcs_driver_info_s driver_info;
1059
1060 /* Fill the driver_info info to fcs*/
1061 memset(&driver_info, 0, sizeof(driver_info));
1062 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
1063 sizeof(driver_info.version) - 1);
1064 if (host_name)
1065 strncpy(driver_info.host_machine_name, host_name,
1066 sizeof(driver_info.host_machine_name) - 1);
1067 if (os_name)
1068 strncpy(driver_info.host_os_name, os_name,
1069 sizeof(driver_info.host_os_name) - 1);
1070 if (os_patch)
1071 strncpy(driver_info.host_os_patch, os_patch,
1072 sizeof(driver_info.host_os_patch) - 1);
1073
1074 strncpy(driver_info.os_device_name, bfad->pci_name,
1075 sizeof(driver_info.os_device_name - 1));
1076
75332a70 1077 /* FCS driver info init */
a36c61f9
KG
1078 spin_lock_irqsave(&bfad->bfad_lock, flags);
1079 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
a36c61f9 1080 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
e6714324 1081
75332a70
KG
1082 /*
1083 * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
1084 * with values learned during bfa_init firmware GETATTR REQ.
1085 */
1086 bfa_fcs_update_cfg(&bfad->bfa_fcs);
1087
1088 /* Setup fc host fixed attribute if the lk supports */
1089 bfad_fc_host_init(bfad->pport.im_port);
e6714324 1090
a36c61f9
KG
1091 /* BFAD level FC4 IM specific resource allocation */
1092 retval = bfad_im_probe(bfad);
1093 if (retval != BFA_STATUS_OK) {
1094 printk(KERN_WARNING "bfad_im_probe failed\n");
1095 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
1096 bfa_sm_set_state(bfad, bfad_sm_failed);
1097 bfad_im_probe_undo(bfad);
1098 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
1099 bfad_uncfg_pport(bfad);
1100 bfad_stop(bfad);
1101 return BFA_STATUS_FAILED;
1102 } else
1103 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
1104
e6714324
KG
1105 bfad_drv_start(bfad);
1106
a36c61f9
KG
1107 /* Complete pbc vport create */
1108 list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
1109 list_entry) {
d9883548
JH
1110 struct fc_vport_identifiers vid;
1111 struct fc_vport *fc_vport;
a36c61f9 1112 char pwwn_buf[BFA_STRING_32];
d9883548
JH
1113
1114 memset(&vid, 0, sizeof(vid));
1115 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
1116 vid.vport_type = FC_PORTTYPE_NPIV;
1117 vid.disable = false;
a36c61f9
KG
1118 vid.node_name = wwn_to_u64((u8 *)
1119 (&((vport->fcs_vport).lport.port_cfg.nwwn)));
1120 vid.port_name = wwn_to_u64((u8 *)
1121 (&((vport->fcs_vport).lport.port_cfg.pwwn)));
d9883548 1122 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
a36c61f9
KG
1123 if (!fc_vport) {
1124 wwn2str(pwwn_buf, vid.port_name);
d9883548 1125 printk(KERN_WARNING "bfad%d: failed to create pbc vport"
a36c61f9
KG
1126 " %s\n", bfad->inst_no, pwwn_buf);
1127 }
1128 list_del(&vport->list_entry);
1129 kfree(vport);
d9883548
JH
1130 }
1131
e6714324
KG
1132 /*
1133 * If bfa_linkup_delay is set to -1 default; try to retrive the
f16a1750 1134 * value using the bfad_get_linkup_delay(); else use the
e6714324
KG
1135 * passed in module param value as the bfa_linkup_delay.
1136 */
1137 if (bfa_linkup_delay < 0) {
f16a1750
MZ
1138 bfa_linkup_delay = bfad_get_linkup_delay(bfad);
1139 bfad_rport_online_wait(bfad);
e6714324 1140 bfa_linkup_delay = -1;
a36c61f9 1141 } else
f16a1750 1142 bfad_rport_online_wait(bfad);
e6714324 1143
88166242 1144 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
e6714324
KG
1145
1146 return BFA_STATUS_OK;
e6714324
KG
1147}
1148
1149int
d9883548 1150bfad_worker(void *ptr)
e6714324
KG
1151{
1152 struct bfad_s *bfad;
1153 unsigned long flags;
1154
1155 bfad = (struct bfad_s *)ptr;
1156
1157 while (!kthread_should_stop()) {
1158
a36c61f9
KG
1159 /* Send event BFAD_E_INIT_SUCCESS */
1160 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
e6714324
KG
1161
1162 spin_lock_irqsave(&bfad->bfad_lock, flags);
1163 bfad->bfad_tsk = NULL;
1164 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1165
1166 break;
1167 }
1168
1169 return 0;
1170}
1171
5fbe25c7 1172/*
a36c61f9
KG
1173 * BFA driver interrupt functions
1174 */
1175irqreturn_t
1176bfad_intx(int irq, void *dev_id)
1177{
1178 struct bfad_s *bfad = dev_id;
1179 struct list_head doneq;
1180 unsigned long flags;
1181 bfa_boolean_t rc;
1182
1183 spin_lock_irqsave(&bfad->bfad_lock, flags);
1184 rc = bfa_intx(&bfad->bfa);
1185 if (!rc) {
1186 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1187 return IRQ_NONE;
1188 }
1189
1190 bfa_comp_deq(&bfad->bfa, &doneq);
1191 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1192
1193 if (!list_empty(&doneq)) {
1194 bfa_comp_process(&bfad->bfa, &doneq);
1195
1196 spin_lock_irqsave(&bfad->bfad_lock, flags);
1197 bfa_comp_free(&bfad->bfa, &doneq);
1198 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
a36c61f9
KG
1199 }
1200
1201 return IRQ_HANDLED;
1202
1203}
1204
1205static irqreturn_t
1206bfad_msix(int irq, void *dev_id)
1207{
1208 struct bfad_msix_s *vec = dev_id;
1209 struct bfad_s *bfad = vec->bfad;
1210 struct list_head doneq;
1211 unsigned long flags;
1212
1213 spin_lock_irqsave(&bfad->bfad_lock, flags);
1214
1215 bfa_msix(&bfad->bfa, vec->msix.entry);
1216 bfa_comp_deq(&bfad->bfa, &doneq);
1217 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1218
1219 if (!list_empty(&doneq)) {
1220 bfa_comp_process(&bfad->bfa, &doneq);
1221
1222 spin_lock_irqsave(&bfad->bfad_lock, flags);
1223 bfa_comp_free(&bfad->bfa, &doneq);
1224 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1225 }
1226
1227 return IRQ_HANDLED;
1228}
1229
5fbe25c7 1230/*
a36c61f9
KG
1231 * Initialize the MSIX entry table.
1232 */
1233static void
1234bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
1235 int mask, int max_bit)
1236{
1237 int i;
1238 int match = 0x00000001;
1239
1240 for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
1241 if (mask & match) {
1242 bfad->msix_tab[bfad->nvec].msix.entry = i;
1243 bfad->msix_tab[bfad->nvec].bfad = bfad;
1244 msix_entries[bfad->nvec].entry = i;
1245 bfad->nvec++;
1246 }
1247
1248 match <<= 1;
1249 }
1250
1251}
1252
1253int
1254bfad_install_msix_handler(struct bfad_s *bfad)
1255{
1256 int i, error = 0;
1257
1258 for (i = 0; i < bfad->nvec; i++) {
1259 sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
1260 bfad->pci_name,
11189208
KG
1261 ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
1262 msix_name_cb[i] : msix_name_ct[i]));
a36c61f9
KG
1263
1264 error = request_irq(bfad->msix_tab[i].msix.vector,
1265 (irq_handler_t) bfad_msix, 0,
1266 bfad->msix_tab[i].name, &bfad->msix_tab[i]);
1267 bfa_trc(bfad, i);
1268 bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
1269 if (error) {
1270 int j;
1271
1272 for (j = 0; j < i; j++)
1273 free_irq(bfad->msix_tab[j].msix.vector,
1274 &bfad->msix_tab[j]);
1275
1276 return 1;
1277 }
1278 }
1279
1280 return 0;
1281}
1282
5fbe25c7 1283/*
a36c61f9
KG
1284 * Setup MSIX based interrupt.
1285 */
1286int
1287bfad_setup_intr(struct bfad_s *bfad)
1288{
1289 int error = 0;
1290 u32 mask = 0, i, num_bit = 0, max_bit = 0;
1291 struct msix_entry msix_entries[MAX_MSIX_ENTRY];
1292 struct pci_dev *pdev = bfad->pcidev;
1293
1294 /* Call BFA to get the msix map for this PCI function. */
1295 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
1296
1297 /* Set up the msix entry table */
1298 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
1299
11189208
KG
1300 if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
1301 (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {
a36c61f9
KG
1302
1303 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
1304 if (error) {
1305 /*
1306 * Only error number of vector is available.
1307 * We don't have a mechanism to map multiple
1308 * interrupts into one vector, so even if we
1309 * can try to request less vectors, we don't
1310 * know how to associate interrupt events to
25985edc 1311 * vectors. Linux doesn't duplicate vectors
a36c61f9
KG
1312 * in the MSIX table for this case.
1313 */
1314
1315 printk(KERN_WARNING "bfad%d: "
1316 "pci_enable_msix failed (%d),"
1317 " use line based.\n", bfad->inst_no, error);
1318
1319 goto line_based;
1320 }
1321
1322 /* Save the vectors */
1323 for (i = 0; i < bfad->nvec; i++) {
1324 bfa_trc(bfad, msix_entries[i].vector);
1325 bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
1326 }
1327
1328 bfa_msix_init(&bfad->bfa, bfad->nvec);
1329
1330 bfad->bfad_flags |= BFAD_MSIX_ON;
1331
1332 return error;
1333 }
1334
1335line_based:
1336 error = 0;
1337 if (request_irq
1338 (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
1339 BFAD_DRIVER_NAME, bfad) != 0) {
1340 /* Enable interrupt handler failed */
1341 return 1;
1342 }
1343
1344 return error;
1345}
1346
1347void
1348bfad_remove_intr(struct bfad_s *bfad)
1349{
1350 int i;
1351
1352 if (bfad->bfad_flags & BFAD_MSIX_ON) {
1353 for (i = 0; i < bfad->nvec; i++)
1354 free_irq(bfad->msix_tab[i].msix.vector,
1355 &bfad->msix_tab[i]);
1356
1357 pci_disable_msix(bfad->pcidev);
1358 bfad->bfad_flags &= ~BFAD_MSIX_ON;
1359 } else {
1360 free_irq(bfad->pcidev->irq, bfad);
1361 }
1362}
7725ccfd 1363
5fbe25c7 1364/*
7725ccfd
JH
1365 * PCI probe entry.
1366 */
1367int
1368bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1369{
a36c61f9
KG
1370 struct bfad_s *bfad;
1371 int error = -ENODEV, retval;
7725ccfd 1372
a36c61f9 1373 /* For single port cards - only claim function 0 */
8b070b4a 1374 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
a36c61f9 1375 (PCI_FUNC(pdev->devfn) != 0))
7725ccfd
JH
1376 return -ENODEV;
1377
7725ccfd
JH
1378 bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
1379 if (!bfad) {
1380 error = -ENOMEM;
1381 goto out;
1382 }
1383
1384 bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
1385 if (!bfad->trcmod) {
1386 printk(KERN_WARNING "Error alloc trace buffer!\n");
1387 error = -ENOMEM;
1388 goto out_alloc_trace_failure;
1389 }
1390
a36c61f9 1391 /* TRACE INIT */
7725ccfd
JH
1392 bfa_trc_init(bfad->trcmod);
1393 bfa_trc(bfad, bfad_inst);
1394
7725ccfd 1395 if (!(bfad_load_fwimg(pdev))) {
7725ccfd
JH
1396 kfree(bfad->trcmod);
1397 goto out_alloc_trace_failure;
1398 }
1399
1400 retval = bfad_pci_init(pdev, bfad);
1401 if (retval) {
1402 printk(KERN_WARNING "bfad_pci_init failure!\n");
1403 error = retval;
1404 goto out_pci_init_failure;
1405 }
1406
1407 mutex_lock(&bfad_mutex);
1408 bfad->inst_no = bfad_inst++;
1409 list_add_tail(&bfad->list_entry, &bfad_list);
1410 mutex_unlock(&bfad_mutex);
1411
a36c61f9
KG
1412 /* Initializing the state machine: State set to uninit */
1413 bfa_sm_set_state(bfad, bfad_sm_uninit);
1414
7725ccfd
JH
1415 spin_lock_init(&bfad->bfad_lock);
1416 pci_set_drvdata(pdev, bfad);
1417
1418 bfad->ref_count = 0;
1419 bfad->pport.bfad = bfad;
a36c61f9 1420 INIT_LIST_HEAD(&bfad->pbc_vport_list);
e6714324 1421
7c38c05b
KG
1422 /* Setup the debugfs node for this bfad */
1423 if (bfa_debugfs_enable)
1424 bfad_debugfs_init(&bfad->pport);
1425
7725ccfd
JH
1426 retval = bfad_drv_init(bfad);
1427 if (retval != BFA_STATUS_OK)
1428 goto out_drv_init_failure;
7725ccfd 1429
a36c61f9 1430 bfa_sm_send_event(bfad, BFAD_E_CREATE);
7725ccfd 1431
a36c61f9
KG
1432 if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
1433 goto out_bfad_sm_failure;
7725ccfd 1434
7725ccfd
JH
1435 return 0;
1436
a36c61f9
KG
1437out_bfad_sm_failure:
1438 bfa_detach(&bfad->bfa);
1439 bfad_hal_mem_release(bfad);
7725ccfd 1440out_drv_init_failure:
7c38c05b
KG
1441 /* Remove the debugfs node for this bfad */
1442 kfree(bfad->regdata);
1443 bfad_debugfs_exit(&bfad->pport);
7725ccfd
JH
1444 mutex_lock(&bfad_mutex);
1445 bfad_inst--;
1446 list_del(&bfad->list_entry);
1447 mutex_unlock(&bfad_mutex);
1448 bfad_pci_uninit(pdev, bfad);
1449out_pci_init_failure:
1450 kfree(bfad->trcmod);
1451out_alloc_trace_failure:
1452 kfree(bfad);
1453out:
1454 return error;
1455}
1456
5fbe25c7 1457/*
7725ccfd
JH
1458 * PCI remove entry.
1459 */
1460void
1461bfad_pci_remove(struct pci_dev *pdev)
1462{
a36c61f9
KG
1463 struct bfad_s *bfad = pci_get_drvdata(pdev);
1464 unsigned long flags;
7725ccfd
JH
1465
1466 bfa_trc(bfad, bfad->inst_no);
1467
e6714324 1468 spin_lock_irqsave(&bfad->bfad_lock, flags);
a36c61f9 1469 if (bfad->bfad_tsk != NULL) {
7725ccfd 1470 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
a36c61f9
KG
1471 kthread_stop(bfad->bfad_tsk);
1472 } else {
e6714324 1473 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
e6714324 1474 }
7725ccfd 1475
a36c61f9
KG
1476 /* Send Event BFAD_E_STOP */
1477 bfa_sm_send_event(bfad, BFAD_E_STOP);
e6714324 1478
a36c61f9 1479 /* Driver detach and dealloc mem */
7725ccfd
JH
1480 spin_lock_irqsave(&bfad->bfad_lock, flags);
1481 bfa_detach(&bfad->bfa);
1482 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1483 bfad_hal_mem_release(bfad);
7725ccfd 1484
7c38c05b
KG
1485 /* Remove the debugfs node for this bfad */
1486 kfree(bfad->regdata);
1487 bfad_debugfs_exit(&bfad->pport);
1488
a36c61f9 1489 /* Cleaning the BFAD instance */
7725ccfd
JH
1490 mutex_lock(&bfad_mutex);
1491 bfad_inst--;
1492 list_del(&bfad->list_entry);
1493 mutex_unlock(&bfad_mutex);
1494 bfad_pci_uninit(pdev, bfad);
1495
1496 kfree(bfad->trcmod);
1497 kfree(bfad);
1498}
1499
/*
 * PCI IDs claimed by this driver: Brocade 8G FC HBAs (8G2P/8G1P) and the
 * CT/CT_FC/CT2 CNAs. The CNA entries also match on the serial-FC class
 * code so only the FC function of a multi-function device is claimed.
 */
struct pci_device_id bfad_id_table[] = {
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G2P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G1P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT_FC,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT2,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},

	/* Terminating entry */
	{0, 0},
};
1540
1541MODULE_DEVICE_TABLE(pci, bfad_id_table);
1542
/* PCI driver hooks: devices in bfad_id_table are wired to probe/remove. */
static struct pci_driver bfad_pci_driver = {
	.name = BFAD_DRIVER_NAME,
	.id_table = bfad_id_table,
	.probe = bfad_pci_probe,
	.remove = __devexit_p(bfad_pci_remove),
};
1549
5fbe25c7 1550/*
7725ccfd
JH
1551 * Driver module init.
1552 */
a36c61f9 1553static int __init
7725ccfd
JH
1554bfad_init(void)
1555{
a36c61f9 1556 int error = 0;
7725ccfd
JH
1557
1558 printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
a36c61f9 1559 BFAD_DRIVER_VERSION);
7725ccfd
JH
1560
1561 if (num_sgpgs > 0)
1562 num_sgpgs_parm = num_sgpgs;
1563
a36c61f9 1564 error = bfad_im_module_init();
7725ccfd
JH
1565 if (error) {
1566 error = -ENOMEM;
a36c61f9 1567 printk(KERN_WARNING "bfad_im_module_init failure\n");
7725ccfd
JH
1568 goto ext;
1569 }
1570
a36c61f9
KG
1571 if (strcmp(FCPI_NAME, " fcpim") == 0)
1572 supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
7725ccfd 1573
f7f73812 1574 bfa_auto_recover = ioc_auto_recover;
7725ccfd 1575 bfa_fcs_rport_set_del_timeout(rport_del_timeout);
7725ccfd 1576
a36c61f9 1577 error = pci_register_driver(&bfad_pci_driver);
7725ccfd 1578 if (error) {
a36c61f9 1579 printk(KERN_WARNING "pci_register_driver failure\n");
7725ccfd
JH
1580 goto ext;
1581 }
1582
1583 return 0;
1584
1585ext:
a36c61f9 1586 bfad_im_module_exit();
7725ccfd
JH
1587 return error;
1588}
1589
5fbe25c7 1590/*
7725ccfd
JH
1591 * Driver module exit.
1592 */
a36c61f9 1593static void __exit
7725ccfd
JH
1594bfad_exit(void)
1595{
1596 pci_unregister_driver(&bfad_pci_driver);
a36c61f9 1597 bfad_im_module_exit();
7725ccfd
JH
1598 bfad_free_fwimg();
1599}
1600
a36c61f9 1601/* Firmware handling */
61338a0b 1602static void
a36c61f9
KG
1603bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
1604 u32 *bfi_image_size, char *fw_name)
1605{
1606 const struct firmware *fw;
1607
1608 if (request_firmware(&fw, fw_name, &pdev->dev)) {
1609 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
61338a0b
JH
1610 *bfi_image = NULL;
1611 goto out;
a36c61f9
KG
1612 }
1613
1614 *bfi_image = vmalloc(fw->size);
1615 if (NULL == *bfi_image) {
1616 printk(KERN_ALERT "Fail to allocate buffer for fw image "
1617 "size=%x!\n", (u32) fw->size);
61338a0b 1618 goto out;
a36c61f9
KG
1619 }
1620
1621 memcpy(*bfi_image, fw->data, fw->size);
1622 *bfi_image_size = fw->size/sizeof(u32);
61338a0b
JH
1623out:
1624 release_firmware(fw);
a36c61f9
KG
1625}
1626
61338a0b
JH
1627static u32 *
1628bfad_load_fwimg(struct pci_dev *pdev)
a36c61f9 1629{
11189208
KG
1630 if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
1631 if (bfi_image_ct2_size == 0)
1632 bfad_read_firmware(pdev, &bfi_image_ct2,
1633 &bfi_image_ct2_size, BFAD_FW_FILE_CT2);
1634 return bfi_image_ct2;
1635 } else if (bfa_asic_id_ct(pdev->device)) {
1636 if (bfi_image_ct_size == 0)
1637 bfad_read_firmware(pdev, &bfi_image_ct,
1638 &bfi_image_ct_size, BFAD_FW_FILE_CT);
1639 return bfi_image_ct;
a36c61f9 1640 } else {
11189208
KG
1641 if (bfi_image_cb_size == 0)
1642 bfad_read_firmware(pdev, &bfi_image_cb,
1643 &bfi_image_cb_size, BFAD_FW_FILE_CB);
1644 return bfi_image_cb;
a36c61f9
KG
1645 }
1646}
7725ccfd 1647
61338a0b
JH
1648static void
1649bfad_free_fwimg(void)
1650{
11189208
KG
1651 if (bfi_image_ct2_size && bfi_image_ct2)
1652 vfree(bfi_image_ct2);
1653 if (bfi_image_ct_size && bfi_image_ct)
1654 vfree(bfi_image_ct);
1655 if (bfi_image_cb_size && bfi_image_cb)
1656 vfree(bfi_image_cb);
61338a0b
JH
1657}
1658
7725ccfd
JH
1659module_init(bfad_init);
1660module_exit(bfad_exit);
1661MODULE_LICENSE("GPL");
1662MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
1663MODULE_AUTHOR("Brocade Communications Systems, Inc.");
1664MODULE_VERSION(BFAD_DRIVER_VERSION);