drivers/scsi/qla2xxx/qla_os.c (blame view at "[SCSI] qla2xxx: Fix cpu-affinity usage for non-capable ISPs.")
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;

int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
		"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(qlport_down_retry,
		"Maximum number of command retries to a port that returns "
		"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
		"Option to enable PLOGI to devices that are not present after "
		"a Fabric scan. This is needed for several broken switches. "
		"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount = 0;
module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xloginretrycount,
		"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xallocfwdump,
		"Option to enable allocation of memory for a firmware dump "
		"during HBA initialization. Memory allocation requirements "
		"vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
		"Option to enable extended error logging. "
		"Default is 0 - no logging. 1 - log errors.");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xshiftctondsd,
		"Set to control shifting of command type processing "
		"based on total number of SG elements.");

static void qla2x00_free_device(scsi_qla_host_t *);

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfdmienable,
		"Enables FDMI registrations. "
		"0 - no FDMI. Default is 1 - perform FDMI.");

#define MAX_Q_DEPTH	32
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
		"Maximum queue depth to report for target devices.");

/* Do not change the value of this after module load */
int ql2xenabledif = 1;
module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenabledif,
		" Enable T10-CRC-DIF "
		" Default is 1 - Enable it. 0 - No DIF Support.");

int ql2xenablehba_err_chk;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
		" Enable T10-CRC-DIF Error isolation by HBA"
		" Default is 0 - Error isolation disabled, 1 - Enable it");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xiidmaenable,
		"Enables iIDMA settings "
		"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmaxqueues = 1;
module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xmaxqueues,
		"Enables MQ settings "
		"Default is 1 for single queue. Set it to number "
		"of queues in MQ mode.");

int ql2xmultique_tag;
module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xmultique_tag,
		"Enables CPU affinity settings for the driver "
		"Default is 0 for no affinity of request and response IO. "
		"Set it to 1 to turn on the cpu affinity.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
		"Option to specify location from which to load ISP firmware:\n"
		" 2 -- load firmware via the request_firmware() (hotplug)\n"
		"      interface.\n"
		" 1 -- load firmware from flash.\n"
		" 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xetsenable,
		"Enables firmware ETS burst. "
		"Default is 0 - skip ETS enablement.");

int ql2xdbwr;
module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xdbwr,
		"Option to specify scheme for request queue posting\n"
		" 0 -- Regular doorbell.\n"
		" 1 -- CAMRAM doorbell (faster).\n");

int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xdontresethba,
		"Option to specify reset behaviour\n"
		" 0 (Default) -- Reset on failure.\n"
		" 1 -- Do not reset on failure.\n");

int ql2xtargetreset = 1;
module_param(ql2xtargetreset, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xtargetreset,
		"Enable target reset. "
		"Default is 1 - use hw defaults.");


int ql2xasynctmfenable;
module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xasynctmfenable,
		"Enables issue of TM IOCBs asynchronously via IOCB mechanism. "
		"Default is 0 - Issue TM IOCBs via mailbox mechanism.");
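
/*
 * Usage note (illustrative): all of the knobs above are module parameters,
 * so they are normally set on the modprobe/insmod command line or via
 * modprobe.conf.  A hypothetical invocation such as
 *
 *	modprobe qla2xxx ql2xmultique_tag=1 ql2xmaxqdepth=64 ql2xlogintimeout=30
 *
 * would request CPU-affinity (multi-queue) mode, a deeper reported queue
 * depth and a 30 second login timeout, provided the firmware and ISP
 * support the requested features.
 */
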
/*
 * SCSI host template entry points
 */
static int qla2xxx_slave_configure(struct scsi_device * device);
static int qla2xxx_slave_alloc(struct scsi_device *);
static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
static void qla2xxx_scan_start(struct Scsi_Host *);
static void qla2xxx_slave_destroy(struct scsi_device *);
static int qla2xxx_queuecommand(struct scsi_cmnd *cmd,
		void (*fn)(struct scsi_cmnd *));
static int qla2xxx_eh_abort(struct scsi_cmnd *);
static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
static int qla2xxx_eh_host_reset(struct scsi_cmnd *);

static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
static int qla2x00_change_queue_type(struct scsi_device *, int);

struct scsi_host_template qla2xxx_driver_template = {
	.module = THIS_MODULE,
	.name = QLA2XXX_DRIVER_NAME,
	.queuecommand = qla2xxx_queuecommand,

	.eh_abort_handler = qla2xxx_eh_abort,
	.eh_device_reset_handler = qla2xxx_eh_device_reset,
	.eh_target_reset_handler = qla2xxx_eh_target_reset,
	.eh_bus_reset_handler = qla2xxx_eh_bus_reset,
	.eh_host_reset_handler = qla2xxx_eh_host_reset,

	.slave_configure = qla2xxx_slave_configure,

	.slave_alloc = qla2xxx_slave_alloc,
	.slave_destroy = qla2xxx_slave_destroy,
	.scan_finished = qla2xxx_scan_finished,
	.scan_start = qla2xxx_scan_start,
	.change_queue_depth = qla2x00_change_queue_depth,
	.change_queue_type = qla2x00_change_queue_type,
	.this_id = -1,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SG_ALL,

	.max_sectors = 0xFFFF,
	.shost_attrs = qla2x00_host_attrs,
};

static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */

__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
{
	init_timer(&vha->timer);
	vha->timer.expires = jiffies + interval * HZ;
	vha->timer.data = (unsigned long)vha;
	vha->timer.function = (void (*)(unsigned long))func;
	add_timer(&vha->timer);
	vha->timer_active = 1;
}

static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	/* Currently used for 82XX only. */
	if (vha->device_flags & DFLG_DEV_FAILED)
		return;

	mod_timer(&vha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}

static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
	struct req_que **, struct rsp_que **);
static void qla2x00_mem_free(struct qla_hw_data *);
static void qla2x00_sp_free_dma(srb_t *);

/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha)
{
	ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
				GFP_KERNEL);
	if (!ha->req_q_map) {
		qla_printk(KERN_WARNING, ha,
			"Unable to allocate memory for request queue ptrs\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
				GFP_KERNEL);
	if (!ha->rsp_q_map) {
		qla_printk(KERN_WARNING, ha,
			"Unable to allocate memory for response queue ptrs\n");
		goto fail_rsp_map;
	}
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 1;

fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}

static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
	if (req && req->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (req->length + 1) * sizeof(request_t),
		    req->ring, req->dma);

	kfree(req);
	req = NULL;
}

static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	if (rsp && rsp->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (rsp->length + 1) * sizeof(response_t),
		    rsp->ring, rsp->dma);

	kfree(rsp);
	rsp = NULL;
}

static void qla2x00_free_queues(struct qla_hw_data *ha)
{
	struct req_que *req;
	struct rsp_que *rsp;
	int cnt;

	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		qla2x00_free_req_que(ha, req);
	}
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;

	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		qla2x00_free_rsp_que(ha, rsp);
	}
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
}

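/*
 * qla25xx_setup_mode
 *	Configure multi-queue (CPU-affinity) operation when ql2xmultique_tag
 *	is set and the firmware advertises multi-queue support (fw_attributes
 *	BIT_6): a dedicated request queue plus one response queue per
 *	configured queue index are created and cpu_affinity_enabled is set.
 *	On any failure the driver drops back to a single request/response
 *	queue pair and clears ha->mqenable.
 */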
static int qla25xx_setup_mode(struct scsi_qla_host *vha)
{
	uint16_t options = 0;
	int ques, req, ret;
	struct qla_hw_data *ha = vha->hw;

	if (!(ha->fw_attributes & BIT_6)) {
		qla_printk(KERN_INFO, ha,
			"Firmware is not multi-queue capable\n");
		goto fail;
	}
	if (ql2xmultique_tag) {
		/* create a request queue for IO */
		options |= BIT_7;
		req = qla25xx_create_req_que(ha, options, 0, 0, -1,
			QLA_DEFAULT_QUE_QOS);
		if (!req) {
			qla_printk(KERN_WARNING, ha,
				"Can't create request queue\n");
			goto fail;
		}
		ha->wq = create_workqueue("qla2xxx_wq");
		vha->req = ha->req_q_map[req];
		options |= BIT_1;
		for (ques = 1; ques < ha->max_rsp_queues; ques++) {
			ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
			if (!ret) {
				qla_printk(KERN_WARNING, ha,
					"Response Queue create failed\n");
				goto fail2;
			}
		}
		ha->flags.cpu_affinity_enabled = 1;

		DEBUG2(qla_printk(KERN_INFO, ha,
			"CPU affinity mode enabled, no. of response"
			" queues:%d, no. of request queues:%d\n",
			ha->max_rsp_queues, ha->max_req_queues));
	}
	return 0;
fail2:
	qla25xx_delete_queues(vha);
	destroy_workqueue(ha->wq);
	ha->wq = NULL;
fail:
	ha->mqenable = 0;
	kfree(ha->req_q_map);
	kfree(ha->rsp_q_map);
	ha->max_req_queues = ha->max_rsp_queues = 1;
	return 1;
}

static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;
	static char *pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	uint16_t pci_bus;

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
	if (pci_bus) {
		strcat(str, "-X (");
		strcat(str, pci_bus_modes[pci_bus]);
	} else {
		pci_bus = (ha->pci_attr & BIT_8) >> 8;
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus]);
	}
	strcat(str, " MHz)");

	return (str);
}

static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	static char *pci_bus_modes[] = { "33", "66", "100", "133", };
	struct qla_hw_data *ha = vha->hw;
	uint32_t pci_bus;
	int pcie_reg;

	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += 0x12;
		pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
		lwidth = (pcie_lstat &
		    (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;

		strcpy(str, "PCIe (");
		if (lspeed == 1)
			strcat(str, "2.5GT/s ");
		else if (lspeed == 2)
			strcat(str, "5.0GT/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
		strcat(str, lwstr);

		return str;
	}

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8) {
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus >> 3]);
	} else {
		strcat(str, "-X ");
		if (pci_bus & BIT_2)
			strcat(str, "Mode 2");
		else
			strcat(str, "Mode 1");
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
	}
	strcat(str, " MHz)");

	return str;
}

static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
{
	char un_str[10];
	struct qla_hw_data *ha = vha->hw;

	sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version,
	    ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}

static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;

	sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}

static inline srb_t *
qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
	if (!sp)
		return sp;

	sp->fcport = fcport;
	sp->cmd = cmd;
	sp->flags = 0;
	CMD_SP(cmd) = (void *)sp;
	cmd->scsi_done = done;
	sp->ctx = NULL;

	return sp;
}

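/*
 * qla2xxx_queuecommand
 *	Fast-path entry point from the SCSI midlayer.  Rejects the command
 *	early on EEH/PCI errors, rport/fcport state transitions and
 *	unsupported protection (DIF) operations; otherwise allocates an srb
 *	from the mempool and hands it to the ISP-specific start_scsi()
 *	routine.  The host_lock is dropped while the command is queued to the
 *	hardware and re-taken before returning to the midlayer.
 */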
static int
qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	if (ha->flags.eeh_busy) {
		if (ha->flags.pci_channel_io_perm_failure)
			cmd->result = DID_NO_CONNECT << 16;
		else
			cmd->result = DID_REQUEUE << 16;
		goto qc24_fail_command;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		goto qc24_fail_command;
	}

	/* Close window on fcport/rport state-transitioning. */
	if (fcport->drport)
		goto qc24_target_busy;

	if (!vha->flags.difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		DEBUG2(qla_printk(KERN_ERR, ha,
		    "DIF Cap Not Reg, fail DIF capable cmd's:%x\n",
		    cmd->cmnd[0]));
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}
	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	spin_unlock_irq(vha->host->host_lock);

	sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
	if (!sp)
		goto qc24_host_busy_lock;

	rval = ha->isp_ops->start_scsi(sp);
	if (rval != QLA_SUCCESS)
		goto qc24_host_busy_free_sp;

	spin_lock_irq(vha->host->host_lock);

	return 0;

qc24_host_busy_free_sp:
	qla2x00_sp_free_dma(sp);
	mempool_free(sp, ha->srb_mempool);

qc24_host_busy_lock:
	spin_lock_irq(vha->host->host_lock);
	return SCSI_MLQUEUE_HOST_BUSY;

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	done(cmd);

	return 0;
}


/*
 * qla2x00_eh_wait_on_command
 *    Waits for the command to be returned by the Firmware for some
 *    max time.
 *
 * Input:
 *    cmd = Scsi Command to wait on.
 *
 * Return:
 *    Not Found : 0
 *    Found : 1
 */
static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD	1000
#define ABORT_WAIT_ITER		((10 * 1000) / (ABORT_POLLING_PERIOD))
	unsigned long wait_iter = ABORT_WAIT_ITER;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = QLA_SUCCESS;

	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
		DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
		return ret;
	}

	while (CMD_SP(cmd) && wait_iter--) {
		msleep(ABORT_POLLING_PERIOD);
	}
	if (CMD_SP(cmd))
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

/*
 * qla2x00_wait_for_hba_online
 *    Wait till the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT or
 *    finally HBA is disabled ie marked offline
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_online)) {

		msleep(1000);
	}
	if (base_vha->flags.online)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return (return_status);
}

/*
 * qla2x00_wait_for_reset_ready
 *    Wait till the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT or
 *    finally HBA is disabled ie marked offline or flash
 *    operations are in progress.
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online/no flash ops) : 0
 *    Failed  (Adapter is offline/disabled/flash ops in progress) : 1
 */
int
qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->optrom_state != QLA_SWAITING ||
	    ha->dpc_active) && time_before(jiffies, wait_online))
		msleep(1000);

	if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	DEBUG2(printk("%s return_status=%d\n", __func__, return_status));

	return return_status;
}

int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_reset;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_reset)) {

		msleep(1000);

		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
		    ha->flags.chip_reset_done)
			break;
	}
	if (ha->flags.chip_reset_done)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return return_status;
}

/*
 * qla2x00_wait_for_loop_ready
 *    Wait for MAX_LOOP_TIMEOUT (5 min) value for loop
 *    to be in LOOP_READY state.
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (LOOP_READY) : 0
 *    Failed  (LOOP_NOT_READY) : 1
 */
static inline int
qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
{
	int return_status = QLA_SUCCESS;
	unsigned long loop_timeout;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* wait for 5 min at the max for loop to be ready */
	loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);

	while ((!atomic_read(&base_vha->loop_down_timer) &&
	    atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
	    atomic_read(&base_vha->loop_state) != LOOP_READY) {
		if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			return_status = QLA_FUNCTION_FAILED;
			break;
		}
		msleep(1000);
		if (time_after_eq(jiffies, loop_timeout)) {
			return_status = QLA_FUNCTION_FAILED;
			break;
		}
	}
	return (return_status);
}

/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
*    The abort function will abort the specified command.
*
* Input:
*    cmd = Linux SCSI command packet to be aborted.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
*    Only return FAILED if command not returned by firmware.
**************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	srb_t *sp;
	int ret, i;
	unsigned int id, lun;
	unsigned long serial;
	unsigned long flags;
	int wait = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;
	srb_t *spt;

	fc_block_scsi_eh(cmd);

	if (!CMD_SP(cmd))
		return SUCCESS;

	ret = SUCCESS;

	id = cmd->device->id;
	lun = cmd->device->lun;
	serial = cmd->serial_number;
	spt = (srb_t *) CMD_SP(cmd);
	if (!spt)
		return SUCCESS;

	/* Check the active list for the command. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
		sp = req->outstanding_cmds[i];

		if (sp == NULL)
			continue;
		if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) &&
		    !IS_PROT_IO(sp))
			continue;
		if (sp->cmd != cmd)
			continue;

		DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
		    " pid=%ld.\n", __func__, vha->host_no, sp, serial));

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		if (ha->isp_ops->abort_command(sp)) {
			DEBUG2(printk("%s(%ld): abort_command "
			    "mbx failed.\n", __func__, vha->host_no));
			ret = FAILED;
		} else {
			DEBUG3(printk("%s(%ld): abort_command "
			    "mbx success.\n", __func__, vha->host_no));
			wait = 1;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
		break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Wait for the command to be returned. */
	if (wait) {
		if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
			qla_printk(KERN_ERR, ha,
			    "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
			    "%x.\n", vha->host_no, id, lun, serial, ret);
			ret = FAILED;
		}
	}

	qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
	    vha->host_no, id, lun, wait, serial, ret);

	return ret;
}

enum nexus_wait_type {
	WAIT_HOST = 0,
	WAIT_TARGET,
	WAIT_LUN,
};

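/*
 * qla2x00_eh_wait_for_pending_commands
 *	Walk the outstanding command array and wait for every command that
 *	matches the requested nexus (whole host, a target, or a single LUN,
 *	per enum nexus_wait_type) to be returned by the firmware.
 */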
static int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
	unsigned int l, srb_t *sp, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	status = QLA_SUCCESS;
	if (!sp)
		return status;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = vha->req;
	for (cnt = 1; status == QLA_SUCCESS &&
		cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if ((sp->ctx) && !IS_PROT_IO(sp))
			continue;
		if (vha->vp_idx != sp->fcport->vha->vp_idx)
			continue;
		match = 0;
		switch (type) {
		case WAIT_HOST:
			match = 1;
			break;
		case WAIT_TARGET:
			match = sp->cmd->device->id == t;
			break;
		case WAIT_LUN:
			match = (sp->cmd->device->id == t &&
				sp->cmd->device->lun == l);
			break;
		}
		if (!match)
			continue;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		status = qla2x00_eh_wait_on_command(sp->cmd);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return status;
}

void qla82xx_wait_for_pending_commands(scsi_qla_host_t *vha)
{
	int cnt;
	srb_t *sp;
	struct req_que *req = vha->req;

	DEBUG2(qla_printk(KERN_INFO, vha->hw,
		"Waiting for pending commands\n"));
	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
			sp, WAIT_HOST) == QLA_SUCCESS) {
			DEBUG2(qla_printk(KERN_INFO, vha->hw,
				"Done wait for pending commands\n"));
		}
	}
}

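/*
 * __qla2xxx_eh_generic_reset
 *	Common body for the device and target reset handlers below: wait for
 *	the HBA and loop to become ready, invoke the ISP-specific reset
 *	callback, then wait for the affected commands to drain.  'err'
 *	indexes reset_errors[] so the failure message names the step that
 *	failed.
 */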
static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};

static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int err;

	fc_block_scsi_eh(cmd);

	if (!fcport)
		return FAILED;

	qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
	    vha->host_no, cmd->device->id, cmd->device->lun, name);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
		goto eh_reset_failed;
	err = 1;
	if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
		goto eh_reset_failed;
	err = 2;
	if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
	    != QLA_SUCCESS)
		goto eh_reset_failed;
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
	    cmd->device->lun, (srb_t *) CMD_SP(cmd), type) != QLA_SUCCESS)
		goto eh_reset_failed;

	qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
	    vha->host_no, cmd->device->id, cmd->device->lun, name);

	return SUCCESS;

 eh_reset_failed:
	qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n",
	    vha->host_no, cmd->device->id, cmd->device->lun, name,
	    reset_errors[err]);
	return FAILED;
}

static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
	    ha->isp_ops->lun_reset);
}

static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
	    ha->isp_ops->target_reset);
}

/**************************************************************************
* qla2xxx_eh_bus_reset
*
* Description:
*    The bus reset function will reset the bus and abort any executing
*    commands.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          bus reset.
*
* Returns:
*    SUCCESS/FAILURE (defined as macro in scsi.h).
*
**************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int ret = FAILED;
	unsigned int id, lun;
	unsigned long serial;
	srb_t *sp = (srb_t *) CMD_SP(cmd);

	fc_block_scsi_eh(cmd);

	id = cmd->device->id;
	lun = cmd->device->lun;
	serial = cmd->serial_number;

	if (!fcport)
		return ret;

	qla_printk(KERN_INFO, vha->hw,
	    "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		DEBUG2(printk("%s failed:board disabled\n", __func__));
		goto eh_bus_reset_done;
	}

	if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
		if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
			ret = SUCCESS;
	}
	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) !=
	    QLA_SUCCESS)
		ret = FAILED;

eh_bus_reset_done:
	qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
	    (ret == FAILED) ? "failed" : "succeeded");

	return ret;
}

/**************************************************************************
* qla2xxx_eh_host_reset
*
* Description:
*    The reset function will reset the Adapter.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          adapter reset.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
**************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct qla_hw_data *ha = vha->hw;
	int ret = FAILED;
	unsigned int id, lun;
	unsigned long serial;
	srb_t *sp = (srb_t *) CMD_SP(cmd);
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	fc_block_scsi_eh(cmd);

	id = cmd->device->id;
	lun = cmd->device->lun;
	serial = cmd->serial_number;

	if (!fcport)
		return ret;

	qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
		goto eh_host_reset_lock;

	/*
	 * Fixme: the dpc thread may be active and processing loop_resync,
	 * so wait a while for it to complete before issuing the big hammer.
	 * Otherwise it may cause I/O failure, as the big hammer marks the
	 * devices as lost, kicking off the port_down_timer while dpc is
	 * stuck waiting for the mailbox to complete.
	 */
	qla2x00_wait_for_loop_ready(vha);
	if (vha != base_vha) {
		if (qla2x00_vp_abort_isp(vha))
			goto eh_host_reset_lock;
	} else {
		if (IS_QLA82XX(vha->hw)) {
			if (!qla82xx_fcoe_ctx_reset(vha)) {
				/* Ctx reset success */
				ret = SUCCESS;
				goto eh_host_reset_lock;
			}
			/* fall thru if ctx reset failed */
		}
		if (ha->wq)
			flush_workqueue(ha->wq);

		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
		if (ha->isp_ops->abort_isp(base_vha)) {
			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
			/* failed. schedule dpc to try */
			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);

			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
				goto eh_host_reset_lock;
		}
		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	}

	/* Waiting for command to be returned to OS. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) ==
	    QLA_SUCCESS)
		ret = SUCCESS;

eh_host_reset_lock:
	qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
	    (ret == FAILED) ? "failed" : "succeeded");

	return ret;
}

/*
* qla2x00_loop_reset
*      Issue loop reset.
*
* Input:
*      ha = adapter block pointer.
*
* Returns:
*      0 = success
*/
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				DEBUG2_3(printk("%s(%ld): bus_reset failed: "
				    "target_reset=%d d_id=%x.\n", __func__,
				    vha->host_no, ret, fcport->d_id.b24));
			}
		}
	}

	if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
		ret = qla2x00_full_login_lip(vha);
		if (ret != QLA_SUCCESS) {
			DEBUG2_3(printk("%s(%ld): failed: "
			    "full_login_lip=%d.\n", __func__, vha->host_no,
			    ret));
		}
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
		qla2x00_mark_all_devices_lost(vha, 0);
		qla2x00_wait_for_loop_ready(vha);
	}

	if (ha->flags.enable_lip_reset) {
		ret = qla2x00_lip_reset(vha);
		if (ret != QLA_SUCCESS) {
			DEBUG2_3(printk("%s(%ld): failed: "
			    "lip_reset=%d.\n", __func__, vha->host_no, ret));
		} else
			qla2x00_wait_for_loop_ready(vha);
	}

	/* Issue marker command only when we are going to start the I/O */
	vha->marker_needed = 1;

	return QLA_SUCCESS;
}

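/*
 * qla2x00_abort_all_cmds
 *	Return every outstanding command on every request queue with the
 *	given result.  Regular SCSI and protection I/O is completed through
 *	qla2x00_sp_compl(); login/logout IOCBs and BSG pass-through jobs are
 *	torn down through their own context free/done paths.
 */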
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
	int que, cnt;
	unsigned long flags;
	srb_t *sp;
	struct srb_ctx *ctx;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				req->outstanding_cmds[cnt] = NULL;
				if (!sp->ctx ||
				    (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
				    IS_PROT_IO(sp)) {
					sp->cmd->result = res;
					qla2x00_sp_compl(ha, sp);
				} else {
					ctx = sp->ctx;
					if (ctx->type == SRB_LOGIN_CMD ||
					    ctx->type == SRB_LOGOUT_CMD) {
						ctx->u.iocb_cmd->free(sp);
					} else {
						struct fc_bsg_job *bsg_job =
						    ctx->u.bsg_job;
						if (bsg_job->request->msgcode
						    == FC_BSG_HST_CT)
							kfree(sp->fcport);
						bsg_job->req->errors = 0;
						bsg_job->reply->result = res;
						bsg_job->job_done(bsg_job);
						kfree(sp->ctx);
						mempool_free(sp,
						    ha->srb_mempool);
					}
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *(fc_port_t **)rport->dd_data;

	return 0;
}

static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct qla_hw_data *ha = vha->hw;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
	struct req_que *req = vha->req;

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, req->max_q_depth);
	else
		scsi_deactivate_tcq(sdev, req->max_q_depth);

	rport->dev_loss_tmo = ha->port_down_retry_count;

	return 0;
}

static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
{
	fc_port_t *fcport = (struct fc_port *) sdev->hostdata;

	if (!scsi_track_queue_full(sdev, qdepth))
		return;

	DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
		"scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
		fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
		sdev->queue_depth));
}

static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
{
	fc_port_t *fcport = sdev->hostdata;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;

	req = vha->req;
	if (!req)
		return;

	if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
		return;

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);

	DEBUG2(qla_printk(KERN_INFO, ha,
		"scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
		fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
		sdev->queue_depth));
}

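/*
 * qla2x00_change_queue_depth
 *	SCSI midlayer change_queue_depth hook.  The 'reason' code selects the
 *	action: SCSI_QDEPTH_DEFAULT applies the requested depth directly,
 *	SCSI_QDEPTH_QFULL tracks queue-full conditions and ramps the depth
 *	down, and SCSI_QDEPTH_RAMP_UP raises it again within the request
 *	queue's max_q_depth limit.
 */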
static int
qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		break;
	case SCSI_QDEPTH_QFULL:
		qla2x00_handle_queue_full(sdev, qdepth);
		break;
	case SCSI_QDEPTH_RAMP_UP:
		qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return sdev->queue_depth;
}

static int
qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}

/**
 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 * supported addressing method.
 */
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
	/* Assume a 32bit DMA mask. */
	ha->flags.enable_64bit_addressing = 0;

	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
		/* Any upper-dword bits set? */
		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
		    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
			/* Ok, a 64bit DMA mask is applicable. */
			ha->flags.enable_64bit_addressing = 1;
			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
			return;
		}
	}

	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
	pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
}

static void
qla2x00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	/* enable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla2x00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	/* disable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, 0);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_NOPOLLING_TYPE(ha))
		return;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

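/*
 * Per-ISP-family dispatch tables.  Each generation of HBA gets its own
 * struct isp_operations so the common code can stay generation-agnostic;
 * note that qla2x00_config_dma_addressing() above rewrites calc_req_entries
 * and build_iocbs at run time when 64bit DMA addressing is enabled.
 */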
1489static struct isp_operations qla2100_isp_ops = {
1490 .pci_config = qla2100_pci_config,
1491 .reset_chip = qla2x00_reset_chip,
1492 .chip_diag = qla2x00_chip_diag,
1493 .config_rings = qla2x00_config_rings,
1494 .reset_adapter = qla2x00_reset_adapter,
1495 .nvram_config = qla2x00_nvram_config,
1496 .update_fw_options = qla2x00_update_fw_options,
1497 .load_risc = qla2x00_load_risc,
1498 .pci_info_str = qla2x00_pci_info_str,
1499 .fw_version_str = qla2x00_fw_version_str,
1500 .intr_handler = qla2100_intr_handler,
1501 .enable_intrs = qla2x00_enable_intrs,
1502 .disable_intrs = qla2x00_disable_intrs,
1503 .abort_command = qla2x00_abort_command,
523ec773
AV
1504 .target_reset = qla2x00_abort_target,
1505 .lun_reset = qla2x00_lun_reset,
fd34f556
AV
1506 .fabric_login = qla2x00_login_fabric,
1507 .fabric_logout = qla2x00_fabric_logout,
1508 .calc_req_entries = qla2x00_calc_iocbs_32,
1509 .build_iocbs = qla2x00_build_scsi_iocbs_32,
1510 .prep_ms_iocb = qla2x00_prep_ms_iocb,
1511 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
1512 .read_nvram = qla2x00_read_nvram_data,
1513 .write_nvram = qla2x00_write_nvram_data,
1514 .fw_dump = qla2100_fw_dump,
1515 .beacon_on = NULL,
1516 .beacon_off = NULL,
1517 .beacon_blink = NULL,
1518 .read_optrom = qla2x00_read_optrom_data,
1519 .write_optrom = qla2x00_write_optrom_data,
1520 .get_flash_version = qla2x00_get_flash_version,
e315cd28 1521 .start_scsi = qla2x00_start_scsi,
a9083016 1522 .abort_isp = qla2x00_abort_isp,
fd34f556
AV
1523};
1524
1525static struct isp_operations qla2300_isp_ops = {
1526 .pci_config = qla2300_pci_config,
1527 .reset_chip = qla2x00_reset_chip,
1528 .chip_diag = qla2x00_chip_diag,
1529 .config_rings = qla2x00_config_rings,
1530 .reset_adapter = qla2x00_reset_adapter,
1531 .nvram_config = qla2x00_nvram_config,
1532 .update_fw_options = qla2x00_update_fw_options,
1533 .load_risc = qla2x00_load_risc,
1534 .pci_info_str = qla2x00_pci_info_str,
1535 .fw_version_str = qla2x00_fw_version_str,
1536 .intr_handler = qla2300_intr_handler,
1537 .enable_intrs = qla2x00_enable_intrs,
1538 .disable_intrs = qla2x00_disable_intrs,
1539 .abort_command = qla2x00_abort_command,
523ec773
AV
1540 .target_reset = qla2x00_abort_target,
1541 .lun_reset = qla2x00_lun_reset,
fd34f556
AV
1542 .fabric_login = qla2x00_login_fabric,
1543 .fabric_logout = qla2x00_fabric_logout,
1544 .calc_req_entries = qla2x00_calc_iocbs_32,
1545 .build_iocbs = qla2x00_build_scsi_iocbs_32,
1546 .prep_ms_iocb = qla2x00_prep_ms_iocb,
1547 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
1548 .read_nvram = qla2x00_read_nvram_data,
1549 .write_nvram = qla2x00_write_nvram_data,
1550 .fw_dump = qla2300_fw_dump,
1551 .beacon_on = qla2x00_beacon_on,
1552 .beacon_off = qla2x00_beacon_off,
1553 .beacon_blink = qla2x00_beacon_blink,
1554 .read_optrom = qla2x00_read_optrom_data,
1555 .write_optrom = qla2x00_write_optrom_data,
1556 .get_flash_version = qla2x00_get_flash_version,
e315cd28 1557 .start_scsi = qla2x00_start_scsi,
a9083016 1558 .abort_isp = qla2x00_abort_isp,
fd34f556
AV
1559};
1560
1561static struct isp_operations qla24xx_isp_ops = {
1562 .pci_config = qla24xx_pci_config,
1563 .reset_chip = qla24xx_reset_chip,
1564 .chip_diag = qla24xx_chip_diag,
1565 .config_rings = qla24xx_config_rings,
1566 .reset_adapter = qla24xx_reset_adapter,
1567 .nvram_config = qla24xx_nvram_config,
1568 .update_fw_options = qla24xx_update_fw_options,
1569 .load_risc = qla24xx_load_risc,
1570 .pci_info_str = qla24xx_pci_info_str,
1571 .fw_version_str = qla24xx_fw_version_str,
1572 .intr_handler = qla24xx_intr_handler,
1573 .enable_intrs = qla24xx_enable_intrs,
1574 .disable_intrs = qla24xx_disable_intrs,
1575 .abort_command = qla24xx_abort_command,
523ec773
AV
1576 .target_reset = qla24xx_abort_target,
1577 .lun_reset = qla24xx_lun_reset,
fd34f556
AV
1578 .fabric_login = qla24xx_login_fabric,
1579 .fabric_logout = qla24xx_fabric_logout,
1580 .calc_req_entries = NULL,
1581 .build_iocbs = NULL,
1582 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1583 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1584 .read_nvram = qla24xx_read_nvram_data,
1585 .write_nvram = qla24xx_write_nvram_data,
1586 .fw_dump = qla24xx_fw_dump,
1587 .beacon_on = qla24xx_beacon_on,
1588 .beacon_off = qla24xx_beacon_off,
1589 .beacon_blink = qla24xx_beacon_blink,
1590 .read_optrom = qla24xx_read_optrom_data,
1591 .write_optrom = qla24xx_write_optrom_data,
1592 .get_flash_version = qla24xx_get_flash_version,
e315cd28 1593 .start_scsi = qla24xx_start_scsi,
a9083016 1594 .abort_isp = qla2x00_abort_isp,
fd34f556
AV
1595};
1596
c3a2f0df
AV
1597static struct isp_operations qla25xx_isp_ops = {
1598 .pci_config = qla25xx_pci_config,
1599 .reset_chip = qla24xx_reset_chip,
1600 .chip_diag = qla24xx_chip_diag,
1601 .config_rings = qla24xx_config_rings,
1602 .reset_adapter = qla24xx_reset_adapter,
1603 .nvram_config = qla24xx_nvram_config,
1604 .update_fw_options = qla24xx_update_fw_options,
1605 .load_risc = qla24xx_load_risc,
1606 .pci_info_str = qla24xx_pci_info_str,
1607 .fw_version_str = qla24xx_fw_version_str,
1608 .intr_handler = qla24xx_intr_handler,
1609 .enable_intrs = qla24xx_enable_intrs,
1610 .disable_intrs = qla24xx_disable_intrs,
1611 .abort_command = qla24xx_abort_command,
523ec773
AV
1612 .target_reset = qla24xx_abort_target,
1613 .lun_reset = qla24xx_lun_reset,
c3a2f0df
AV
1614 .fabric_login = qla24xx_login_fabric,
1615 .fabric_logout = qla24xx_fabric_logout,
1616 .calc_req_entries = NULL,
1617 .build_iocbs = NULL,
1618 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1619 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1620 .read_nvram = qla25xx_read_nvram_data,
1621 .write_nvram = qla25xx_write_nvram_data,
1622 .fw_dump = qla25xx_fw_dump,
1623 .beacon_on = qla24xx_beacon_on,
1624 .beacon_off = qla24xx_beacon_off,
1625 .beacon_blink = qla24xx_beacon_blink,
338c9161 1626 .read_optrom = qla25xx_read_optrom_data,
c3a2f0df
AV
1627 .write_optrom = qla24xx_write_optrom_data,
1628 .get_flash_version = qla24xx_get_flash_version,
bad75002 1629 .start_scsi = qla24xx_dif_start_scsi,
a9083016 1630 .abort_isp = qla2x00_abort_isp,
c3a2f0df
AV
1631};
1632
3a03eb79
AV
1633static struct isp_operations qla81xx_isp_ops = {
1634 .pci_config = qla25xx_pci_config,
1635 .reset_chip = qla24xx_reset_chip,
1636 .chip_diag = qla24xx_chip_diag,
1637 .config_rings = qla24xx_config_rings,
1638 .reset_adapter = qla24xx_reset_adapter,
1639 .nvram_config = qla81xx_nvram_config,
1640 .update_fw_options = qla81xx_update_fw_options,
eaac30be 1641 .load_risc = qla81xx_load_risc,
3a03eb79
AV
1642 .pci_info_str = qla24xx_pci_info_str,
1643 .fw_version_str = qla24xx_fw_version_str,
1644 .intr_handler = qla24xx_intr_handler,
1645 .enable_intrs = qla24xx_enable_intrs,
1646 .disable_intrs = qla24xx_disable_intrs,
1647 .abort_command = qla24xx_abort_command,
1648 .target_reset = qla24xx_abort_target,
1649 .lun_reset = qla24xx_lun_reset,
1650 .fabric_login = qla24xx_login_fabric,
1651 .fabric_logout = qla24xx_fabric_logout,
1652 .calc_req_entries = NULL,
1653 .build_iocbs = NULL,
1654 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1655 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
3d79038f
AV
1656 .read_nvram = NULL,
1657 .write_nvram = NULL,
3a03eb79
AV
1658 .fw_dump = qla81xx_fw_dump,
1659 .beacon_on = qla24xx_beacon_on,
1660 .beacon_off = qla24xx_beacon_off,
1661 .beacon_blink = qla24xx_beacon_blink,
1662 .read_optrom = qla25xx_read_optrom_data,
1663 .write_optrom = qla24xx_write_optrom_data,
1664 .get_flash_version = qla24xx_get_flash_version,
1665 .start_scsi = qla24xx_start_scsi,
a9083016
GM
1666 .abort_isp = qla2x00_abort_isp,
1667};
1668
1669static struct isp_operations qla82xx_isp_ops = {
1670 .pci_config = qla82xx_pci_config,
1671 .reset_chip = qla82xx_reset_chip,
1672 .chip_diag = qla24xx_chip_diag,
1673 .config_rings = qla82xx_config_rings,
1674 .reset_adapter = qla24xx_reset_adapter,
1675 .nvram_config = qla81xx_nvram_config,
1676 .update_fw_options = qla24xx_update_fw_options,
1677 .load_risc = qla82xx_load_risc,
1678 .pci_info_str = qla82xx_pci_info_str,
1679 .fw_version_str = qla24xx_fw_version_str,
1680 .intr_handler = qla82xx_intr_handler,
1681 .enable_intrs = qla82xx_enable_intrs,
1682 .disable_intrs = qla82xx_disable_intrs,
1683 .abort_command = qla24xx_abort_command,
1684 .target_reset = qla24xx_abort_target,
1685 .lun_reset = qla24xx_lun_reset,
1686 .fabric_login = qla24xx_login_fabric,
1687 .fabric_logout = qla24xx_fabric_logout,
1688 .calc_req_entries = NULL,
1689 .build_iocbs = NULL,
1690 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1691 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1692 .read_nvram = qla24xx_read_nvram_data,
1693 .write_nvram = qla24xx_write_nvram_data,
1694 .fw_dump = qla24xx_fw_dump,
1695 .beacon_on = qla24xx_beacon_on,
1696 .beacon_off = qla24xx_beacon_off,
1697 .beacon_blink = qla24xx_beacon_blink,
1698 .read_optrom = qla82xx_read_optrom_data,
1699 .write_optrom = qla82xx_write_optrom_data,
1700 .get_flash_version = qla24xx_get_flash_version,
1701 .start_scsi = qla82xx_start_scsi,
1702 .abort_isp = qla82xx_abort_isp,
3a03eb79
AV
1703};
1704
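/*
 * qla2x00_set_isp_flags() decodes the PCI device ID into DT_* feature
 * bits (extended IDs, ZIO, FWI2, IIDMA), records the firmware load
 * address for the ISP family and, finally, derives the physical port
 * number (from the interrupt-pin register, or portnum on ISP82xx).
 */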
ea5b6382 1705static inline void
e315cd28 1706qla2x00_set_isp_flags(struct qla_hw_data *ha)
ea5b6382
AV
1707{
1708 ha->device_type = DT_EXTENDED_IDS;
1709 switch (ha->pdev->device) {
1710 case PCI_DEVICE_ID_QLOGIC_ISP2100:
1711 ha->device_type |= DT_ISP2100;
1712 ha->device_type &= ~DT_EXTENDED_IDS;
441d1072 1713 ha->fw_srisc_address = RISC_START_ADDRESS_2100;
ea5b6382
AV
1714 break;
1715 case PCI_DEVICE_ID_QLOGIC_ISP2200:
1716 ha->device_type |= DT_ISP2200;
1717 ha->device_type &= ~DT_EXTENDED_IDS;
441d1072 1718 ha->fw_srisc_address = RISC_START_ADDRESS_2100;
ea5b6382
AV
1719 break;
1720 case PCI_DEVICE_ID_QLOGIC_ISP2300:
1721 ha->device_type |= DT_ISP2300;
4a59f71d 1722 ha->device_type |= DT_ZIO_SUPPORTED;
441d1072 1723 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382
AV
1724 break;
1725 case PCI_DEVICE_ID_QLOGIC_ISP2312:
1726 ha->device_type |= DT_ISP2312;
4a59f71d 1727 ha->device_type |= DT_ZIO_SUPPORTED;
441d1072 1728 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382
AV
1729 break;
1730 case PCI_DEVICE_ID_QLOGIC_ISP2322:
1731 ha->device_type |= DT_ISP2322;
4a59f71d 1732 ha->device_type |= DT_ZIO_SUPPORTED;
ea5b6382
AV
1733 if (ha->pdev->subsystem_vendor == 0x1028 &&
1734 ha->pdev->subsystem_device == 0x0170)
1735 ha->device_type |= DT_OEM_001;
441d1072 1736 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382
AV
1737 break;
1738 case PCI_DEVICE_ID_QLOGIC_ISP6312:
1739 ha->device_type |= DT_ISP6312;
441d1072 1740 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382
AV
1741 break;
1742 case PCI_DEVICE_ID_QLOGIC_ISP6322:
1743 ha->device_type |= DT_ISP6322;
441d1072 1744 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382
AV
1745 break;
1746 case PCI_DEVICE_ID_QLOGIC_ISP2422:
1747 ha->device_type |= DT_ISP2422;
4a59f71d 1748 ha->device_type |= DT_ZIO_SUPPORTED;
e428924c 1749 ha->device_type |= DT_FWI2;
c76f2c01 1750 ha->device_type |= DT_IIDMA;
441d1072 1751 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382
AV
1752 break;
1753 case PCI_DEVICE_ID_QLOGIC_ISP2432:
1754 ha->device_type |= DT_ISP2432;
4a59f71d 1755 ha->device_type |= DT_ZIO_SUPPORTED;
e428924c 1756 ha->device_type |= DT_FWI2;
c76f2c01 1757 ha->device_type |= DT_IIDMA;
441d1072 1758 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1759 break;
4d4df193
HK
1760 case PCI_DEVICE_ID_QLOGIC_ISP8432:
1761 ha->device_type |= DT_ISP8432;
1762 ha->device_type |= DT_ZIO_SUPPORTED;
1763 ha->device_type |= DT_FWI2;
1764 ha->device_type |= DT_IIDMA;
1765 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1766 break;
044cc6c8
AV
1767 case PCI_DEVICE_ID_QLOGIC_ISP5422:
1768 ha->device_type |= DT_ISP5422;
e428924c 1769 ha->device_type |= DT_FWI2;
441d1072 1770 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1771 break;
044cc6c8
AV
1772 case PCI_DEVICE_ID_QLOGIC_ISP5432:
1773 ha->device_type |= DT_ISP5432;
e428924c 1774 ha->device_type |= DT_FWI2;
441d1072 1775 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1776 break;
c3a2f0df
AV
1777 case PCI_DEVICE_ID_QLOGIC_ISP2532:
1778 ha->device_type |= DT_ISP2532;
1779 ha->device_type |= DT_ZIO_SUPPORTED;
1780 ha->device_type |= DT_FWI2;
1781 ha->device_type |= DT_IIDMA;
441d1072 1782 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1783 break;
3a03eb79
AV
1784 case PCI_DEVICE_ID_QLOGIC_ISP8001:
1785 ha->device_type |= DT_ISP8001;
1786 ha->device_type |= DT_ZIO_SUPPORTED;
1787 ha->device_type |= DT_FWI2;
1788 ha->device_type |= DT_IIDMA;
1789 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1790 break;
a9083016
GM
1791 case PCI_DEVICE_ID_QLOGIC_ISP8021:
1792 ha->device_type |= DT_ISP8021;
1793 ha->device_type |= DT_ZIO_SUPPORTED;
1794 ha->device_type |= DT_FWI2;
1795 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1796 /* Initialize 82XX ISP flags */
1797 qla82xx_init_flags(ha);
1798 break;
ea5b6382 1799 }
e5b68a61 1800
a9083016
GM
1801 if (IS_QLA82XX(ha))
1802 ha->port_no = !(ha->portnum & 1);
1803 else
1804 /* Get adapter physical port no from interrupt pin register. */
1805 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
1806
e5b68a61
AC
1807 if (ha->port_no & 1)
1808 ha->flags.port0 = 1;
1809 else
1810 ha->flags.port0 = 0;
ea5b6382
AV
1811}
1812
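/*
 * qla2x00_iospace_config() reserves the PCI regions selected at probe
 * time, records an optional PIO base (only needed for flash access on
 * ISP2312 v2), maps the MMIO register window and, for multiqueue-capable
 * ISP25xx/81xx parts, maps BAR 3 and sizes max_req_queues/max_rsp_queues
 * from the MSI-X vector count.
 */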
1da177e4 1813static int
e315cd28 1814qla2x00_iospace_config(struct qla_hw_data *ha)
1da177e4 1815{
3776541d 1816 resource_size_t pio;
73208dfd 1817 uint16_t msix;
68ca949c 1818 int cpus;
1da177e4 1819
a9083016
GM
1820 if (IS_QLA82XX(ha))
1821 return qla82xx_iospace_config(ha);
1822
285d0321
AV
1823 if (pci_request_selected_regions(ha->pdev, ha->bars,
1824 QLA2XXX_DRIVER_NAME)) {
1825 qla_printk(KERN_WARNING, ha,
1826 "Failed to reserve PIO/MMIO regions (%s)\n",
1827 pci_name(ha->pdev));
1828
1829 goto iospace_error_exit;
1830 }
1831 if (!(ha->bars & 1))
1832 goto skip_pio;
1833
1da177e4
LT
1834 /* We only need PIO for Flash operations on ISP2312 v2 chips. */
1835 pio = pci_resource_start(ha->pdev, 0);
3776541d
AV
1836 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1837 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1da177e4
LT
1838 qla_printk(KERN_WARNING, ha,
1839 "Invalid PCI I/O region size (%s)...\n",
1840 pci_name(ha->pdev));
1841 pio = 0;
1842 }
1843 } else {
1844 qla_printk(KERN_WARNING, ha,
1845 "region #0 not a PIO resource (%s)...\n",
1846 pci_name(ha->pdev));
1847 pio = 0;
1848 }
285d0321 1849 ha->pio_address = pio;
1da177e4 1850
285d0321 1851skip_pio:
1da177e4 1852 /* Use MMIO operations for all accesses. */
3776541d 1853 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1da177e4 1854 qla_printk(KERN_ERR, ha,
3776541d 1855 "region #1 not an MMIO resource (%s), aborting\n",
1da177e4
LT
1856 pci_name(ha->pdev));
1857 goto iospace_error_exit;
1858 }
3776541d 1859 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1da177e4
LT
1860 qla_printk(KERN_ERR, ha,
1861 "Invalid PCI mem region size (%s), aborting\n",
1862 pci_name(ha->pdev));
1863 goto iospace_error_exit;
1864 }
1865
3776541d 1866 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1da177e4
LT
1867 if (!ha->iobase) {
1868 qla_printk(KERN_ERR, ha,
1869 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
1870
1871 goto iospace_error_exit;
1872 }
1873
73208dfd 1874 /* Determine queue resources */
2afa19a9 1875 ha->max_req_queues = ha->max_rsp_queues = 1;
d84a47c2
MH
1876 if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
1877 (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
2afa19a9 1878 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
17d98630 1879 goto mqiobase_exit;
d84a47c2 1880
17d98630
AC
1881 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1882 pci_resource_len(ha->pdev, 3));
1883 if (ha->mqiobase) {
1884 /* Read MSIX vector size of the board */
1885 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1886 ha->msix_count = msix;
68ca949c
AC
1887 /* Max queues are bounded by available msix vectors */
1888 /* queue 0 uses two msix vectors */
1889 if (ql2xmultique_tag) {
1890 cpus = num_online_cpus();
27dc9c5a 1891 ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
68ca949c
AC
1892 (cpus + 1) : (ha->msix_count - 1);
1893 ha->max_req_queues = 2;
1894 } else if (ql2xmaxqueues > 1) {
2afa19a9
AC
1895 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1896 QLA_MQ_SIZE : ql2xmaxqueues;
1897 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1898 " of request queues:%d\n", ha->max_req_queues));
1899 }
68ca949c
AC
1900 qla_printk(KERN_INFO, ha,
1901 "MSI-X vector count: %d\n", msix);
2afa19a9
AC
1902 } else
1903 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
17d98630
AC
1904
1905mqiobase_exit:
2afa19a9 1906 ha->msix_count = ha->max_rsp_queues + 1;
1da177e4
LT
1907 return (0);
1908
1909iospace_error_exit:
1910 return (-ENOMEM);
1911}
1912
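/*
 * Asynchronous scan hooks: qla2xxx_scan_start() queues loop-resync,
 * RSCN and NPIV work for the DPC thread, while qla2xxx_scan_finished()
 * lets the midlayer wait until the loop is READY or loop_reset_delay
 * seconds have passed.
 */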
1e99e33a
AV
1913static void
1914qla2xxx_scan_start(struct Scsi_Host *shost)
1915{
e315cd28 1916 scsi_qla_host_t *vha = shost_priv(shost);
1e99e33a 1917
cbc8eb67
AV
1918 if (vha->hw->flags.running_gold_fw)
1919 return;
1920
e315cd28
AC
1921 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1922 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1923 set_bit(RSCN_UPDATE, &vha->dpc_flags);
1924 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
1e99e33a
AV
1925}
1926
1927static int
1928qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1929{
e315cd28 1930 scsi_qla_host_t *vha = shost_priv(shost);
1e99e33a 1931
e315cd28 1932 if (!vha->host)
1e99e33a 1933 return 1;
e315cd28 1934 if (time > vha->hw->loop_reset_delay * HZ)
1e99e33a
AV
1935 return 1;
1936
e315cd28 1937 return atomic_read(&vha->loop_state) == LOOP_READY;
1e99e33a
AV
1938}
1939
1da177e4
LT
1940/*
1941 * PCI driver interface
1942 */
7ee61397
AV
1943static int __devinit
1944qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1da177e4 1945{
a1541d5a 1946 int ret = -ENODEV;
1da177e4 1947 struct Scsi_Host *host;
e315cd28
AC
1948 scsi_qla_host_t *base_vha = NULL;
1949 struct qla_hw_data *ha;
29856e28 1950 char pci_info[30];
1da177e4 1951 char fw_str[30];
5433383e 1952 struct scsi_host_template *sht;
c51da4ec 1953 int bars, max_id, mem_only = 0;
e315cd28 1954 uint16_t req_length = 0, rsp_length = 0;
73208dfd
AC
1955 struct req_que *req = NULL;
1956 struct rsp_que *rsp = NULL;
1da177e4 1957
285d0321 1958 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
a5326f86 1959 sht = &qla2xxx_driver_template;
5433383e 1960 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
8bc69e7d 1961 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
4d4df193 1962 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
8bc69e7d 1963 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
c3a2f0df 1964 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
3a03eb79 1965 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
a9083016
GM
1966 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
1967 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
285d0321 1968 bars = pci_select_bars(pdev, IORESOURCE_MEM);
09483916 1969 mem_only = 1;
285d0321
AV
1970 }
1971
09483916
BH
1972 if (mem_only) {
1973 if (pci_enable_device_mem(pdev))
1974 goto probe_out;
1975 } else {
1976 if (pci_enable_device(pdev))
1977 goto probe_out;
1978 }
285d0321 1979
0927678f
JB
1980 /* This may fail but that's ok */
1981 pci_enable_pcie_error_reporting(pdev);
285d0321 1982
e315cd28
AC
1983 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1984 if (!ha) {
1985 DEBUG(printk("Unable to allocate memory for ha\n"));
1986 goto probe_out;
1da177e4 1987 }
e315cd28 1988 ha->pdev = pdev;
1da177e4
LT
1989
1990 /* Clear our data area */
285d0321 1991 ha->bars = bars;
09483916 1992 ha->mem_only = mem_only;
df4bf0bb 1993 spin_lock_init(&ha->hardware_lock);
1da177e4 1994
ea5b6382
AV
1995 /* Set ISP-type information. */
1996 qla2x00_set_isp_flags(ha);
ca79cf66
DG
1997
 1998 /* Set EEH reset type to fundamental if required by the HBA. */
 1999 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
2000 pdev->needs_freset = 1;
ca79cf66
DG
2001 }
2002
1da177e4
LT
2003 /* Configure PCI I/O space */
2004 ret = qla2x00_iospace_config(ha);
a1541d5a 2005 if (ret)
e315cd28 2006 goto probe_hw_failed;
1da177e4 2007
1da177e4 2008 qla_printk(KERN_INFO, ha,
5433383e
AV
2009 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
2010 ha->iobase);
1da177e4 2011
1da177e4 2012 ha->prev_topology = 0;
fca29703 2013 ha->init_cb_size = sizeof(init_cb_t);
d8b45213 2014 ha->link_data_rate = PORT_SPEED_UNKNOWN;
854165f4 2015 ha->optrom_size = OPTROM_SIZE_2300;
1da177e4 2016
abbd8870 2017 /* Assign ISP specific operations. */
e315cd28 2018 max_id = MAX_TARGETS_2200;
1da177e4 2019 if (IS_QLA2100(ha)) {
e315cd28 2020 max_id = MAX_TARGETS_2100;
1da177e4 2021 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
e315cd28
AC
2022 req_length = REQUEST_ENTRY_CNT_2100;
2023 rsp_length = RESPONSE_ENTRY_CNT_2100;
2024 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
abbd8870 2025 ha->gid_list_info_size = 4;
3a03eb79
AV
2026 ha->flash_conf_off = ~0;
2027 ha->flash_data_off = ~0;
2028 ha->nvram_conf_off = ~0;
2029 ha->nvram_data_off = ~0;
fd34f556 2030 ha->isp_ops = &qla2100_isp_ops;
1da177e4 2031 } else if (IS_QLA2200(ha)) {
1da177e4 2032 ha->mbx_count = MAILBOX_REGISTER_COUNT;
e315cd28
AC
2033 req_length = REQUEST_ENTRY_CNT_2200;
2034 rsp_length = RESPONSE_ENTRY_CNT_2100;
2035 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
abbd8870 2036 ha->gid_list_info_size = 4;
3a03eb79
AV
2037 ha->flash_conf_off = ~0;
2038 ha->flash_data_off = ~0;
2039 ha->nvram_conf_off = ~0;
2040 ha->nvram_data_off = ~0;
fd34f556 2041 ha->isp_ops = &qla2100_isp_ops;
fca29703 2042 } else if (IS_QLA23XX(ha)) {
1da177e4 2043 ha->mbx_count = MAILBOX_REGISTER_COUNT;
e315cd28
AC
2044 req_length = REQUEST_ENTRY_CNT_2200;
2045 rsp_length = RESPONSE_ENTRY_CNT_2300;
2046 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
abbd8870 2047 ha->gid_list_info_size = 6;
854165f4
AV
2048 if (IS_QLA2322(ha) || IS_QLA6322(ha))
2049 ha->optrom_size = OPTROM_SIZE_2322;
3a03eb79
AV
2050 ha->flash_conf_off = ~0;
2051 ha->flash_data_off = ~0;
2052 ha->nvram_conf_off = ~0;
2053 ha->nvram_data_off = ~0;
fd34f556 2054 ha->isp_ops = &qla2300_isp_ops;
4d4df193 2055 } else if (IS_QLA24XX_TYPE(ha)) {
fca29703 2056 ha->mbx_count = MAILBOX_REGISTER_COUNT;
e315cd28
AC
2057 req_length = REQUEST_ENTRY_CNT_24XX;
2058 rsp_length = RESPONSE_ENTRY_CNT_2300;
2059 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2c3dfe3f 2060 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
fca29703 2061 ha->gid_list_info_size = 8;
854165f4 2062 ha->optrom_size = OPTROM_SIZE_24XX;
73208dfd 2063 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
fd34f556 2064 ha->isp_ops = &qla24xx_isp_ops;
3a03eb79
AV
2065 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2066 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2067 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2068 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
c3a2f0df 2069 } else if (IS_QLA25XX(ha)) {
c3a2f0df 2070 ha->mbx_count = MAILBOX_REGISTER_COUNT;
e315cd28
AC
2071 req_length = REQUEST_ENTRY_CNT_24XX;
2072 rsp_length = RESPONSE_ENTRY_CNT_2300;
2073 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
c3a2f0df 2074 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
c3a2f0df
AV
2075 ha->gid_list_info_size = 8;
2076 ha->optrom_size = OPTROM_SIZE_25XX;
73208dfd 2077 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
c3a2f0df 2078 ha->isp_ops = &qla25xx_isp_ops;
3a03eb79
AV
2079 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2080 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2081 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2082 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2083 } else if (IS_QLA81XX(ha)) {
2084 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2085 req_length = REQUEST_ENTRY_CNT_24XX;
2086 rsp_length = RESPONSE_ENTRY_CNT_2300;
2087 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2088 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2089 ha->gid_list_info_size = 8;
2090 ha->optrom_size = OPTROM_SIZE_81XX;
40859ae5 2091 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3a03eb79
AV
2092 ha->isp_ops = &qla81xx_isp_ops;
2093 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
2094 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
2095 ha->nvram_conf_off = ~0;
2096 ha->nvram_data_off = ~0;
a9083016
GM
2097 } else if (IS_QLA82XX(ha)) {
2098 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2099 req_length = REQUEST_ENTRY_CNT_82XX;
2100 rsp_length = RESPONSE_ENTRY_CNT_82XX;
2101 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2102 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2103 ha->gid_list_info_size = 8;
2104 ha->optrom_size = OPTROM_SIZE_82XX;
2105 ha->isp_ops = &qla82xx_isp_ops;
2106 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2107 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2108 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2109 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
1da177e4 2110 }
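	/*
	 * The chain above sized the request/response rings and the init
	 * control block, set the flash/NVRAM layout offsets and bound
	 * ha->isp_ops to the table matching the detected ISP type.
	 */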
1da177e4 2111
6c2f527c 2112 mutex_init(&ha->vport_lock);
0b05a1f0
MB
2113 init_completion(&ha->mbx_cmd_comp);
2114 complete(&ha->mbx_cmd_comp);
2115 init_completion(&ha->mbx_intr_comp);
1da177e4 2116
2c3dfe3f 2117 set_bit(0, (unsigned long *) ha->vp_idx_map);
1da177e4 2118
53303c42 2119 qla2x00_config_dma_addressing(ha);
73208dfd 2120 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
e315cd28 2121 if (!ret) {
1da177e4
LT
2122 qla_printk(KERN_WARNING, ha,
2123 "[ERROR] Failed to allocate memory for adapter\n");
2124
e315cd28
AC
2125 goto probe_hw_failed;
2126 }
2127
73208dfd 2128 req->max_q_depth = MAX_Q_DEPTH;
e315cd28 2129 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
73208dfd
AC
2130 req->max_q_depth = ql2xmaxqdepth;
2131
e315cd28
AC
2132
2133 base_vha = qla2x00_create_host(sht, ha);
2134 if (!base_vha) {
2135 qla_printk(KERN_WARNING, ha,
2136 "[ERROR] Failed to allocate memory for scsi_host\n");
2137
a1541d5a 2138 ret = -ENOMEM;
6e9f21f3 2139 qla2x00_mem_free(ha);
2afa19a9
AC
2140 qla2x00_free_req_que(ha, req);
2141 qla2x00_free_rsp_que(ha, rsp);
e315cd28 2142 goto probe_hw_failed;
1da177e4
LT
2143 }
2144
e315cd28
AC
2145 pci_set_drvdata(pdev, base_vha);
2146
e315cd28 2147 host = base_vha->host;
2afa19a9 2148 base_vha->req = req;
73208dfd
AC
2149 host->can_queue = req->length + 128;
2150 if (IS_QLA2XXX_MIDTYPE(ha))
e315cd28 2151 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
73208dfd 2152 else
e315cd28
AC
2153 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
2154 base_vha->vp_idx;
e315cd28
AC
2155 if (IS_QLA2100(ha))
2156 host->sg_tablesize = 32;
2157 host->max_id = max_id;
2158 host->this_id = 255;
2159 host->cmd_per_lun = 3;
2160 host->unique_id = host->host_no;
2161 host->max_cmd_len = MAX_CMDSZ;
2162 host->max_channel = MAX_BUSES - 1;
2163 host->max_lun = MAX_LUNS;
2164 host->transportt = qla2xxx_transport_template;
9a069e19 2165 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
e315cd28 2166
73208dfd
AC
2167 /* Set up the irqs */
2168 ret = qla2x00_request_irqs(ha, rsp);
2169 if (ret)
6e9f21f3 2170 goto probe_init_failed;
90a86fc0
JC
2171
2172 pci_save_state(pdev);
2173
73208dfd 2174 /* Alloc arrays of request and response ring ptrs */
7163ea81 2175que_init:
73208dfd
AC
2176 if (!qla2x00_alloc_queues(ha)) {
2177 qla_printk(KERN_WARNING, ha,
2178 "[ERROR] Failed to allocate memory for queue"
2179 " pointers\n");
6e9f21f3 2180 goto probe_init_failed;
73208dfd 2181 }
a9083016 2182
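	/*
	 * Wire up queue 0.  The request/response in/out pointers default
	 * to the ISP24xx register block, are redirected to the isp25mq
	 * block when multiqueue is enabled, and to the ISP82xx block on
	 * that hardware.
	 */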
73208dfd
AC
2183 ha->rsp_q_map[0] = rsp;
2184 ha->req_q_map[0] = req;
2afa19a9
AC
2185 rsp->req = req;
2186 req->rsp = rsp;
2187 set_bit(0, ha->req_qid_map);
2188 set_bit(0, ha->rsp_qid_map);
08029990
AV
2189 /* FWI2-capable only. */
2190 req->req_q_in = &ha->iobase->isp24.req_q_in;
2191 req->req_q_out = &ha->iobase->isp24.req_q_out;
2192 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
2193 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
17d98630 2194 if (ha->mqenable) {
08029990
AV
2195 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
2196 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
2197 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
2198 rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
17d98630
AC
2199 }
2200
a9083016
GM
2201 if (IS_QLA82XX(ha)) {
2202 req->req_q_out = &ha->iobase->isp82.req_q_out[0];
2203 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
2204 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
2205 }
2206
e315cd28 2207 if (qla2x00_initialize_adapter(base_vha)) {
1da177e4
LT
2208 qla_printk(KERN_WARNING, ha,
2209 "Failed to initialize adapter\n");
2210
2211 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
2212 "Adapter flags %x.\n",
e315cd28 2213 base_vha->host_no, base_vha->device_flags));
1da177e4 2214
a9083016
GM
2215 if (IS_QLA82XX(ha)) {
2216 qla82xx_idc_lock(ha);
2217 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2218 QLA82XX_DEV_FAILED);
2219 qla82xx_idc_unlock(ha);
2220 qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
2221 }
2222
a1541d5a 2223 ret = -ENODEV;
1da177e4
LT
2224 goto probe_failed;
2225 }
2226
7163ea81
AC
2227 if (ha->mqenable) {
2228 if (qla25xx_setup_mode(base_vha)) {
68ca949c
AC
2229 qla_printk(KERN_WARNING, ha,
2230 "Can't create queues, falling back to single"
2231 " queue mode\n");
7163ea81
AC
2232 goto que_init;
2233 }
2234 }
68ca949c 2235
cbc8eb67
AV
2236 if (ha->flags.running_gold_fw)
2237 goto skip_dpc;
2238
1da177e4
LT
2239 /*
2240 * Startup the kernel thread for this host adapter
2241 */
39a11240 2242 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
e315cd28 2243 "%s_dpc", base_vha->host_str);
39a11240 2244 if (IS_ERR(ha->dpc_thread)) {
1da177e4
LT
2245 qla_printk(KERN_WARNING, ha,
2246 "Unable to start DPC thread!\n");
39a11240 2247 ret = PTR_ERR(ha->dpc_thread);
1da177e4
LT
2248 goto probe_failed;
2249 }
1da177e4 2250
cbc8eb67 2251skip_dpc:
e315cd28
AC
2252 list_add_tail(&base_vha->list, &ha->vp_list);
2253 base_vha->host->irq = ha->pdev->irq;
1da177e4
LT
2254
 2255 /* Initialize the timer */
e315cd28 2256 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
1da177e4
LT
2257
2258 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
e315cd28 2259 base_vha->host_no, ha));
d19044c3 2260
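	/*
	 * DIF/DIX (T10 protection) support is advertised to the SCSI
	 * midlayer only on ISP25xx parts whose firmware reports the
	 * capability (fw_attributes BIT_4) and only when ql2xenabledif
	 * is set.
	 */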
bad75002
AE
2261 if (IS_QLA25XX(ha) && ql2xenabledif) {
2262 if (ha->fw_attributes & BIT_4) {
2263 base_vha->flags.difdix_supported = 1;
2264 DEBUG18(qla_printk(KERN_INFO, ha,
2265 "Registering for DIF/DIX type 1 and 3"
2266 " protection.\n"));
2267 scsi_host_set_prot(host,
2268 SHOST_DIF_TYPE1_PROTECTION
2269 | SHOST_DIF_TYPE3_PROTECTION
2270 | SHOST_DIX_TYPE1_PROTECTION
2271 | SHOST_DIX_TYPE3_PROTECTION);
2272 scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
2273 } else
2274 base_vha->flags.difdix_supported = 0;
2275 }
2276
a9083016
GM
2277 ha->isp_ops->enable_intrs(ha);
2278
a1541d5a
AV
2279 ret = scsi_add_host(host, &pdev->dev);
2280 if (ret)
2281 goto probe_failed;
2282
1486400f
MR
2283 base_vha->flags.init_done = 1;
2284 base_vha->flags.online = 1;
2285
1e99e33a
AV
2286 scsi_scan_host(host);
2287
e315cd28 2288 qla2x00_alloc_sysfs_attr(base_vha);
a1541d5a 2289
e315cd28 2290 qla2x00_init_host_attr(base_vha);
a1541d5a 2291
e315cd28 2292 qla2x00_dfs_setup(base_vha);
df613b96 2293
1da177e4
LT
2294 qla_printk(KERN_INFO, ha, "\n"
2295 " QLogic Fibre Channel HBA Driver: %s\n"
2296 " QLogic %s - %s\n"
5433383e
AV
2297 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
2298 qla2x00_version_str, ha->model_number,
e315cd28
AC
2299 ha->model_desc ? ha->model_desc : "", pdev->device,
2300 ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
2301 ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
2302 ha->isp_ops->fw_version_str(base_vha, fw_str));
1da177e4 2303
1da177e4
LT
2304 return 0;
2305
6e9f21f3 2306probe_init_failed:
2afa19a9
AC
2307 qla2x00_free_req_que(ha, req);
2308 qla2x00_free_rsp_que(ha, rsp);
2309 ha->max_req_queues = ha->max_rsp_queues = 0;
6e9f21f3 2310
1da177e4 2311probe_failed:
b9978769
AV
2312 if (base_vha->timer_active)
2313 qla2x00_stop_timer(base_vha);
2314 base_vha->flags.online = 0;
2315 if (ha->dpc_thread) {
2316 struct task_struct *t = ha->dpc_thread;
2317
2318 ha->dpc_thread = NULL;
2319 kthread_stop(t);
2320 }
2321
e315cd28 2322 qla2x00_free_device(base_vha);
1da177e4 2323
e315cd28 2324 scsi_host_put(base_vha->host);
1da177e4 2325
e315cd28 2326probe_hw_failed:
a9083016
GM
2327 if (IS_QLA82XX(ha)) {
2328 qla82xx_idc_lock(ha);
2329 qla82xx_clear_drv_active(ha);
2330 qla82xx_idc_unlock(ha);
2331 iounmap((device_reg_t __iomem *)ha->nx_pcibase);
2332 if (!ql2xdbwr)
2333 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
2334 } else {
2335 if (ha->iobase)
2336 iounmap(ha->iobase);
2337 }
e315cd28
AC
2338 pci_release_selected_regions(ha->pdev, ha->bars);
2339 kfree(ha);
2340 ha = NULL;
1da177e4 2341
a1541d5a 2342probe_out:
e315cd28 2343 pci_disable_device(pdev);
a1541d5a 2344 return ret;
1da177e4 2345}
1da177e4 2346
4c993f76 2347static void
7ee61397 2348qla2x00_remove_one(struct pci_dev *pdev)
1da177e4 2349{
e315cd28
AC
2350 scsi_qla_host_t *base_vha, *vha, *temp;
2351 struct qla_hw_data *ha;
2352
2353 base_vha = pci_get_drvdata(pdev);
2354 ha = base_vha->hw;
2355
2356 list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
2357 if (vha && vha->fc_vport)
2358 fc_vport_terminate(vha->fc_vport);
2359 }
1da177e4 2360
e315cd28 2361 set_bit(UNLOADING, &base_vha->dpc_flags);
1da177e4 2362
b9978769
AV
2363 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
2364
e315cd28 2365 qla2x00_dfs_remove(base_vha);
c795c1e4 2366
e315cd28 2367 qla84xx_put_chip(base_vha);
c795c1e4 2368
b9978769
AV
2369 /* Disable timer */
2370 if (base_vha->timer_active)
2371 qla2x00_stop_timer(base_vha);
2372
2373 base_vha->flags.online = 0;
2374
68ca949c
AC
2375 /* Flush the work queue and remove it */
2376 if (ha->wq) {
2377 flush_workqueue(ha->wq);
2378 destroy_workqueue(ha->wq);
2379 ha->wq = NULL;
2380 }
2381
b9978769
AV
2382 /* Kill the kernel thread for this host */
2383 if (ha->dpc_thread) {
2384 struct task_struct *t = ha->dpc_thread;
2385
2386 /*
2387 * qla2xxx_wake_dpc checks for ->dpc_thread
2388 * so we need to zero it out.
2389 */
2390 ha->dpc_thread = NULL;
2391 kthread_stop(t);
2392 }
2393
e315cd28 2394 qla2x00_free_sysfs_attr(base_vha);
df613b96 2395
e315cd28 2396 fc_remove_host(base_vha->host);
4d4df193 2397
e315cd28 2398 scsi_remove_host(base_vha->host);
1da177e4 2399
e315cd28 2400 qla2x00_free_device(base_vha);
bdf79621 2401
e315cd28 2402 scsi_host_put(base_vha->host);
1da177e4 2403
a9083016 2404 if (IS_QLA82XX(ha)) {
b963752f
GM
2405 qla82xx_idc_lock(ha);
2406 qla82xx_clear_drv_active(ha);
2407 qla82xx_idc_unlock(ha);
2408
a9083016
GM
2409 iounmap((device_reg_t __iomem *)ha->nx_pcibase);
2410 if (!ql2xdbwr)
2411 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
2412 } else {
2413 if (ha->iobase)
2414 iounmap(ha->iobase);
1da177e4 2415
a9083016
GM
2416 if (ha->mqiobase)
2417 iounmap(ha->mqiobase);
2418 }
73208dfd 2419
e315cd28
AC
2420 pci_release_selected_regions(ha->pdev, ha->bars);
2421 kfree(ha);
2422 ha = NULL;
1da177e4 2423
90a86fc0
JC
2424 pci_disable_pcie_error_reporting(pdev);
2425
665db93b 2426 pci_disable_device(pdev);
1da177e4
LT
2427 pci_set_drvdata(pdev, NULL);
2428}
1da177e4
LT
2429
2430static void
e315cd28 2431qla2x00_free_device(scsi_qla_host_t *vha)
1da177e4 2432{
e315cd28 2433 struct qla_hw_data *ha = vha->hw;
1da177e4 2434
85880801
AV
2435 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
2436
2437 /* Disable timer */
2438 if (vha->timer_active)
2439 qla2x00_stop_timer(vha);
2440
2441 /* Kill the kernel thread for this host */
2442 if (ha->dpc_thread) {
2443 struct task_struct *t = ha->dpc_thread;
2444
2445 /*
2446 * qla2xxx_wake_dpc checks for ->dpc_thread
2447 * so we need to zero it out.
2448 */
2449 ha->dpc_thread = NULL;
2450 kthread_stop(t);
2451 }
2452
2afa19a9
AC
2453 qla25xx_delete_queues(vha);
2454
df613b96 2455 if (ha->flags.fce_enabled)
e315cd28 2456 qla2x00_disable_fce_trace(vha, NULL, NULL);
df613b96 2457
a7a167bf 2458 if (ha->eft)
e315cd28 2459 qla2x00_disable_eft_trace(vha);
a7a167bf 2460
f6ef3b18 2461 /* Stop currently executing firmware. */
e315cd28 2462 qla2x00_try_to_stop_firmware(vha);
1da177e4 2463
85880801
AV
2464 vha->flags.online = 0;
2465
f6ef3b18 2466 /* turn-off interrupts on the card */
a9083016
GM
2467 if (ha->interrupts_on) {
2468 vha->flags.init_done = 0;
fd34f556 2469 ha->isp_ops->disable_intrs(ha);
a9083016 2470 }
f6ef3b18 2471
e315cd28 2472 qla2x00_free_irqs(vha);
1da177e4 2473
e315cd28 2474 qla2x00_mem_free(ha);
73208dfd
AC
2475
2476 qla2x00_free_queues(ha);
1da177e4
LT
2477}
2478
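/*
 * qla2x00_schedule_rport_del() removes an fcport's rport either
 * immediately or, when "defer" is set, by stashing it in fcport->drport
 * and waking the base vha's DPC thread via FCPORT_UPDATE_NEEDED.
 */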
d97994dc 2479static inline void
e315cd28 2480qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
d97994dc
AV
2481 int defer)
2482{
d97994dc 2483 struct fc_rport *rport;
67becc00 2484 scsi_qla_host_t *base_vha;
d97994dc
AV
2485
2486 if (!fcport->rport)
2487 return;
2488
2489 rport = fcport->rport;
2490 if (defer) {
67becc00 2491 base_vha = pci_get_drvdata(vha->hw->pdev);
e315cd28 2492 spin_lock_irq(vha->host->host_lock);
d97994dc 2493 fcport->drport = rport;
e315cd28 2494 spin_unlock_irq(vha->host->host_lock);
67becc00
AV
2495 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2496 qla2xxx_wake_dpc(base_vha);
5f3a9a20 2497 } else
d97994dc 2498 fc_remote_port_delete(rport);
d97994dc
AV
2499}
2500
1da177e4
LT
2501/*
2502 * qla2x00_mark_device_lost Updates fcport state when device goes offline.
2503 *
2504 * Input: ha = adapter block pointer. fcport = port structure pointer.
2505 *
2506 * Return: None.
2507 *
2508 * Context:
2509 */
e315cd28 2510void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
d97994dc 2511 int do_login, int defer)
1da177e4 2512{
2c3dfe3f 2513 if (atomic_read(&fcport->state) == FCS_ONLINE &&
e315cd28
AC
2514 vha->vp_idx == fcport->vp_idx) {
2515 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2516 qla2x00_schedule_rport_del(vha, fcport, defer);
2517 }
fa2a1ce5 2518 /*
1da177e4
LT
2519 * We may need to retry the login, so don't change the state of the
2520 * port but do the retries.
2521 */
2522 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
2523 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2524
2525 if (!do_login)
2526 return;
2527
2528 if (fcport->login_retry == 0) {
e315cd28
AC
2529 fcport->login_retry = vha->hw->login_retry_count;
2530 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1da177e4
LT
2531
2532 DEBUG(printk("scsi(%ld): Port login retry: "
2533 "%02x%02x%02x%02x%02x%02x%02x%02x, "
2534 "id = 0x%04x retry cnt=%d\n",
e315cd28 2535 vha->host_no,
1da177e4
LT
2536 fcport->port_name[0],
2537 fcport->port_name[1],
2538 fcport->port_name[2],
2539 fcport->port_name[3],
2540 fcport->port_name[4],
2541 fcport->port_name[5],
2542 fcport->port_name[6],
2543 fcport->port_name[7],
2544 fcport->loop_id,
2545 fcport->login_retry));
2546 }
2547}
2548
2549/*
2550 * qla2x00_mark_all_devices_lost
2551 * Updates fcport state when device goes offline.
2552 *
2553 * Input:
2554 * ha = adapter block pointer.
2555 * fcport = port structure pointer.
2556 *
2557 * Return:
2558 * None.
2559 *
2560 * Context:
2561 */
2562void
e315cd28 2563qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
1da177e4
LT
2564{
2565 fc_port_t *fcport;
2566
e315cd28 2567 list_for_each_entry(fcport, &vha->vp_fcports, list) {
0d6e61bc 2568 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
1da177e4 2569 continue;
0d6e61bc 2570
1da177e4
LT
2571 /*
2572 * No point in marking the device as lost, if the device is
2573 * already DEAD.
2574 */
2575 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
2576 continue;
e315cd28 2577 if (atomic_read(&fcport->state) == FCS_ONLINE) {
0d6e61bc
AV
2578 if (defer)
2579 qla2x00_schedule_rport_del(vha, fcport, defer);
2580 else if (vha->vp_idx == fcport->vp_idx)
2581 qla2x00_schedule_rport_del(vha, fcport, defer);
2582 }
2583 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1da177e4
LT
2584 }
2585}
2586
2587/*
2588* qla2x00_mem_alloc
2589* Allocates adapter memory.
2590*
2591* Returns:
2592* 0 = success.
e8711085 2593* !0 = failure.
1da177e4 2594*/
e8711085 2595static int
73208dfd
AC
2596qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2597 struct req_que **req, struct rsp_que **rsp)
1da177e4
LT
2598{
2599 char name[16];
1da177e4 2600
e8711085 2601 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
e315cd28 2602 &ha->init_cb_dma, GFP_KERNEL);
e8711085 2603 if (!ha->init_cb)
e315cd28 2604 goto fail;
e8711085 2605
e315cd28
AC
2606 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
2607 &ha->gid_list_dma, GFP_KERNEL);
2608 if (!ha->gid_list)
e8711085 2609 goto fail_free_init_cb;
1da177e4 2610
e8711085
AV
2611 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2612 if (!ha->srb_mempool)
e315cd28 2613 goto fail_free_gid_list;
e8711085 2614
a9083016
GM
2615 if (IS_QLA82XX(ha)) {
2616 /* Allocate cache for CT6 Ctx. */
2617 if (!ctx_cachep) {
2618 ctx_cachep = kmem_cache_create("qla2xxx_ctx",
2619 sizeof(struct ct6_dsd), 0,
2620 SLAB_HWCACHE_ALIGN, NULL);
2621 if (!ctx_cachep)
2622 goto fail_free_gid_list;
2623 }
2624 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
2625 ctx_cachep);
2626 if (!ha->ctx_mempool)
2627 goto fail_free_srb_mempool;
2628 }
2629
e8711085
AV
2630 /* Get memory for cached NVRAM */
2631 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2632 if (!ha->nvram)
a9083016 2633 goto fail_free_ctx_mempool;
e8711085 2634
e315cd28
AC
2635 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
2636 ha->pdev->device);
2637 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2638 DMA_POOL_SIZE, 8, 0);
2639 if (!ha->s_dma_pool)
2640 goto fail_free_nvram;
2641
bad75002 2642 if (IS_QLA82XX(ha) || ql2xenabledif) {
a9083016
GM
2643 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2644 DSD_LIST_DMA_POOL_SIZE, 8, 0);
2645 if (!ha->dl_dma_pool) {
2646 qla_printk(KERN_WARNING, ha,
2647 "Memory Allocation failed - dl_dma_pool\n");
2648 goto fail_s_dma_pool;
2649 }
2650
2651 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2652 FCP_CMND_DMA_POOL_SIZE, 8, 0);
2653 if (!ha->fcp_cmnd_dma_pool) {
2654 qla_printk(KERN_WARNING, ha,
2655 "Memory Allocation failed - fcp_cmnd_dma_pool\n");
2656 goto fail_dl_dma_pool;
2657 }
2658 }
2659
e8711085
AV
2660 /* Allocate memory for SNS commands */
2661 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
e315cd28 2662 /* Get consistent memory allocated for SNS commands */
e8711085 2663 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
e315cd28 2664 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
e8711085 2665 if (!ha->sns_cmd)
e315cd28 2666 goto fail_dma_pool;
e8711085 2667 } else {
e315cd28 2668 /* Get consistent memory allocated for MS IOCB */
e8711085 2669 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
e315cd28 2670 &ha->ms_iocb_dma);
e8711085 2671 if (!ha->ms_iocb)
e315cd28
AC
2672 goto fail_dma_pool;
2673 /* Get consistent memory allocated for CT SNS commands */
e8711085 2674 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
e315cd28 2675 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
e8711085
AV
2676 if (!ha->ct_sns)
2677 goto fail_free_ms_iocb;
1da177e4
LT
2678 }
2679
e315cd28 2680 /* Allocate memory for request ring */
73208dfd
AC
2681 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2682 if (!*req) {
e315cd28
AC
2683 DEBUG(printk("Unable to allocate memory for req\n"));
2684 goto fail_req;
2685 }
73208dfd
AC
2686 (*req)->length = req_len;
2687 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
2688 ((*req)->length + 1) * sizeof(request_t),
2689 &(*req)->dma, GFP_KERNEL);
2690 if (!(*req)->ring) {
e315cd28
AC
2691 DEBUG(printk("Unable to allocate memory for req_ring\n"));
2692 goto fail_req_ring;
2693 }
2694 /* Allocate memory for response ring */
73208dfd
AC
2695 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2696 if (!*rsp) {
2697 qla_printk(KERN_WARNING, ha,
2698 "Unable to allocate memory for rsp\n");
e315cd28
AC
2699 goto fail_rsp;
2700 }
73208dfd
AC
2701 (*rsp)->hw = ha;
2702 (*rsp)->length = rsp_len;
2703 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
2704 ((*rsp)->length + 1) * sizeof(response_t),
2705 &(*rsp)->dma, GFP_KERNEL);
2706 if (!(*rsp)->ring) {
2707 qla_printk(KERN_WARNING, ha,
2708 "Unable to allocate memory for rsp_ring\n");
e315cd28
AC
2709 goto fail_rsp_ring;
2710 }
73208dfd
AC
2711 (*req)->rsp = *rsp;
2712 (*rsp)->req = *req;
2713 /* Allocate memory for NVRAM data for vports */
2714 if (ha->nvram_npiv_size) {
2715 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2716 ha->nvram_npiv_size, GFP_KERNEL);
2717 if (!ha->npiv_info) {
2718 qla_printk(KERN_WARNING, ha,
2719 "Unable to allocate memory for npiv info\n");
2720 goto fail_npiv_info;
2721 }
2722 } else
2723 ha->npiv_info = NULL;
e8711085 2724
b64b0e8f 2725 /* Get consistent memory allocated for EX-INIT-CB. */
a9083016 2726 if (IS_QLA8XXX_TYPE(ha)) {
b64b0e8f
AV
2727 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2728 &ha->ex_init_cb_dma);
2729 if (!ha->ex_init_cb)
2730 goto fail_ex_init_cb;
2731 }
2732
a9083016
GM
2733 INIT_LIST_HEAD(&ha->gbl_dsd_list);
2734
5ff1d584
AV
2735 /* Get consistent memory allocated for Async Port-Database. */
2736 if (!IS_FWI2_CAPABLE(ha)) {
2737 ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2738 &ha->async_pd_dma);
2739 if (!ha->async_pd)
2740 goto fail_async_pd;
2741 }
2742
e315cd28
AC
2743 INIT_LIST_HEAD(&ha->vp_list);
2744 return 1;
2745
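/*
 * Error unwind: each fail_* label below releases only what was set up
 * before the failing step, in the reverse order of allocation, and then
 * falls through to the next label.
 */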
5ff1d584
AV
2746fail_async_pd:
2747 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
b64b0e8f
AV
2748fail_ex_init_cb:
2749 kfree(ha->npiv_info);
73208dfd
AC
2750fail_npiv_info:
2751 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
2752 sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
2753 (*rsp)->ring = NULL;
2754 (*rsp)->dma = 0;
e315cd28 2755fail_rsp_ring:
73208dfd 2756 kfree(*rsp);
e315cd28 2757fail_rsp:
73208dfd
AC
2758 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
2759 sizeof(request_t), (*req)->ring, (*req)->dma);
2760 (*req)->ring = NULL;
2761 (*req)->dma = 0;
e315cd28 2762fail_req_ring:
73208dfd 2763 kfree(*req);
e315cd28
AC
2764fail_req:
2765 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2766 ha->ct_sns, ha->ct_sns_dma);
2767 ha->ct_sns = NULL;
2768 ha->ct_sns_dma = 0;
e8711085
AV
2769fail_free_ms_iocb:
2770 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2771 ha->ms_iocb = NULL;
2772 ha->ms_iocb_dma = 0;
e315cd28 2773fail_dma_pool:
bad75002 2774 if (IS_QLA82XX(ha) || ql2xenabledif) {
a9083016
GM
2775 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2776 ha->fcp_cmnd_dma_pool = NULL;
2777 }
2778fail_dl_dma_pool:
bad75002 2779 if (IS_QLA82XX(ha) || ql2xenabledif) {
a9083016
GM
2780 dma_pool_destroy(ha->dl_dma_pool);
2781 ha->dl_dma_pool = NULL;
2782 }
2783fail_s_dma_pool:
e315cd28
AC
2784 dma_pool_destroy(ha->s_dma_pool);
2785 ha->s_dma_pool = NULL;
e8711085
AV
2786fail_free_nvram:
2787 kfree(ha->nvram);
2788 ha->nvram = NULL;
a9083016
GM
2789fail_free_ctx_mempool:
2790 mempool_destroy(ha->ctx_mempool);
2791 ha->ctx_mempool = NULL;
e8711085
AV
2792fail_free_srb_mempool:
2793 mempool_destroy(ha->srb_mempool);
2794 ha->srb_mempool = NULL;
e8711085
AV
2795fail_free_gid_list:
2796 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
e315cd28 2797 ha->gid_list_dma);
e8711085
AV
2798 ha->gid_list = NULL;
2799 ha->gid_list_dma = 0;
e315cd28
AC
2800fail_free_init_cb:
2801 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2802 ha->init_cb_dma);
2803 ha->init_cb = NULL;
2804 ha->init_cb_dma = 0;
e8711085 2805fail:
e315cd28 2806 DEBUG(printk("%s: Memory allocation failure\n", __func__));
e8711085 2807 return -ENOMEM;
1da177e4
LT
2808}
2809
2810/*
2811* qla2x00_mem_free
2812* Frees all adapter allocated memory.
2813*
2814* Input:
2815* ha = adapter block pointer.
2816*/
a824ebb3 2817static void
e315cd28 2818qla2x00_mem_free(struct qla_hw_data *ha)
1da177e4 2819{
e8711085
AV
2820 if (ha->srb_mempool)
2821 mempool_destroy(ha->srb_mempool);
1da177e4 2822
df613b96
AV
2823 if (ha->fce)
2824 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
e315cd28 2825 ha->fce_dma);
df613b96 2826
a7a167bf
AV
2827 if (ha->fw_dump) {
2828 if (ha->eft)
2829 dma_free_coherent(&ha->pdev->dev,
e315cd28 2830 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
a7a167bf
AV
2831 vfree(ha->fw_dump);
2832 }
2833
11bbc1d8
AV
2834 if (ha->dcbx_tlv)
2835 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
2836 ha->dcbx_tlv, ha->dcbx_tlv_dma);
2837
ce0423f4
AV
2838 if (ha->xgmac_data)
2839 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
2840 ha->xgmac_data, ha->xgmac_data_dma);
2841
1da177e4
LT
2842 if (ha->sns_cmd)
2843 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
e315cd28 2844 ha->sns_cmd, ha->sns_cmd_dma);
1da177e4
LT
2845
2846 if (ha->ct_sns)
2847 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
e315cd28 2848 ha->ct_sns, ha->ct_sns_dma);
1da177e4 2849
88729e53
AV
2850 if (ha->sfp_data)
2851 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
2852
ad0ecd61
JC
2853 if (ha->edc_data)
2854 dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
2855
1da177e4
LT
2856 if (ha->ms_iocb)
2857 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2858
b64b0e8f 2859 if (ha->ex_init_cb)
a9083016
GM
2860 dma_pool_free(ha->s_dma_pool,
2861 ha->ex_init_cb, ha->ex_init_cb_dma);
b64b0e8f 2862
5ff1d584
AV
2863 if (ha->async_pd)
2864 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
2865
1da177e4
LT
2866 if (ha->s_dma_pool)
2867 dma_pool_destroy(ha->s_dma_pool);
2868
1da177e4
LT
2869 if (ha->gid_list)
2870 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
e315cd28 2871 ha->gid_list_dma);
1da177e4 2872
a9083016
GM
2873 if (IS_QLA82XX(ha)) {
2874 if (!list_empty(&ha->gbl_dsd_list)) {
2875 struct dsd_dma *dsd_ptr, *tdsd_ptr;
2876
 2877 /* Clean up previously allocated DSD list entries. */
2878 list_for_each_entry_safe(dsd_ptr,
2879 tdsd_ptr, &ha->gbl_dsd_list, list) {
2880 dma_pool_free(ha->dl_dma_pool,
2881 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
2882 list_del(&dsd_ptr->list);
2883 kfree(dsd_ptr);
2884 }
2885 }
2886 }
2887
2888 if (ha->dl_dma_pool)
2889 dma_pool_destroy(ha->dl_dma_pool);
2890
2891 if (ha->fcp_cmnd_dma_pool)
2892 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2893
2894 if (ha->ctx_mempool)
2895 mempool_destroy(ha->ctx_mempool);
2896
e315cd28
AC
2897 if (ha->init_cb)
2898 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
a9083016 2899 ha->init_cb, ha->init_cb_dma);
e315cd28
AC
2900 vfree(ha->optrom_buffer);
2901 kfree(ha->nvram);
73208dfd 2902 kfree(ha->npiv_info);
1da177e4 2903
e8711085 2904 ha->srb_mempool = NULL;
a9083016 2905 ha->ctx_mempool = NULL;
a7a167bf
AV
2906 ha->eft = NULL;
2907 ha->eft_dma = 0;
1da177e4
LT
2908 ha->sns_cmd = NULL;
2909 ha->sns_cmd_dma = 0;
2910 ha->ct_sns = NULL;
2911 ha->ct_sns_dma = 0;
2912 ha->ms_iocb = NULL;
2913 ha->ms_iocb_dma = 0;
1da177e4
LT
2914 ha->init_cb = NULL;
2915 ha->init_cb_dma = 0;
b64b0e8f
AV
2916 ha->ex_init_cb = NULL;
2917 ha->ex_init_cb_dma = 0;
5ff1d584
AV
2918 ha->async_pd = NULL;
2919 ha->async_pd_dma = 0;
1da177e4
LT
2920
2921 ha->s_dma_pool = NULL;
a9083016
GM
2922 ha->dl_dma_pool = NULL;
2923 ha->fcp_cmnd_dma_pool = NULL;
1da177e4 2924
1da177e4
LT
2925 ha->gid_list = NULL;
2926 ha->gid_list_dma = 0;
2927
e315cd28
AC
2928 ha->fw_dump = NULL;
2929 ha->fw_dumped = 0;
2930 ha->fw_dump_reading = 0;
e315cd28 2931}
1da177e4 2932
e315cd28
AC
2933struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2934 struct qla_hw_data *ha)
2935{
2936 struct Scsi_Host *host;
2937 struct scsi_qla_host *vha = NULL;
854165f4 2938
e315cd28
AC
2939 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
2940 if (host == NULL) {
2941 printk(KERN_WARNING
2942 "qla2xxx: Couldn't allocate host from scsi layer!\n");
2943 goto fail;
2944 }
2945
2946 /* Clear our data area */
2947 vha = shost_priv(host);
2948 memset(vha, 0, sizeof(scsi_qla_host_t));
2949
2950 vha->host = host;
2951 vha->host_no = host->host_no;
2952 vha->hw = ha;
2953
2954 INIT_LIST_HEAD(&vha->vp_fcports);
2955 INIT_LIST_HEAD(&vha->work_list);
2956 INIT_LIST_HEAD(&vha->list);
2957
f999f4c1
AV
2958 spin_lock_init(&vha->work_lock);
2959
e315cd28
AC
2960 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2961 return vha;
2962
2963fail:
2964 return vha;
1da177e4
LT
2965}
2966
01ef66bb 2967static struct qla_work_evt *
f999f4c1 2968qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
0971de7f
AV
2969{
2970 struct qla_work_evt *e;
2971
f999f4c1 2972 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
0971de7f
AV
2973 if (!e)
2974 return NULL;
2975
2976 INIT_LIST_HEAD(&e->list);
2977 e->type = type;
2978 e->flags = QLA_EVT_FLAG_FREE;
2979 return e;
2980}
2981
01ef66bb 2982static int
f999f4c1 2983qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
0971de7f 2984{
f999f4c1 2985 unsigned long flags;
0971de7f 2986
f999f4c1 2987 spin_lock_irqsave(&vha->work_lock, flags);
e315cd28 2988 list_add_tail(&e->list, &vha->work_list);
f999f4c1 2989 spin_unlock_irqrestore(&vha->work_lock, flags);
e315cd28 2990 qla2xxx_wake_dpc(vha);
f999f4c1 2991
0971de7f
AV
2992 return QLA_SUCCESS;
2993}
2994
2995int
e315cd28 2996qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
0971de7f
AV
2997 u32 data)
2998{
2999 struct qla_work_evt *e;
3000
f999f4c1 3001 e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
0971de7f
AV
3002 if (!e)
3003 return QLA_FUNCTION_FAILED;
3004
3005 e->u.aen.code = code;
3006 e->u.aen.data = data;
f999f4c1 3007 return qla2x00_post_work(vha, e);
0971de7f
AV
3008}
3009
8a659571
AV
3010int
3011qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
3012{
3013 struct qla_work_evt *e;
3014
f999f4c1 3015 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
8a659571
AV
3016 if (!e)
3017 return QLA_FUNCTION_FAILED;
3018
3019 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
f999f4c1 3020 return qla2x00_post_work(vha, e);
8a659571
AV
3021}
3022
ac280b67
AV
3023#define qla2x00_post_async_work(name, type) \
3024int qla2x00_post_async_##name##_work( \
3025 struct scsi_qla_host *vha, \
3026 fc_port_t *fcport, uint16_t *data) \
3027{ \
3028 struct qla_work_evt *e; \
3029 \
3030 e = qla2x00_alloc_work(vha, type); \
3031 if (!e) \
3032 return QLA_FUNCTION_FAILED; \
3033 \
3034 e->u.logio.fcport = fcport; \
3035 if (data) { \
3036 e->u.logio.data[0] = data[0]; \
3037 e->u.logio.data[1] = data[1]; \
3038 } \
3039 return qla2x00_post_work(vha, e); \
3040}
3041
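/*
 * The macro above expands to qla2x00_post_async_<name>_work() helpers;
 * each one allocates a work event of the given type, records the fcport
 * and up to two data words, and posts it to the vha work list.
 */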
3042qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
3043qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
3044qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
3045qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
5ff1d584
AV
3046qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
3047qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
ac280b67 3048
3420d36c
AV
3049int
3050qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
3051{
3052 struct qla_work_evt *e;
3053
3054 e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
3055 if (!e)
3056 return QLA_FUNCTION_FAILED;
3057
3058 e->u.uevent.code = code;
3059 return qla2x00_post_work(vha, e);
3060}
3061
3062static void
3063qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
3064{
3065 char event_string[40];
3066 char *envp[] = { event_string, NULL };
3067
3068 switch (code) {
3069 case QLA_UEVENT_CODE_FW_DUMP:
3070 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
3071 vha->host_no);
3072 break;
3073 default:
3074 /* do nothing */
3075 break;
3076 }
3077 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
3078}
3079
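/*
 * qla2x00_do_work() splices the pending events off vha->work_list under
 * work_lock and then dispatches each event without holding the lock.
 */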
ac280b67 3080void
e315cd28 3081qla2x00_do_work(struct scsi_qla_host *vha)
0971de7f 3082{
f999f4c1
AV
3083 struct qla_work_evt *e, *tmp;
3084 unsigned long flags;
3085 LIST_HEAD(work);
0971de7f 3086
f999f4c1
AV
3087 spin_lock_irqsave(&vha->work_lock, flags);
3088 list_splice_init(&vha->work_list, &work);
3089 spin_unlock_irqrestore(&vha->work_lock, flags);
3090
3091 list_for_each_entry_safe(e, tmp, &work, list) {
0971de7f 3092 list_del_init(&e->list);
0971de7f
AV
3093
3094 switch (e->type) {
3095 case QLA_EVT_AEN:
e315cd28 3096 fc_host_post_event(vha->host, fc_get_event_number(),
0971de7f
AV
3097 e->u.aen.code, e->u.aen.data);
3098 break;
8a659571
AV
3099 case QLA_EVT_IDC_ACK:
3100 qla81xx_idc_ack(vha, e->u.idc_ack.mb);
3101 break;
ac280b67
AV
3102 case QLA_EVT_ASYNC_LOGIN:
3103 qla2x00_async_login(vha, e->u.logio.fcport,
3104 e->u.logio.data);
3105 break;
3106 case QLA_EVT_ASYNC_LOGIN_DONE:
3107 qla2x00_async_login_done(vha, e->u.logio.fcport,
3108 e->u.logio.data);
3109 break;
3110 case QLA_EVT_ASYNC_LOGOUT:
3111 qla2x00_async_logout(vha, e->u.logio.fcport);
3112 break;
3113 case QLA_EVT_ASYNC_LOGOUT_DONE:
3114 qla2x00_async_logout_done(vha, e->u.logio.fcport,
3115 e->u.logio.data);
3116 break;
5ff1d584
AV
3117 case QLA_EVT_ASYNC_ADISC:
3118 qla2x00_async_adisc(vha, e->u.logio.fcport,
3119 e->u.logio.data);
3120 break;
3121 case QLA_EVT_ASYNC_ADISC_DONE:
3122 qla2x00_async_adisc_done(vha, e->u.logio.fcport,
3123 e->u.logio.data);
3124 break;
3420d36c
AV
3125 case QLA_EVT_UEVENT:
3126 qla2x00_uevent_emit(vha, e->u.uevent.code);
3127 break;
0971de7f
AV
3128 }
3129 if (e->flags & QLA_EVT_FLAG_FREE)
3130 kfree(e);
e315cd28 3131 }
e315cd28 3132}
f999f4c1 3133
e315cd28
AC
3134/* Relogin to all the fcports of a vport
3135 * Context: dpc thread
3136 */
3137void qla2x00_relogin(struct scsi_qla_host *vha)
3138{
3139 fc_port_t *fcport;
c6b2fca8 3140 int status;
e315cd28
AC
3141 uint16_t next_loopid = 0;
3142 struct qla_hw_data *ha = vha->hw;
ac280b67 3143 uint16_t data[2];
e315cd28
AC
3144
3145 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3146 /*
3147 * If the port is not ONLINE then try to login
3148 * to it if we haven't run out of retries.
3149 */
5ff1d584
AV
3150 if (atomic_read(&fcport->state) != FCS_ONLINE &&
3151 fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
ac280b67 3152 fcport->login_retry--;
e315cd28 3153 if (fcport->flags & FCF_FABRIC_DEVICE) {
f08b7251 3154 if (fcport->flags & FCF_FCP2_DEVICE)
e315cd28
AC
3155 ha->isp_ops->fabric_logout(vha,
3156 fcport->loop_id,
3157 fcport->d_id.b.domain,
3158 fcport->d_id.b.area,
3159 fcport->d_id.b.al_pa);
3160
ac280b67 3161 if (IS_ALOGIO_CAPABLE(ha)) {
5ff1d584 3162 fcport->flags |= FCF_ASYNC_SENT;
ac280b67
AV
3163 data[0] = 0;
3164 data[1] = QLA_LOGIO_LOGIN_RETRIED;
3165 status = qla2x00_post_async_login_work(
3166 vha, fcport, data);
3167 if (status == QLA_SUCCESS)
3168 continue;
3169 /* Attempt a retry. */
3170 status = 1;
3171 } else
3172 status = qla2x00_fabric_login(vha,
3173 fcport, &next_loopid);
e315cd28
AC
3174 } else
3175 status = qla2x00_local_device_login(vha,
3176 fcport);
3177
e315cd28
AC
3178 if (status == QLA_SUCCESS) {
3179 fcport->old_loop_id = fcport->loop_id;
3180
3181 DEBUG(printk("scsi(%ld): port login OK: logged "
3182 "in ID 0x%x\n", vha->host_no, fcport->loop_id));
3183
3184 qla2x00_update_fcport(vha, fcport);
3185
3186 } else if (status == 1) {
3187 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3188 /* retry the login again */
3189 DEBUG(printk("scsi(%ld): Retrying"
3190 " %d login again loop_id 0x%x\n",
3191 vha->host_no, fcport->login_retry,
3192 fcport->loop_id));
3193 } else {
3194 fcport->login_retry = 0;
3195 }
3196
3197 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
3198 fcport->loop_id = FC_NO_LOOP_ID;
3199 }
3200 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3201 break;
0971de7f 3202 }
0971de7f
AV
3203}
3204
1da177e4
LT
3205/**************************************************************************
3206* qla2x00_do_dpc
3207* This kernel thread is a task that is scheduled by the interrupt handler
3208* to perform the background processing for interrupts.
3209*
3210* Notes:
3211* This task always runs in the context of a kernel thread. It
3212* is kicked off by the driver's detect code and starts up
3213* one instance per adapter. It immediately goes to sleep and waits for
3214* some fibre event. When either the interrupt handler or
3215* the timer routine detects an event, it sets one of the task
3216* bits and then wakes us up.
3217**************************************************************************/
3218static int
3219qla2x00_do_dpc(void *data)
3220{
2c3dfe3f 3221 int rval;
e315cd28
AC
3222 scsi_qla_host_t *base_vha;
3223 struct qla_hw_data *ha;
1da177e4 3224
e315cd28
AC
3225 ha = (struct qla_hw_data *)data;
3226 base_vha = pci_get_drvdata(ha->pdev);
1da177e4 3227
1da177e4
LT
3228 set_user_nice(current, -20);
3229
39a11240 3230 while (!kthread_should_stop()) {
1da177e4
LT
3231 DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
3232
39a11240
CH
3233 set_current_state(TASK_INTERRUPTIBLE);
3234 schedule();
3235 __set_current_state(TASK_RUNNING);
1da177e4
LT
3236
3237 DEBUG3(printk("qla2x00: DPC handler waking up\n"));
3238
3239 /* Initialization not yet finished. Don't do anything yet. */
e315cd28 3240 if (!base_vha->flags.init_done)
1da177e4
LT
3241 continue;
3242
85880801
AV
3243 if (ha->flags.eeh_busy) {
3244 DEBUG17(qla_printk(KERN_WARNING, ha,
3245 "qla2x00_do_dpc: dpc_flags: %lx\n",
3246 base_vha->dpc_flags));
3247 continue;
3248 }
3249
e315cd28 3250 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
1da177e4
LT
3251
3252 ha->dpc_active = 1;
3253
1da177e4 3254 if (ha->flags.mbox_busy) {
1da177e4
LT
3255 ha->dpc_active = 0;
3256 continue;
3257 }
3258
e315cd28 3259 qla2x00_do_work(base_vha);
0971de7f 3260
a9083016
GM
3261 if (IS_QLA82XX(ha)) {
3262 if (test_and_clear_bit(ISP_UNRECOVERABLE,
3263 &base_vha->dpc_flags)) {
3264 qla82xx_idc_lock(ha);
3265 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3266 QLA82XX_DEV_FAILED);
3267 qla82xx_idc_unlock(ha);
3268 qla_printk(KERN_INFO, ha,
3269 "HW State: FAILED\n");
3270 qla82xx_device_state_handler(base_vha);
3271 continue;
3272 }
3273
3274 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
3275 &base_vha->dpc_flags)) {
3276
3277 DEBUG(printk(KERN_INFO
3278 "scsi(%ld): dpc: sched "
3279 "qla82xx_fcoe_ctx_reset ha = %p\n",
3280 base_vha->host_no, ha));
3281 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3282 &base_vha->dpc_flags))) {
3283 if (qla82xx_fcoe_ctx_reset(base_vha)) {
3284 /* FCoE-ctx reset failed.
3285 * Escalate to chip-reset
3286 */
3287 set_bit(ISP_ABORT_NEEDED,
3288 &base_vha->dpc_flags);
3289 }
3290 clear_bit(ABORT_ISP_ACTIVE,
3291 &base_vha->dpc_flags);
3292 }
3293
3294 DEBUG(printk("scsi(%ld): dpc:"
3295 " qla82xx_fcoe_ctx_reset end\n",
3296 base_vha->host_no));
3297 }
3298 }
3299
e315cd28
AC
3300 if (test_and_clear_bit(ISP_ABORT_NEEDED,
3301 &base_vha->dpc_flags)) {
1da177e4
LT
3302
3303 DEBUG(printk("scsi(%ld): dpc: sched "
3304 "qla2x00_abort_isp ha = %p\n",
e315cd28 3305 base_vha->host_no, ha));
1da177e4 3306 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
e315cd28 3307 &base_vha->dpc_flags))) {
1da177e4 3308
a9083016 3309 if (ha->isp_ops->abort_isp(base_vha)) {
1da177e4
LT
3310 /* failed. retry later */
3311 set_bit(ISP_ABORT_NEEDED,
e315cd28 3312 &base_vha->dpc_flags);
99363ef8 3313 }
e315cd28
AC
3314 clear_bit(ABORT_ISP_ACTIVE,
3315 &base_vha->dpc_flags);
99363ef8
SJ
3316 }
3317
1da177e4 3318 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
e315cd28 3319 base_vha->host_no));
1da177e4
LT
3320 }
3321
e315cd28
AC
3322 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
3323 qla2x00_update_fcports(base_vha);
3324 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
c9c5ced9 3325 }
d97994dc 3326
e315cd28
AC
3327 if (test_and_clear_bit(RESET_MARKER_NEEDED,
3328 &base_vha->dpc_flags) &&
3329 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
1da177e4
LT
3330
3331 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
e315cd28 3332 base_vha->host_no));
1da177e4 3333
e315cd28
AC
3334 qla2x00_rst_aen(base_vha);
3335 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
1da177e4
LT
3336 }
3337
3338 /* Retry each device up to login retry count */
e315cd28
AC
3339 if ((test_and_clear_bit(RELOGIN_NEEDED,
3340 &base_vha->dpc_flags)) &&
3341 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
3342 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
1da177e4
LT
3343
3344 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
e315cd28
AC
3345 base_vha->host_no));
3346 qla2x00_relogin(base_vha);
3347
1da177e4 3348 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
e315cd28 3349 base_vha->host_no));
1da177e4
LT
3350 }
3351
e315cd28
AC
3352 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
3353 &base_vha->dpc_flags)) {
1da177e4
LT
3354
3355 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
e315cd28 3356 base_vha->host_no));
1da177e4
LT
3357
3358 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
e315cd28 3359 &base_vha->dpc_flags))) {
1da177e4 3360
e315cd28 3361 rval = qla2x00_loop_resync(base_vha);
1da177e4 3362
e315cd28
AC
3363 clear_bit(LOOP_RESYNC_ACTIVE,
3364 &base_vha->dpc_flags);
1da177e4
LT
3365 }
3366
3367 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
e315cd28 3368 base_vha->host_no));
1da177e4
LT
3369 }
3370
e315cd28
AC
3371 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
3372 atomic_read(&base_vha->loop_state) == LOOP_READY) {
3373 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
3374 qla2xxx_flash_npiv_conf(base_vha);
272976ca
AV
3375 }
3376
1da177e4 3377 if (!ha->interrupts_on)
fd34f556 3378 ha->isp_ops->enable_intrs(ha);
1da177e4 3379
e315cd28
AC
3380 if (test_and_clear_bit(BEACON_BLINK_NEEDED,
3381 &base_vha->dpc_flags))
3382 ha->isp_ops->beacon_blink(base_vha);
f6df144c 3383
e315cd28 3384 qla2x00_do_dpc_all_vps(base_vha);
2c3dfe3f 3385
1da177e4
LT
3386 ha->dpc_active = 0;
3387 } /* End of while(1) */
3388
e315cd28 3389 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
1da177e4
LT
3390
3391 /*
3392 * Make sure that nobody tries to wake us up again.
3393 */
1da177e4
LT
3394 ha->dpc_active = 0;
3395
ac280b67
AV
3396 /* Cleanup any residual CTX SRBs. */
3397 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
3398
39a11240
CH
3399 return 0;
3400}
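
The loop above is the driver's DPC ("deferred procedure call") kernel thread: it sleeps until qla2xxx_wake_dpc() wakes it, then services whichever dpc_flags bits were set by the timer, interrupt, or sysfs paths, always under a test_and_set/test_and_clear_bit handshake so each job runs exactly once. Below is a minimal, self-contained sketch of that wake-and-dispatch shape; my_ctx, MY_FLAG_RESET and the my_* functions are invented for illustration and are not driver API.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/bitops.h>

#define MY_FLAG_RESET	0

struct my_ctx {
	unsigned long flags;		/* plays the role of vha->dpc_flags */
	struct task_struct *worker;	/* plays the role of ha->dpc_thread */
};

static void my_do_reset(struct my_ctx *ctx)
{
	/* heavy recovery work runs here, safely in process context */
}

/* The thread is created elsewhere with kthread_run(my_dpc, ctx, "my_dpc"). */
static int my_dpc(void *data)
{
	struct my_ctx *ctx = data;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();			/* sleep until wake_up_process() */
		__set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(MY_FLAG_RESET, &ctx->flags))
			my_do_reset(ctx);
		/* further flag bits are serviced the same way */
	}
	return 0;
}

/* Producer side, mirroring qla2xxx_wake_dpc(): set a bit, then wake. */
static void my_request_reset(struct my_ctx *ctx)
{
	set_bit(MY_FLAG_RESET, &ctx->flags);
	if (ctx->worker)
		wake_up_process(ctx->worker);
}
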
3401
3402void
e315cd28 3403qla2xxx_wake_dpc(struct scsi_qla_host *vha)
39a11240 3404{
e315cd28 3405 struct qla_hw_data *ha = vha->hw;
c795c1e4
AV
3406 struct task_struct *t = ha->dpc_thread;
3407
e315cd28 3408 if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
c795c1e4 3409 wake_up_process(t);
1da177e4
LT
3410}
3411
1da177e4
LT
3412/*
3413* qla2x00_rst_aen
3414* Processes asynchronous reset.
3415*
3416* Input:
 3417* vha = adapter block pointer.
3418*/
3419static void
e315cd28 3420qla2x00_rst_aen(scsi_qla_host_t *vha)
1da177e4 3421{
e315cd28
AC
3422 if (vha->flags.online && !vha->flags.reset_active &&
3423 !atomic_read(&vha->loop_down_timer) &&
3424 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
1da177e4 3425 do {
e315cd28 3426 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
3427
3428 /*
3429 * Issue marker command only when we are going to start
3430 * the I/O.
3431 */
e315cd28
AC
3432 vha->marker_needed = 1;
3433 } while (!atomic_read(&vha->loop_down_timer) &&
3434 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
1da177e4
LT
3435 }
3436}
3437
f4f051eb 3438static void
e315cd28 3439qla2x00_sp_free_dma(srb_t *sp)
f4f051eb
AV
3440{
3441 struct scsi_cmnd *cmd = sp->cmd;
bad75002 3442 struct qla_hw_data *ha = sp->fcport->vha->hw;
f4f051eb
AV
3443
3444 if (sp->flags & SRB_DMA_VALID) {
385d70b4 3445 scsi_dma_unmap(cmd);
f4f051eb
AV
3446 sp->flags &= ~SRB_DMA_VALID;
3447 }
bad75002
AE
3448
3449 if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
3450 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
3451 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
3452 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
3453 }
3454
3455 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
 3456 /* The list is guaranteed to have elements. */
3457 qla2x00_clean_dsd_pool(ha, sp);
3458 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
3459 }
3460
3461 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
3462 dma_pool_free(ha->dl_dma_pool, sp->ctx,
3463 ((struct crc_context *)sp->ctx)->crc_ctx_dma);
3464 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
3465 }
3466
fca29703 3467 CMD_SP(cmd) = NULL;
f4f051eb
AV
3468}
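
qla2x00_sp_free_dma() relies on per-SRB flag bits (SRB_DMA_VALID and friends) so each DMA mapping is torn down exactly once, no matter which completion path runs. Below is a reduced sketch of that map-with-flag / unmap-once idiom using the generic scsi_dma_map()/scsi_dma_unmap() helpers; my_cmd_priv and MY_DMA_VALID are illustrative names, not driver definitions.

#include <scsi/scsi_cmnd.h>

#define MY_DMA_VALID	0x01		/* illustrative flag, not a driver define */

struct my_cmd_priv {
	unsigned int flags;
};

/* Submission path: map the scatterlist and remember that we did. */
static int my_map_cmd(struct scsi_cmnd *cmd, struct my_cmd_priv *priv)
{
	int nseg = scsi_dma_map(cmd);	/* segment count, or < 0 on error */

	if (nseg < 0)
		return nseg;
	if (nseg)
		priv->flags |= MY_DMA_VALID;
	return 0;
}

/* Completion path: unmap exactly once, then clear the flag. */
static void my_unmap_cmd(struct scsi_cmnd *cmd, struct my_cmd_priv *priv)
{
	if (priv->flags & MY_DMA_VALID) {
		scsi_dma_unmap(cmd);
		priv->flags &= ~MY_DMA_VALID;
	}
}
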
3469
3470void
73208dfd 3471qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
f4f051eb
AV
3472{
3473 struct scsi_cmnd *cmd = sp->cmd;
3474
e315cd28 3475 qla2x00_sp_free_dma(sp);
f4f051eb 3476
a9083016
GM
3477 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
3478 struct ct6_dsd *ctx = sp->ctx;
3479 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
3480 ctx->fcp_cmnd_dma);
3481 list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
3482 ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
3483 ha->gbl_dsd_avail += ctx->dsd_use_cnt;
3484 mempool_free(sp->ctx, ha->ctx_mempool);
3485 sp->ctx = NULL;
3486 }
f4f051eb 3487
a9083016 3488 mempool_free(sp, ha->srb_mempool);
f4f051eb
AV
3489 cmd->scsi_done(cmd);
3490}
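
qla2x00_sp_compl() returns the SRB (and any CT6 context) to mempools before calling cmd->scsi_done(), so the completion path never has to allocate and cannot fail under memory pressure. The sketch below shows the underlying slab-cache-plus-mempool pairing in isolation; all my_* names are hypothetical.

#include <linux/mempool.h>
#include <linux/slab.h>

struct my_srb {
	unsigned long flags;
};

static struct kmem_cache *my_srb_cache;
static mempool_t *my_srb_pool;

static int my_pool_init(void)
{
	my_srb_cache = kmem_cache_create("my_srbs", sizeof(struct my_srb),
	    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!my_srb_cache)
		return -ENOMEM;

	/* Keep at least 16 objects in reserve even under memory pressure. */
	my_srb_pool = mempool_create_slab_pool(16, my_srb_cache);
	if (!my_srb_pool) {
		kmem_cache_destroy(my_srb_cache);
		return -ENOMEM;
	}
	return 0;
}

static void my_complete(struct my_srb *sp)
{
	/* ...tear down DMA state here, as qla2x00_sp_free_dma() does... */
	mempool_free(sp, my_srb_pool);	/* hand the object back to the pool */
}
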
bdf79621 3491
1da177e4
LT
3492/**************************************************************************
3493* qla2x00_timer
3494*
3495* Description:
3496* One second timer
3497*
3498* Context: Interrupt
3499***************************************************************************/
2c3dfe3f 3500void
e315cd28 3501qla2x00_timer(scsi_qla_host_t *vha)
1da177e4 3502{
1da177e4
LT
3503 unsigned long cpu_flags = 0;
3504 fc_port_t *fcport;
1da177e4
LT
3505 int start_dpc = 0;
3506 int index;
3507 srb_t *sp;
f4f051eb 3508 int t;
85880801 3509 uint16_t w;
e315cd28 3510 struct qla_hw_data *ha = vha->hw;
73208dfd 3511 struct req_que *req;
85880801 3512
a9083016
GM
3513 if (IS_QLA82XX(ha))
3514 qla82xx_watchdog(vha);
3515
85880801
AV
3516 /* Hardware read to raise pending EEH errors during mailbox waits. */
3517 if (!pci_channel_offline(ha->pdev))
3518 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
1da177e4
LT
3519 /*
3520 * Ports - Port down timer.
3521 *
 3522 * Whenever a port is in the LOST state we start decrementing its port
 3523 * down timer every second until it reaches zero. Once it reaches zero
fa2a1ce5 3524 * the port is marked DEAD.
1da177e4
LT
3525 */
3526 t = 0;
e315cd28 3527 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
3528 if (fcport->port_type != FCT_TARGET)
3529 continue;
3530
3531 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
3532
3533 if (atomic_read(&fcport->port_down_timer) == 0)
3534 continue;
3535
fa2a1ce5 3536 if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
1da177e4 3537 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
fa2a1ce5 3538
1da177e4 3539 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
fca29703 3540 "%d remaining\n",
e315cd28 3541 vha->host_no,
1da177e4
LT
3542 t, atomic_read(&fcport->port_down_timer)));
3543 }
3544 t++;
3545 } /* End of for fcport */
3546
1da177e4
LT
3547
3548 /* Loop down handler. */
e315cd28
AC
3549 if (atomic_read(&vha->loop_down_timer) > 0 &&
3550 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
3551 && vha->flags.online) {
1da177e4 3552
e315cd28
AC
3553 if (atomic_read(&vha->loop_down_timer) ==
3554 vha->loop_down_abort_time) {
1da177e4
LT
3555
3556 DEBUG(printk("scsi(%ld): Loop Down - aborting the "
3557 "queues before time expire\n",
e315cd28 3558 vha->host_no));
1da177e4 3559
e315cd28
AC
3560 if (!IS_QLA2100(ha) && vha->link_down_timeout)
3561 atomic_set(&vha->loop_state, LOOP_DEAD);
1da177e4 3562
f08b7251
AV
3563 /*
3564 * Schedule an ISP abort to return any FCP2-device
3565 * commands.
3566 */
2c3dfe3f 3567 /* NPIV - scan physical port only */
e315cd28 3568 if (!vha->vp_idx) {
2c3dfe3f
SJ
3569 spin_lock_irqsave(&ha->hardware_lock,
3570 cpu_flags);
73208dfd 3571 req = ha->req_q_map[0];
2c3dfe3f
SJ
3572 for (index = 1;
3573 index < MAX_OUTSTANDING_COMMANDS;
3574 index++) {
3575 fc_port_t *sfcp;
3576
e315cd28 3577 sp = req->outstanding_cmds[index];
2c3dfe3f
SJ
3578 if (!sp)
3579 continue;
bad75002 3580 if (sp->ctx && !IS_PROT_IO(sp))
cf53b069 3581 continue;
2c3dfe3f 3582 sfcp = sp->fcport;
f08b7251 3583 if (!(sfcp->flags & FCF_FCP2_DEVICE))
2c3dfe3f 3584 continue;
bdf79621 3585
2c3dfe3f 3586 set_bit(ISP_ABORT_NEEDED,
e315cd28 3587 &vha->dpc_flags);
2c3dfe3f
SJ
3588 break;
3589 }
3590 spin_unlock_irqrestore(&ha->hardware_lock,
e315cd28 3591 cpu_flags);
1da177e4 3592 }
1da177e4
LT
3593 start_dpc++;
3594 }
3595
3596 /* if the loop has been down for 4 minutes, reinit adapter */
e315cd28 3597 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
0d6e61bc 3598 if (!(vha->device_flags & DFLG_NO_CABLE)) {
1da177e4
LT
3599 DEBUG(printk("scsi(%ld): Loop down - "
3600 "aborting ISP.\n",
e315cd28 3601 vha->host_no));
1da177e4
LT
3602 qla_printk(KERN_WARNING, ha,
3603 "Loop down - aborting ISP.\n");
3604
e315cd28 3605 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
3606 }
3607 }
fca29703 3608 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
e315cd28
AC
3609 vha->host_no,
3610 atomic_read(&vha->loop_down_timer)));
1da177e4
LT
3611 }
3612
f6df144c
AV
3613 /* Check if beacon LED needs to be blinked */
3614 if (ha->beacon_blink_led == 1) {
e315cd28 3615 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
f6df144c
AV
3616 start_dpc++;
3617 }
3618
550bf57d 3619 /* Process any deferred work. */
e315cd28 3620 if (!list_empty(&vha->work_list))
550bf57d
AV
3621 start_dpc++;
3622
1da177e4 3623 /* Schedule the DPC routine if needed */
e315cd28
AC
3624 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
3625 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
3626 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
1da177e4 3627 start_dpc ||
e315cd28
AC
3628 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
3629 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
a9083016
GM
3630 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
3631 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
e315cd28
AC
3632 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
3633 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
3634 qla2xxx_wake_dpc(vha);
1da177e4 3635
e315cd28 3636 qla2x00_restart_timer(vha, WATCH_INTERVAL);
1da177e4
LT
3637}
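
qla2x00_timer() is the once-per-second watchdog: it only inspects state, sets dpc_flags bits and wakes the DPC thread, then re-arms itself via qla2x00_restart_timer(vha, WATCH_INTERVAL). Below is a hedged sketch of such a self-re-arming one-second timer, using the setup_timer()/mod_timer() interface contemporary with this code (newer kernels use timer_setup() instead); the my_watchdog names are invented.

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_watchdog;

/* Runs in softirq context once a second; keep the work light and defer
 * anything heavy to a thread, as the driver does by waking its DPC thread. */
static void my_watchdog_fn(unsigned long data)
{
	/* ...poll state, set flag bits, wake the worker thread... */

	mod_timer(&my_watchdog, jiffies + HZ);	/* re-arm for one second later */
}

static void my_watchdog_start(void)
{
	setup_timer(&my_watchdog, my_watchdog_fn, 0UL);
	mod_timer(&my_watchdog, jiffies + HZ);
}

static void my_watchdog_stop(void)
{
	del_timer_sync(&my_watchdog);		/* wait out a running handler */
}
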
3638
5433383e
AV
3639/* Firmware interface routines. */
3640
a9083016 3641#define FW_BLOBS 8
5433383e
AV
3642#define FW_ISP21XX 0
3643#define FW_ISP22XX 1
3644#define FW_ISP2300 2
3645#define FW_ISP2322 3
48c02fde 3646#define FW_ISP24XX 4
c3a2f0df 3647#define FW_ISP25XX 5
3a03eb79 3648#define FW_ISP81XX 6
a9083016 3649#define FW_ISP82XX 7
5433383e 3650
bb8ee499
AV
3651#define FW_FILE_ISP21XX "ql2100_fw.bin"
3652#define FW_FILE_ISP22XX "ql2200_fw.bin"
3653#define FW_FILE_ISP2300 "ql2300_fw.bin"
3654#define FW_FILE_ISP2322 "ql2322_fw.bin"
3655#define FW_FILE_ISP24XX "ql2400_fw.bin"
c3a2f0df 3656#define FW_FILE_ISP25XX "ql2500_fw.bin"
3a03eb79 3657#define FW_FILE_ISP81XX "ql8100_fw.bin"
a9083016 3658#define FW_FILE_ISP82XX "ql8200_fw.bin"
bb8ee499 3659
e1e82b6f 3660static DEFINE_MUTEX(qla_fw_lock);
5433383e
AV
3661
3662static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
bb8ee499
AV
3663 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
3664 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
3665 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
3666 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
3667 { .name = FW_FILE_ISP24XX, },
c3a2f0df 3668 { .name = FW_FILE_ISP25XX, },
3a03eb79 3669 { .name = FW_FILE_ISP81XX, },
a9083016 3670 { .name = FW_FILE_ISP82XX, },
5433383e
AV
3671};
3672
3673struct fw_blob *
e315cd28 3674qla2x00_request_firmware(scsi_qla_host_t *vha)
5433383e 3675{
e315cd28 3676 struct qla_hw_data *ha = vha->hw;
5433383e
AV
3677 struct fw_blob *blob;
3678
3679 blob = NULL;
3680 if (IS_QLA2100(ha)) {
3681 blob = &qla_fw_blobs[FW_ISP21XX];
3682 } else if (IS_QLA2200(ha)) {
3683 blob = &qla_fw_blobs[FW_ISP22XX];
48c02fde 3684 } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
5433383e 3685 blob = &qla_fw_blobs[FW_ISP2300];
48c02fde 3686 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
5433383e 3687 blob = &qla_fw_blobs[FW_ISP2322];
4d4df193 3688 } else if (IS_QLA24XX_TYPE(ha)) {
5433383e 3689 blob = &qla_fw_blobs[FW_ISP24XX];
c3a2f0df
AV
3690 } else if (IS_QLA25XX(ha)) {
3691 blob = &qla_fw_blobs[FW_ISP25XX];
3a03eb79
AV
3692 } else if (IS_QLA81XX(ha)) {
3693 blob = &qla_fw_blobs[FW_ISP81XX];
a9083016
GM
3694 } else if (IS_QLA82XX(ha)) {
3695 blob = &qla_fw_blobs[FW_ISP82XX];
5433383e
AV
3696 }
3697
e1e82b6f 3698 mutex_lock(&qla_fw_lock);
5433383e
AV
 3699 if (!blob || blob->fw)
3700 goto out;
3701
3702 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
3703 DEBUG2(printk("scsi(%ld): Failed to load firmware image "
e315cd28 3704 "(%s).\n", vha->host_no, blob->name));
5433383e
AV
3705 blob->fw = NULL;
3706 blob = NULL;
3707 goto out;
3708 }
3709
3710out:
e1e82b6f 3711 mutex_unlock(&qla_fw_lock);
5433383e
AV
3712 return blob;
3713}
3714
3715static void
3716qla2x00_release_firmware(void)
3717{
3718 int idx;
3719
e1e82b6f 3720 mutex_lock(&qla_fw_lock);
5433383e
AV
3721 for (idx = 0; idx < FW_BLOBS; idx++)
3722 if (qla_fw_blobs[idx].fw)
3723 release_firmware(qla_fw_blobs[idx].fw);
e1e82b6f 3724 mutex_unlock(&qla_fw_lock);
5433383e
AV
3725}
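
qla2x00_request_firmware() caches each image on first use under qla_fw_lock, and qla2x00_release_firmware() drops every cached image at module unload. Below is a minimal sketch of that request_firmware() caching pattern, assuming a single hypothetical image name "my_fw.bin"; none of the my_* names are driver API.

#include <linux/firmware.h>
#include <linux/mutex.h>
#include <linux/device.h>

static DEFINE_MUTEX(my_fw_lock);
static const struct firmware *my_fw;	/* cached image, illustrative only */

static const struct firmware *my_get_fw(struct device *dev)
{
	mutex_lock(&my_fw_lock);
	if (!my_fw && request_firmware(&my_fw, "my_fw.bin", dev))
		my_fw = NULL;		/* load failed; leave the cache empty */
	mutex_unlock(&my_fw_lock);
	return my_fw;			/* NULL means no image available */
}

static void my_put_fw(void)
{
	mutex_lock(&my_fw_lock);
	release_firmware(my_fw);	/* release_firmware(NULL) is a no-op */
	my_fw = NULL;
	mutex_unlock(&my_fw_lock);
}
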
3726
14e660e6
SJ
3727static pci_ers_result_t
3728qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3729{
85880801
AV
3730 scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3731 struct qla_hw_data *ha = vha->hw;
3732
3733 DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
3734 state));
b9b12f73 3735
14e660e6
SJ
3736 switch (state) {
3737 case pci_channel_io_normal:
85880801 3738 ha->flags.eeh_busy = 0;
14e660e6
SJ
3739 return PCI_ERS_RESULT_CAN_RECOVER;
3740 case pci_channel_io_frozen:
85880801 3741 ha->flags.eeh_busy = 1;
90a86fc0 3742 qla2x00_free_irqs(vha);
14e660e6
SJ
3743 pci_disable_device(pdev);
3744 return PCI_ERS_RESULT_NEED_RESET;
3745 case pci_channel_io_perm_failure:
85880801
AV
3746 ha->flags.pci_channel_io_perm_failure = 1;
3747 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
14e660e6
SJ
3748 return PCI_ERS_RESULT_DISCONNECT;
3749 }
3750 return PCI_ERS_RESULT_NEED_RESET;
3751}
3752
3753static pci_ers_result_t
3754qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3755{
3756 int risc_paused = 0;
3757 uint32_t stat;
3758 unsigned long flags;
e315cd28
AC
3759 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3760 struct qla_hw_data *ha = base_vha->hw;
14e660e6
SJ
3761 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3762 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
3763
3764 spin_lock_irqsave(&ha->hardware_lock, flags);
3765 if (IS_QLA2100(ha) || IS_QLA2200(ha)){
3766 stat = RD_REG_DWORD(&reg->hccr);
3767 if (stat & HCCR_RISC_PAUSE)
3768 risc_paused = 1;
3769 } else if (IS_QLA23XX(ha)) {
3770 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
3771 if (stat & HSR_RISC_PAUSED)
3772 risc_paused = 1;
3773 } else if (IS_FWI2_CAPABLE(ha)) {
3774 stat = RD_REG_DWORD(&reg24->host_status);
3775 if (stat & HSRX_RISC_PAUSED)
3776 risc_paused = 1;
3777 }
3778 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3779
3780 if (risc_paused) {
3781 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
3782 "Dumping firmware!\n");
e315cd28 3783 ha->isp_ops->fw_dump(base_vha, 0);
14e660e6
SJ
3784
3785 return PCI_ERS_RESULT_NEED_RESET;
3786 } else
3787 return PCI_ERS_RESULT_RECOVERED;
3788}
3789
3790static pci_ers_result_t
3791qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3792{
3793 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
e315cd28
AC
3794 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3795 struct qla_hw_data *ha = base_vha->hw;
90a86fc0
JC
3796 struct rsp_que *rsp;
3797 int rc, retries = 10;
09483916 3798
85880801
AV
3799 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
3800
90a86fc0
JC
 3801 /* Workaround: the qla2xxx driver, which accesses hardware early in
 3802 * recovery, needs the error state to be pci_channel_io_normal.
 3803 * Otherwise mailbox commands time out.
 3804 */
3805 pdev->error_state = pci_channel_io_normal;
3806
3807 pci_restore_state(pdev);
3808
8c1496bd
RL
 3809 /* pci_restore_state() clears the device's saved_state flag, so save
 3810 * the restored state again here to set the saved_state flag back.
 3811 */
3812 pci_save_state(pdev);
3813
09483916
BH
3814 if (ha->mem_only)
3815 rc = pci_enable_device_mem(pdev);
3816 else
3817 rc = pci_enable_device(pdev);
14e660e6 3818
09483916 3819 if (rc) {
14e660e6
SJ
3820 qla_printk(KERN_WARNING, ha,
3821 "Can't re-enable PCI device after reset.\n");
14e660e6
SJ
3822 return ret;
3823 }
14e660e6 3824
90a86fc0
JC
3825 rsp = ha->rsp_q_map[0];
3826 if (qla2x00_request_irqs(ha, rsp))
3827 return ret;
3828
e315cd28 3829 if (ha->isp_ops->pci_config(base_vha))
14e660e6
SJ
3830 return ret;
3831
90a86fc0
JC
3832 while (ha->flags.mbox_busy && retries--)
3833 msleep(1000);
85880801 3834
e315cd28 3835 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
a9083016 3836 if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
14e660e6 3837 ret = PCI_ERS_RESULT_RECOVERED;
e315cd28 3838 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
14e660e6 3839
90a86fc0 3840
85880801
AV
3841 DEBUG17(qla_printk(KERN_WARNING, ha,
3842 "slot_reset-return:ret=%x\n", ret));
3843
14e660e6
SJ
3844 return ret;
3845}
3846
3847static void
3848qla2xxx_pci_resume(struct pci_dev *pdev)
3849{
e315cd28
AC
3850 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3851 struct qla_hw_data *ha = base_vha->hw;
14e660e6
SJ
3852 int ret;
3853
85880801
AV
3854 DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
3855
e315cd28 3856 ret = qla2x00_wait_for_hba_online(base_vha);
14e660e6
SJ
3857 if (ret != QLA_SUCCESS) {
3858 qla_printk(KERN_ERR, ha,
3859 "the device failed to resume I/O "
3860 "from slot/link_reset");
3861 }
85880801 3862
3e46f031
LC
3863 pci_cleanup_aer_uncorrect_error_status(pdev);
3864
85880801 3865 ha->flags.eeh_busy = 0;
14e660e6
SJ
3866}
3867
3868static struct pci_error_handlers qla2xxx_err_handler = {
3869 .error_detected = qla2xxx_pci_error_detected,
3870 .mmio_enabled = qla2xxx_pci_mmio_enabled,
3871 .slot_reset = qla2xxx_pci_slot_reset,
3872 .resume = qla2xxx_pci_resume,
3873};
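
These four callbacks implement the PCI AER/EEH recovery flow: error_detected() reports whether the slot can recover, mmio_enabled() lets the driver inspect the still-frozen device, slot_reset() re-initializes it after the link reset, and resume() restarts I/O. The skeleton below shows the minimal wiring a simple driver needs; the my_* names are hypothetical and the return values follow the pci_ers_result_t contract.

#include <linux/pci.h>

static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
    pci_channel_state_t state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;	/* give up on the device */
	/* Quiesce I/O here; a frozen channel means MMIO/config access is dead. */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	pci_restore_state(pdev);
	/* Re-initialize the adapter here before declaring recovery. */
	return PCI_ERS_RESULT_RECOVERED;
}

static void my_resume(struct pci_dev *pdev)
{
	/* Restart queued I/O once the core says the slot is healthy again. */
}

static struct pci_error_handlers my_err_handler = {
	.error_detected	= my_error_detected,
	.slot_reset	= my_slot_reset,
	.resume		= my_resume,
};
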
3874
5433383e 3875static struct pci_device_id qla2xxx_pci_tbl[] = {
47f5e069
AV
3876 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
3877 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
3878 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
3879 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
3880 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
3881 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
3882 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
3883 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
3884 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
4d4df193 3885 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
47f5e069
AV
3886 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
3887 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
c3a2f0df 3888 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
3a03eb79 3889 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
a9083016 3890 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
5433383e
AV
3891 { 0 },
3892};
3893MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
3894
fca29703 3895static struct pci_driver qla2xxx_pci_driver = {
cb63067a 3896 .name = QLA2XXX_DRIVER_NAME,
0a21ef1e
JB
3897 .driver = {
3898 .owner = THIS_MODULE,
3899 },
fca29703 3900 .id_table = qla2xxx_pci_tbl,
7ee61397 3901 .probe = qla2x00_probe_one,
4c993f76 3902 .remove = qla2x00_remove_one,
14e660e6 3903 .err_handler = &qla2xxx_err_handler,
fca29703
AV
3904};
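
The zero-terminated pci_device_id table plus MODULE_DEVICE_TABLE() is what lets userspace autoload the module for matching hardware, and the pci_driver structure binds that table to the probe/remove entry points and the error handlers. A stripped-down sketch with hypothetical IDs and callbacks:

#include <linux/pci.h>
#include <linux/module.h>

/* Hypothetical vendor/device IDs, for illustration only. */
static const struct pci_device_id my_pci_tbl[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ 0 },					/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, my_pci_tbl);		/* enables module autoload */

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;
	/* map BARs, request IRQs, register with the SCSI midlayer, ... */
	return 0;
}

static void my_remove(struct pci_dev *pdev)
{
	/* undo everything done in probe, in reverse order */
	pci_disable_device(pdev);
}

static struct pci_driver my_pci_driver = {
	.name		= "my_driver",
	.id_table	= my_pci_tbl,
	.probe		= my_probe,
	.remove		= my_remove,
};
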
3905
6a03b4cd
HZ
3906static struct file_operations apidev_fops = {
3907 .owner = THIS_MODULE,
3908};
3909
1da177e4
LT
3910/**
3911 * qla2x00_module_init - Module initialization.
3912 **/
3913static int __init
3914qla2x00_module_init(void)
3915{
fca29703
AV
3916 int ret = 0;
3917
1da177e4 3918 /* Allocate cache for SRBs. */
354d6b21 3919 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
20c2df83 3920 SLAB_HWCACHE_ALIGN, NULL);
1da177e4
LT
3921 if (srb_cachep == NULL) {
3922 printk(KERN_ERR
3923 "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
3924 return -ENOMEM;
3925 }
3926
3927 /* Derive version string. */
3928 strcpy(qla2x00_version_str, QLA2XXX_VERSION);
11010fec 3929 if (ql2xextended_error_logging)
0181944f
AV
3930 strcat(qla2x00_version_str, "-debug");
3931
1c97a12a
AV
3932 qla2xxx_transport_template =
3933 fc_attach_transport(&qla2xxx_transport_functions);
2c3dfe3f
SJ
3934 if (!qla2xxx_transport_template) {
3935 kmem_cache_destroy(srb_cachep);
1da177e4 3936 return -ENODEV;
2c3dfe3f 3937 }
6a03b4cd
HZ
3938
3939 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
3940 if (apidev_major < 0) {
3941 printk(KERN_WARNING "qla2xxx: Unable to register char device "
3942 "%s\n", QLA2XXX_APIDEV);
3943 }
3944
2c3dfe3f
SJ
3945 qla2xxx_transport_vport_template =
3946 fc_attach_transport(&qla2xxx_transport_vport_functions);
3947 if (!qla2xxx_transport_vport_template) {
3948 kmem_cache_destroy(srb_cachep);
3949 fc_release_transport(qla2xxx_transport_template);
1da177e4 3950 return -ENODEV;
2c3dfe3f 3951 }
1da177e4 3952
fd9a29f0
AV
3953 printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n",
3954 qla2x00_version_str);
7ee61397 3955 ret = pci_register_driver(&qla2xxx_pci_driver);
fca29703
AV
3956 if (ret) {
3957 kmem_cache_destroy(srb_cachep);
3958 fc_release_transport(qla2xxx_transport_template);
2c3dfe3f 3959 fc_release_transport(qla2xxx_transport_vport_template);
fca29703
AV
3960 }
3961 return ret;
1da177e4
LT
3962}
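
qla2x00_module_init() acquires its resources in a fixed order (SRB slab cache, transport templates, API char device, PCI driver) and unwinds whatever was already built, in reverse, whenever a later step fails; qla2x00_module_exit() then releases everything in the same reverse order. A minimal sketch of that init/exit unwind pattern with two hypothetical resources:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>

static struct kmem_cache *my_cache;
static int my_major;

static struct file_operations my_fops = {
	.owner = THIS_MODULE,
};

static int __init my_module_init(void)
{
	my_cache = kmem_cache_create("my_objs", 64, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!my_cache)
		return -ENOMEM;

	my_major = register_chrdev(0, "my_apidev", &my_fops);
	if (my_major < 0) {
		kmem_cache_destroy(my_cache);	/* unwind the earlier step */
		return my_major;
	}
	return 0;
}

static void __exit my_module_exit(void)
{
	unregister_chrdev(my_major, "my_apidev");	/* reverse order of init */
	kmem_cache_destroy(my_cache);
}

module_init(my_module_init);
module_exit(my_module_exit);
MODULE_LICENSE("GPL");
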
3963
3964/**
3965 * qla2x00_module_exit - Module cleanup.
3966 **/
3967static void __exit
3968qla2x00_module_exit(void)
3969{
6a03b4cd 3970 unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
7ee61397 3971 pci_unregister_driver(&qla2xxx_pci_driver);
5433383e 3972 qla2x00_release_firmware();
354d6b21 3973 kmem_cache_destroy(srb_cachep);
a9083016
GM
3974 if (ctx_cachep)
3975 kmem_cache_destroy(ctx_cachep);
1da177e4 3976 fc_release_transport(qla2xxx_transport_template);
2c3dfe3f 3977 fc_release_transport(qla2xxx_transport_vport_template);
1da177e4
LT
3978}
3979
3980module_init(qla2x00_module_init);
3981module_exit(qla2x00_module_exit);
3982
3983MODULE_AUTHOR("QLogic Corporation");
3984MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
3985MODULE_LICENSE("GPL");
3986MODULE_VERSION(QLA2XXX_VERSION);
bb8ee499
AV
3987MODULE_FIRMWARE(FW_FILE_ISP21XX);
3988MODULE_FIRMWARE(FW_FILE_ISP22XX);
3989MODULE_FIRMWARE(FW_FILE_ISP2300);
3990MODULE_FIRMWARE(FW_FILE_ISP2322);
3991MODULE_FIRMWARE(FW_FILE_ISP24XX);
61623fc3 3992MODULE_FIRMWARE(FW_FILE_ISP25XX);