/* drivers/scsi/qla2xxx/qla_init.c */
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

#include <target/target_core_base.h>
#include "qla_target.h"

/*
* QLogic ISP2x00 Hardware Support Function Prototypes.
*/
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
	struct event_arg *);

/* SRB Extensions ---------------------------------------------------------- */

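/*
 * Generic SRB timeout handler: pull the SRB off the outstanding-command
 * array, run its per-IOCB timeout callback, and release it.
 */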
void
qla2x00_sp_timeout(unsigned long __data)
{
	srb_t *sp = (srb_t *)__data;
	struct srb_iocb *iocb;
	scsi_qla_host_t *vha = (scsi_qla_host_t *)sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = ha->req_q_map[0];
	req->outstanding_cmds[sp->handle] = NULL;
	iocb = &sp->u.iocb_cmd;
	iocb->timeout(sp);
	sp->free(vha, sp);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

void
qla2x00_sp_free(void *data, void *ptr)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;

	del_timer(&iocb->timer);
	qla2x00_rel_sp(vha, sp);
}

/* Asynchronous Login/Logout Routines -------------------------------------- */

unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host *vha)
{
	unsigned long tmo;
	struct qla_hw_data *ha = vha->hw;

	/* Firmware should use switch negotiated r_a_tov for timeout. */
	tmo = ha->r_a_tov / 10 * 2;
	if (IS_QLAFX00(ha)) {
		tmo = FX00_DEF_RATOV * 2;
	} else if (!IS_FWI2_CAPABLE(ha)) {
		/*
		 * Except for earlier ISPs where the timeout is seeded from the
		 * initialization control block.
		 */
		tmo = ha->login_timeout;
	}
	return tmo;
}

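/*
 * Timeout handler for async login/logout style IOCBs: report the timeout
 * and feed a completion/error event back into the fcport state machine.
 */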
726b8548 97void
9ba56b95 98qla2x00_async_iocb_timeout(void *data)
ac280b67 99{
9ba56b95 100 srb_t *sp = (srb_t *)data;
ac280b67 101 fc_port_t *fcport = sp->fcport;
726b8548
QT
102 struct srb_iocb *lio = &sp->u.iocb_cmd;
103 struct event_arg ea;
ac280b67 104
7c3df132 105 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
726b8548
QT
106 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
107 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
ac280b67 108
5ff1d584 109 fcport->flags &= ~FCF_ASYNC_SENT;
726b8548
QT
110
111 switch (sp->type) {
112 case SRB_LOGIN_CMD:
6ac52608
AV
113 /* Retry as needed. */
114 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
115 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
116 QLA_LOGIO_LOGIN_RETRIED : 0;
726b8548
QT
117 memset(&ea, 0, sizeof(ea));
118 ea.event = FCME_PLOGI_DONE;
119 ea.fcport = sp->fcport;
120 ea.data[0] = lio->u.logio.data[0];
121 ea.data[1] = lio->u.logio.data[1];
122 ea.sp = sp;
123 qla24xx_handle_plogi_done_event(fcport->vha, &ea);
124 break;
125 case SRB_LOGOUT_CMD:
a6ca8878 126 qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
726b8548
QT
127 break;
128 case SRB_CT_PTHRU_CMD:
129 case SRB_MB_IOCB:
130 case SRB_NACK_PLOGI:
131 case SRB_NACK_PRLI:
132 case SRB_NACK_LOGO:
133 sp->done(sp->vha, sp, QLA_FUNCTION_TIMEOUT);
134 break;
6ac52608 135 }
ac280b67
AV
136}
137
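/*
 * Completion callback for an async PLOGI SRB: hand the mailbox/IOP status
 * to the fcport event handler (unless the driver is unloading), then free
 * the SRB.
 */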
static void
qla2x00_async_login_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC res %d \n",
	    __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PLOGI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp->fcport->vha, sp);
}

int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		goto done;

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		goto done;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_login_sp_done;
	lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags &= ~FCF_ASYNC_SENT;
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
	    "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry);
	return rval;

done_free_sp:
	sp->free(fcport->vha, sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

static void
qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;

	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
		    lio->u.logio.data);
	sp->free(sp->fcport->vha, sp);
}

int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_LOGOUT_CMD;
	sp->name = "logout";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_logout_sp_done;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->port_name);
	return rval;

done_free_sp:
	sp->free(fcport->vha, sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

static void
qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;

	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
		    lio->u.logio.data);
	sp->free(sp->fcport->vha, sp);
}

int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_adisc_sp_done;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
	return rval;

done_free_sp:
	sp->free(fcport->vha, sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

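/*
 * Process the result of a Get Name List (GNL) request for one fcport:
 * match the firmware's N_Port handle/ID against the local view, resolve
 * loop-ID and N_Port-ID conflicts, and move the port to the next step of
 * the login state machine.
 */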
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport, *conflict_fcport;
	struct get_name_list_extended *e;
	u16 i, n, found = 0, loop_id;
	port_id_t id;
	u64 wwn;
	u8 opt = 0;

	fcport = ea->fcport;

	if (ea->rc) { /* rval */
		if (fcport->login_retry == 0) {
			fcport->login_retry = vha->hw->login_retry_count;
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
			    fcport->port_name, fcport->login_retry);
		}
		return;
	}

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %8phC rscn gen changed rscn %d|%d \n",
		    __func__, fcport->port_name,
		    fcport->last_rscn_gen, fcport->rscn_gen);
		qla24xx_post_gidpn_work(vha, fcport);
		return;
	} else if (fcport->last_login_gen != fcport->login_gen) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %8phC login gen changed login %d|%d \n",
		    __func__, fcport->port_name,
		    fcport->last_login_gen, fcport->login_gen);
		return;
	}

	n = ea->data[0] / sizeof(struct get_name_list_extended);

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
	    __func__, __LINE__, fcport->port_name, n,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, fcport->loop_id);

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
			continue;

		found = 1;
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		loop_id = le16_to_cpu(e->nport_handle);
		loop_id = (loop_id & 0x7fff);

		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
		    __func__, fcport->port_name,
		    e->current_login_state, fcport->fw_login_state,
		    id.b.domain, id.b.area, id.b.al_pa,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, loop_id, fcport->loop_id);

		if ((id.b24 != fcport->d_id.b24) ||
		    ((fcport->loop_id != FC_NO_LOOP_ID) &&
			(fcport->loop_id != loop_id))) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC post del sess\n",
			    __func__, __LINE__, fcport->port_name);
			qlt_schedule_sess_for_deletion(fcport, 1);
			return;
		}

		fcport->loop_id = loop_id;

		wwn = wwn_to_u64(fcport->port_name);
		qlt_find_sess_invalidate_other(vha, wwn,
		    id, loop_id, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport shares the same loop_id and
			 * nport id. The conflicting fcport needs to finish
			 * cleanup before this fcport can proceed to login.
			 */
			conflict_fcport->conflict = fcport;
			fcport->login_pause = 1;
		}

		switch (e->current_login_state) {
		case DSC_LS_PRLI_COMP:
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC post gpdb\n",
			    __func__, __LINE__, fcport->port_name);
			opt = PDO_FORCE_ADISC;
			qla24xx_post_gpdb_work(vha, fcport, opt);
			break;

		case DSC_LS_PORT_UNAVAIL:
		default:
			if (fcport->loop_id == FC_NO_LOOP_ID) {
				qla2x00_find_new_loop_id(vha, fcport);
				fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
			}
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC \n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		}
	}

	if (!found) {
		/* fw has no record of this port */
		if (fcport->loop_id == FC_NO_LOOP_ID) {
			qla2x00_find_new_loop_id(vha, fcport);
			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		} else {
			for (i = 0; i < n; i++) {
				e = &vha->gnl.l[i];
				id.b.domain = e->port_id[0];
				id.b.area = e->port_id[1];
				id.b.al_pa = e->port_id[2];
				id.b.rsvd_1 = 0;
				loop_id = le16_to_cpu(e->nport_handle);

				if (fcport->d_id.b24 == id.b24) {
					conflict_fcport =
					    qla2x00_find_fcport_by_wwpn(vha,
						e->port_name, 0);

					ql_dbg(ql_dbg_disc, vha, 0xffff,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    conflict_fcport->port_name);
					qlt_schedule_sess_for_deletion
						(conflict_fcport, 1);
				}

				if (fcport->loop_id == loop_id) {
					/* FW already picked this loop id for another fcport */
					qla2x00_find_new_loop_id(vha, fcport);
				}
			}
		}
		qla24xx_fcport_handle_login(vha, fcport);
	}
} /* gnl_event */

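/*
 * Completion callback for the Get Name List mailbox IOCB: record the loop
 * IDs reported by firmware and dispatch an FCME_GNL_DONE event for every
 * fcport that was queued on this GNL request.
 */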
static void
qla24xx_async_gnl_sp_done(void *v, void *s, int res)
{
	struct scsi_qla_host *vha = (struct scsi_qla_host *)v;
	struct srb *sp = (struct srb *)s;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);

	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;
	ea.event = FCME_GNL_DONE;

	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;

	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		list_del_init(&fcport->gnl_entry);
		fcport->flags &= ~FCF_ASYNC_SENT;
		ea.fcport = fcport;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(vha, sp);
}

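/*
 * Issue an asynchronous Get Name List (MBC_PORT_NODE_NAME_LIST) mailbox
 * IOCB. Multiple fcports may share one outstanding request; they are
 * queued on vha->gnl.fcports and completed from the sp_done callback.
 */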
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online)
		goto done;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->disc_state = DSC_GNL;
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		rval = QLA_SUCCESS;
		goto done;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;
	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;

	sp->done = qla24xx_async_gnl_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	return rval;

done_free_sp:
	sp->free(fcport->vha, sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}

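/*
 * Completion callback for Get Port Database: validate the login state in
 * the returned port database, refresh the fcport's identity (node name,
 * port ID, class of service), then raise an FCME_GPDB_DONE event.
 */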
static
void qla24xx_async_gpdb_sp_done(void *v, void *s, int res)
{
	struct scsi_qla_host *vha = (struct scsi_qla_host *)v;
	struct srb *sp = (struct srb *)s;
	struct qla_hw_data *ha = vha->hw;
	uint64_t zero = 0;
	struct port_database_24xx *pd;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	int rval = QLA_SUCCESS;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~FCF_ASYNC_SENT;

	if (res) {
		rval = res;
		goto gpd_error_out;
	}

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	/* Check for logged in state. */
	if (pd->current_login_state != PDS_PRLI_COMPLETE &&
	    pd->last_login_state != PDS_PRLI_COMPLETE) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "Unable to verify login-state (%x/%x) for "
		    "loop_id %x.\n", pd->current_login_state,
		    pd->last_login_state, fcport->loop_id);
		rval = QLA_FUNCTION_FAILED;
		goto gpd_error_out;
	}

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		memcmp(fcport->port_name, pd->port_name, 8))) {
		/* We lost the device mid way. */
		rval = QLA_NOT_LOGGED_IN;
		goto gpd_error_out;
	}

	/* Names are little-endian. */
	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);

	/* Get port_id of device. */
	fcport->d_id.b.domain = pd->port_id[0];
	fcport->d_id.b.area = pd->port_id[1];
	fcport->d_id.b.al_pa = pd->port_id[2];
	fcport->d_id.b.rsvd_1 = 0;

	/* If not target must be initiator or unknown type. */
	if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
		fcport->port_type = FCT_INITIATOR;
	else
		fcport->port_type = FCT_TARGET;

	/* Passback COS information. */
	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
		FC_COS_CLASS2 : FC_COS_CLASS3;

	if (pd->prli_svc_param_word_3[0] & BIT_7) {
		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
		fcport->conf_compl_supported = 1;
	}

gpd_error_out:
	memset(&ea, 0, sizeof(ea));
	ea.event = FCME_GPDB_DONE;
	ea.rc = rval;
	ea.fcport = fcport;
	ea.sp = sp;

	qla2x00_fcport_event_handler(vha, &ea);

	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(vha, sp);
}

static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport,
    u8 opt)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	e->u.fcport.opt = opt;
	return qla2x00_post_work(vha, e);
}

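/*
 * Issue an asynchronous Get Port Database mailbox IOCB for the given
 * fcport; the port database buffer is allocated from the s_dma_pool and
 * released in the completion callback.
 */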
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->disc_state = DSC_GPDB;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}
	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));

	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	mbx->u.mbx.in = (void *)pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(vha, sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}

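/*
 * Act on the result of a Get Port Database request: verify the generation
 * counters are still current, tear the session down on failure, or mark
 * the login complete and schedule the fcport/GPSC update work.
 */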
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	int rval = ea->rc;
	fc_port_t *fcport = ea->fcport;
	unsigned long flags;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
	    fcport->disc_state, fcport->fw_login_state, rval);

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %8phC generation changed rscn %d|%d login %d|%d \n",
		    __func__, fcport->port_name, fcport->last_rscn_gen,
		    fcport->rscn_gen, fcport->last_login_gen,
		    fcport->login_gen);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gidpn_work(vha, fcport);
		return;
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion_lock(fcport);
		return;
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		if (!IS_IIDMA_CAPABLE(vha->hw) ||
		    !vha->hw->flags.gpsc_supported) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);

			qla24xx_post_upd_fcport_work(vha, fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);

			qla24xx_post_gpsc_work(vha, fcport);
		}
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
} /* gpdb event */

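/*
 * Drive the next login step for an fcport based on its discovery state
 * (GNL, GPDB, PLOGI, GIDPN, ...), honoring retry counts, pending firmware
 * login states, and pure target mode.
 */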
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	if (fcport->login_retry == 0)
		return 0;

	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
	    fcport->loop_id);

	fcport->login_retry--;

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return 0;

	/* For pure target mode, login will not be initiated. */
	if (vha->host->active_mode == MODE_TARGET)
		return 0;

	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		if (fcport->loop_id == FC_NO_LOOP_ID) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC post gnl\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_async_gnl(vha, fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC post login\n",
			    __func__, __LINE__, fcport->port_name);
			fcport->disc_state = DSC_LOGIN_PEND;
			qla2x00_post_async_login_work(vha, fcport, NULL);
		}
		break;

	case DSC_GNL:
		if (fcport->login_pause) {
			fcport->last_rscn_gen = fcport->rscn_gen;
			fcport->last_login_gen = fcport->login_gen;
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			break;
		}

		if (fcport->flags & FCF_FCP2_DEVICE) {
			u8 opt = PDO_FORCE_ADISC;

			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC post gpdb\n",
			    __func__, __LINE__, fcport->port_name);

			fcport->disc_state = DSC_GPDB;
			qla24xx_post_gpdb_work(vha, fcport, opt);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC post login \n",
			    __func__, __LINE__, fcport->port_name);
			fcport->disc_state = DSC_LOGIN_PEND;
			qla2x00_post_async_login_work(vha, fcport, NULL);
		}

		break;

	case DSC_LOGIN_FAILED:
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC post gidpn \n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_post_gidpn_work(vha, fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC post gpdb \n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
		break;

	default:
		break;
	}

	return 0;
}

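/*
 * An RSCN touched this fcport: bump its RSCN generation and, if the port
 * is idle, kick off a GID_PN query to revalidate its N_Port ID.
 */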
static
void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
{
	fcport->rscn_gen++;

	ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
	    "%s %8phC DS %d LS %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state);

	if (fcport->flags & FCF_ASYNC_SENT)
		return;

	switch (fcport->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_COMPLETE:
		qla24xx_post_gidpn_work(fcport->vha, fcport);
		break;

	default:
		break;
	}
}

int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
    u8 *port_name, void *pla)
{
	struct qla_work_evt *e;
	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.new_sess.id = *id;
	e->u.new_sess.pla = pla;
	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);

	return qla2x00_post_work(vha, e);
}

static
int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return 0;

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
	case MODE_DUAL:
		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
		break;

	case MODE_TARGET:
	default:
		/* no-op */
		break;
	}

	return 0;
}

static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	if (fcport->scan_state != QLA_FCPORT_FOUND) {
		fcport->login_retry++;
		return;
	}

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return;

	if (fcport->flags & FCF_ASYNC_SENT) {
		fcport->login_retry++;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		fcport->login_retry++;
		return;
	}

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_async_gidpn(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}

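/*
 * Central dispatcher for fcport discovery events (RSCN, relogin, and the
 * *_DONE completions); routes each event to its dedicated handler.
 */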
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport;
	int rc;

	switch (ea->event) {
	case FCME_RELOGIN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;

		qla24xx_handle_relogin_event(vha, ea);
		break;
	case FCME_RSCN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;

		fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
		if (!fcport) {
			/* cable moved */
			rc = qla24xx_post_gpnid_work(vha, &ea->id);
			if (rc) {
				ql_log(ql_log_warn, vha, 0xffff,
				    "RSCN GPNID work failed %02x%02x%02x\n",
				    ea->id.b.domain, ea->id.b.area,
				    ea->id.b.al_pa);
			}
		} else {
			ea->fcport = fcport;
			qla24xx_handle_rscn_event(fcport, ea);
		}
		break;
	case FCME_GIDPN_DONE:
		qla24xx_handle_gidpn_event(vha, ea);
		break;
	case FCME_GNL_DONE:
		qla24xx_handle_gnl_done_event(vha, ea);
		break;
	case FCME_GPSC_DONE:
		qla24xx_post_upd_fcport_work(vha, ea->fcport);
		break;
	case FCME_PLOGI_DONE:	/* Initiator side sent LLIOCB */
		qla24xx_handle_plogi_done_event(vha, ea);
		break;
	case FCME_GPDB_DONE:
		qla24xx_handle_gpdb_event(vha, ea);
		break;
	case FCME_GPNID_DONE:
		qla24xx_handle_gpnid_event(vha, ea);
		break;
	case FCME_DELETE_DONE:
		qla24xx_handle_delete_done_event(vha, ea);
		break;
	default:
		BUG_ON(1);
		break;
	}
}

static void
qla2x00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	tmf->u.tmf.comp_status = CS_TIMEOUT;
	complete(&tmf->u.tmf.comp);
}

static void
qla2x00_tmf_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;
	complete(&tmf->u.tmf.comp);
}

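/*
 * Issue a task-management IOCB (e.g. LUN reset) and wait synchronously for
 * its completion; on completion a marker IOCB is sent to resynchronize the
 * firmware.
 */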
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
    uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qla2x00_tmf_sp_done;
	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
		ql_dbg(ql_dbg_taskm, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		flags = tm_iocb->u.tmf.flags;
		lun = (uint16_t)tm_iocb->u.tmf.lun;

		/* Issue Marker IOCB */
		qla2x00_marker(vha, vha->hw->req_q_map[0],
		    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	}

done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}

static void
qla24xx_abort_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	abt->u.abt.comp_status = CS_TIMEOUT;
	complete(&abt->u.abt.comp);
}

static void
qla24xx_abort_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	complete(&abt->u.abt.comp);
}

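/*
 * Build an SRB_ABT_CMD IOCB that aborts the given outstanding command by
 * handle and wait for the firmware to report the abort's completion.
 */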
static int
qla24xx_async_abort_cmd(srb_t *cmd_sp)
{
	scsi_qla_host_t *vha = cmd_sp->fcport->vha;
	fc_port_t *fcport = cmd_sp->fcport;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	sp->done = qla24xx_abort_sp_done;
	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, target_id=%x\n",
	    cmd_sp->handle, fcport->tgt_id);

	wait_for_completion(&abt_iocb->u.abt.comp);

	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}

int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long flags = 0;

	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp);
}

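/*
 * Handle the completion of an asynchronous PLOGI: on success fetch the
 * port database; on loop-ID or N_Port-ID collisions free or reassign the
 * loop ID and requery the fabric; otherwise schedule a relogin or mark
 * the device lost.
 */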
static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);
		ea->fcport->chip_reset = vha->hw->chip_reset;
		ea->fcport->logout_on_delete = 1;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		ea->fcport->disc_state = DSC_LOGIN_FAILED;
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area = (ea->iop[1] >> 8) & 0xff;
		cid.b.al_pa = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC LoopID 0x%x in use post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id);

		if (IS_SW_RESV_ADDR(cid)) {
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			ea->fcport->loop_id = FC_NO_LOOP_ID;
		} else {
			qla2x00_clear_loop_id(ea->fcport);
		}
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
		    ea->fcport->d_id.b.al_pa);

		qla2x00_clear_loop_id(ea->fcport);
		qla24xx_post_gidpn_work(vha, ea->fcport);
		break;
	}
	return;
}

void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	qla2x00_mark_device_lost(vha, fcport, 1, 0);
	qlt_logo_completion_handler(fcport, data[0]);
	fcport->login_gen++;
	return;
}

void
qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	if (data[0] == MBS_COMMAND_COMPLETE) {
		qla2x00_update_fcport(vha, fcport);

		return;
	}

	/* Retry login. */
	fcport->flags &= ~FCF_ASYNC_SENT;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	else
		qla2x00_mark_device_lost(vha, fcport, 1, 0);

	return;
}

/****************************************************************************/
/*                QLogic ISP2x00 Hardware Support Functions.                */
/****************************************************************************/

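/*
 * ISP83xx: participate in the inter-driver coordination (IDC) protocol to
 * load the NIC core firmware: register our presence, negotiate IDC
 * major/minor versions, and run the IDC state handler.
 */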
fa492630 1396static int
7d613ac6
SV
1397qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
1398{
1399 int rval = QLA_SUCCESS;
1400 struct qla_hw_data *ha = vha->hw;
1401 uint32_t idc_major_ver, idc_minor_ver;
711aa7f7 1402 uint16_t config[4];
7d613ac6
SV
1403
1404 qla83xx_idc_lock(vha, 0);
1405
1406 /* SV: TODO: Assign initialization timeout from
1407 * flash-info / other param
1408 */
1409 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
1410 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
1411
1412 /* Set our fcoe function presence */
1413 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
1414 ql_dbg(ql_dbg_p3p, vha, 0xb077,
1415 "Error while setting DRV-Presence.\n");
1416 rval = QLA_FUNCTION_FAILED;
1417 goto exit;
1418 }
1419
1420 /* Decide the reset ownership */
1421 qla83xx_reset_ownership(vha);
1422
1423 /*
1424 * On first protocol driver load:
1425 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
1426 * register.
1427 * Others: Check compatibility with current IDC Major version.
1428 */
1429 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
1430 if (ha->flags.nic_core_reset_owner) {
1431 /* Set IDC Major version */
1432 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
1433 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
1434
1435 /* Clearing IDC-Lock-Recovery register */
1436 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
1437 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
1438 /*
1439 * Clear further IDC participation if we are not compatible with
1440 * the current IDC Major Version.
1441 */
1442 ql_log(ql_log_warn, vha, 0xb07d,
1443 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
1444 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
1445 __qla83xx_clear_drv_presence(vha);
1446 rval = QLA_FUNCTION_FAILED;
1447 goto exit;
1448 }
1449 /* Each function sets its supported Minor version. */
1450 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
1451 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
1452 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
1453
711aa7f7
SK
1454 if (ha->flags.nic_core_reset_owner) {
1455 memset(config, 0, sizeof(config));
1456 if (!qla81xx_get_port_config(vha, config))
1457 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
1458 QLA8XXX_DEV_READY);
1459 }
1460
7d613ac6
SV
1461 rval = qla83xx_idc_state_handler(vha);
1462
1463exit:
1464 qla83xx_idc_unlock(vha, 0);
1465
1466 return rval;
1467}
1468
1da177e4
LT
1469/*
1470* qla2x00_initialize_adapter
1471* Initialize board.
1472*
1473* Input:
1474* ha = adapter block pointer.
1475*
1476* Returns:
1477* 0 = success
1478*/
1479int
e315cd28 1480qla2x00_initialize_adapter(scsi_qla_host_t *vha)
1da177e4
LT
1481{
1482 int rval;
e315cd28 1483 struct qla_hw_data *ha = vha->hw;
73208dfd 1484 struct req_que *req = ha->req_q_map[0];
2533cf67 1485
fc90adaf
JC
1486 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
1487 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
1488
1da177e4 1489 /* Clear adapter flags. */
e315cd28 1490 vha->flags.online = 0;
2533cf67 1491 ha->flags.chip_reset_done = 0;
e315cd28 1492 vha->flags.reset_active = 0;
85880801
AV
1493 ha->flags.pci_channel_io_perm_failure = 0;
1494 ha->flags.eeh_busy = 0;
fabbb8df 1495 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
e315cd28
AC
1496 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1497 atomic_set(&vha->loop_state, LOOP_DOWN);
1498 vha->device_flags = DFLG_NO_CABLE;
1499 vha->dpc_flags = 0;
1500 vha->flags.management_server_logged_in = 0;
1501 vha->marker_needed = 0;
1da177e4
LT
1502 ha->isp_abort_cnt = 0;
1503 ha->beacon_blink_led = 0;
1504
73208dfd
AC
1505 set_bit(0, ha->req_qid_map);
1506 set_bit(0, ha->rsp_qid_map);
1507
cfb0919c 1508 ql_dbg(ql_dbg_init, vha, 0x0040,
7c3df132 1509 "Configuring PCI space...\n");
e315cd28 1510 rval = ha->isp_ops->pci_config(vha);
1da177e4 1511 if (rval) {
7c3df132
SK
1512 ql_log(ql_log_warn, vha, 0x0044,
1513 "Unable to configure PCI space.\n");
1da177e4
LT
1514 return (rval);
1515 }
1516
e315cd28 1517 ha->isp_ops->reset_chip(vha);
1da177e4 1518
e315cd28 1519 rval = qla2xxx_get_flash_info(vha);
c00d8994 1520 if (rval) {
7c3df132
SK
1521 ql_log(ql_log_fatal, vha, 0x004f,
1522 "Unable to validate FLASH data.\n");
7ec0effd
AD
1523 return rval;
1524 }
1525
1526 if (IS_QLA8044(ha)) {
1527 qla8044_read_reset_template(vha);
1528
1529 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
1530 * If DONRESET_BIT0 is set, drivers should not set dev_state
1531 * to NEED_RESET. But if NEED_RESET is set, drivers should
1532 * should honor the reset. */
1533 if (ql2xdontresethba == 1)
1534 qla8044_set_idc_dontreset(vha);
c00d8994
AV
1535 }
1536
73208dfd 1537 ha->isp_ops->get_flash_version(vha, req->ring);
cfb0919c 1538 ql_dbg(ql_dbg_init, vha, 0x0061,
7c3df132 1539 "Configure NVRAM parameters...\n");
0107109e 1540
e315cd28 1541 ha->isp_ops->nvram_config(vha);
1da177e4 1542
d4c760c2
AV
1543 if (ha->flags.disable_serdes) {
1544 /* Mask HBA via NVRAM settings? */
7c3df132 1545 ql_log(ql_log_info, vha, 0x0077,
7b833558 1546 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
d4c760c2
AV
1547 return QLA_FUNCTION_FAILED;
1548 }
1549
cfb0919c 1550 ql_dbg(ql_dbg_init, vha, 0x0078,
7c3df132 1551 "Verifying loaded RISC code...\n");
1da177e4 1552
e315cd28
AC
1553 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
1554 rval = ha->isp_ops->chip_diag(vha);
d19044c3
AV
1555 if (rval)
1556 return (rval);
e315cd28 1557 rval = qla2x00_setup_chip(vha);
d19044c3
AV
1558 if (rval)
1559 return (rval);
1da177e4 1560 }
a9083016 1561
4d4df193 1562 if (IS_QLA84XX(ha)) {
e315cd28 1563 ha->cs84xx = qla84xx_get_chip(vha);
4d4df193 1564 if (!ha->cs84xx) {
7c3df132 1565 ql_log(ql_log_warn, vha, 0x00d0,
4d4df193
HK
1566 "Unable to configure ISP84XX.\n");
1567 return QLA_FUNCTION_FAILED;
1568 }
1569 }
2d70c103 1570
ead03855 1571 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2d70c103
NB
1572 rval = qla2x00_init_rings(vha);
1573
2533cf67 1574 ha->flags.chip_reset_done = 1;
1da177e4 1575
9a069e19 1576 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
6c452a45 1577 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
9a069e19
GM
1578 rval = qla84xx_init_chip(vha);
1579 if (rval != QLA_SUCCESS) {
7c3df132
SK
1580 ql_log(ql_log_warn, vha, 0x00d4,
1581 "Unable to initialize ISP84XX.\n");
8d2b21db 1582 qla84xx_put_chip(vha);
9a069e19
GM
1583 }
1584 }
1585
7d613ac6
SV
1586 /* Load the NIC Core f/w if we are the first protocol driver. */
1587 if (IS_QLA8031(ha)) {
1588 rval = qla83xx_nic_core_fw_load(vha);
1589 if (rval)
1590 ql_log(ql_log_warn, vha, 0x0124,
1591 "Error in initializing NIC Core f/w.\n");
1592 }
1593
2f0f3f4f
MI
1594 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
1595 qla24xx_read_fcp_prio_cfg(vha);
09ff701a 1596
c46e65c7
JC
1597 if (IS_P3P_TYPE(ha))
1598 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
1599 else
1600 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
1601
1da177e4
LT
1602 return (rval);
1603}
1604
1605/**
abbd8870 1606 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
1da177e4
LT
1607 * @ha: HA context
1608 *
1609 * Returns 0 on success.
1610 */
abbd8870 1611int
e315cd28 1612qla2100_pci_config(scsi_qla_host_t *vha)
1da177e4 1613{
a157b101 1614 uint16_t w;
abbd8870 1615 unsigned long flags;
e315cd28 1616 struct qla_hw_data *ha = vha->hw;
3d71644c 1617 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 1618
1da177e4 1619 pci_set_master(ha->pdev);
af6177d8 1620 pci_try_set_mwi(ha->pdev);
1da177e4 1621
1da177e4 1622 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 1623 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
abbd8870
AV
1624 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1625
737faece 1626 pci_disable_rom(ha->pdev);
1da177e4
LT
1627
1628 /* Get PCI bus information. */
1629 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 1630 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
1da177e4
LT
1631 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1632
abbd8870
AV
1633 return QLA_SUCCESS;
1634}
1da177e4 1635
abbd8870
AV
1636/**
1637 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
1638 * @ha: HA context
1639 *
1640 * Returns 0 on success.
1641 */
1642int
e315cd28 1643qla2300_pci_config(scsi_qla_host_t *vha)
abbd8870 1644{
a157b101 1645 uint16_t w;
abbd8870
AV
1646 unsigned long flags = 0;
1647 uint32_t cnt;
e315cd28 1648 struct qla_hw_data *ha = vha->hw;
3d71644c 1649 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 1650
abbd8870 1651 pci_set_master(ha->pdev);
af6177d8 1652 pci_try_set_mwi(ha->pdev);
1da177e4 1653
abbd8870 1654 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 1655 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1da177e4 1656
abbd8870
AV
1657 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1658 w &= ~PCI_COMMAND_INTX_DISABLE;
a157b101 1659 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1da177e4 1660
abbd8870
AV
1661 /*
1662 * If this is a 2300 card and not 2312, reset the
1663 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
1664 * the 2310 also reports itself as a 2300 so we need to get the
1665 * fb revision level -- a 6 indicates it really is a 2300 and
1666 * not a 2310.
1667 */
1668 if (IS_QLA2300(ha)) {
1669 spin_lock_irqsave(&ha->hardware_lock, flags);
1da177e4 1670
abbd8870 1671 /* Pause RISC. */
3d71644c 1672 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
abbd8870 1673 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 1674 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
abbd8870 1675 break;
1da177e4 1676
abbd8870
AV
1677 udelay(10);
1678 }
1da177e4 1679
abbd8870 1680 /* Select FPM registers. */
3d71644c
AV
1681 WRT_REG_WORD(&reg->ctrl_status, 0x20);
1682 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
1683
1684 /* Get the fb rev level */
3d71644c 1685 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
abbd8870
AV
1686
1687 if (ha->fb_rev == FPM_2300)
a157b101 1688 pci_clear_mwi(ha->pdev);
abbd8870
AV
1689
1690 /* Deselect FPM registers. */
3d71644c
AV
1691 WRT_REG_WORD(&reg->ctrl_status, 0x0);
1692 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
1693
1694 /* Release RISC module. */
3d71644c 1695 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
abbd8870 1696 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 1697 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
abbd8870
AV
1698 break;
1699
1700 udelay(10);
1da177e4 1701 }
1da177e4 1702
abbd8870
AV
1703 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1704 }
1da177e4 1705
abbd8870
AV
1706 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
1707
737faece 1708 pci_disable_rom(ha->pdev);
1da177e4 1709
abbd8870
AV
1710 /* Get PCI bus information. */
1711 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 1712 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
1713 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1714
1715 return QLA_SUCCESS;
1da177e4
LT
1716}
1717
0107109e
AV
1718/**
1719 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
1720 * @ha: HA context
1721 *
1722 * Returns 0 on success.
1723 */
1724int
e315cd28 1725qla24xx_pci_config(scsi_qla_host_t *vha)
0107109e 1726{
a157b101 1727 uint16_t w;
0107109e 1728 unsigned long flags = 0;
e315cd28 1729 struct qla_hw_data *ha = vha->hw;
0107109e 1730 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
0107109e
AV
1731
1732 pci_set_master(ha->pdev);
af6177d8 1733 pci_try_set_mwi(ha->pdev);
0107109e
AV
1734
1735 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 1736 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
0107109e
AV
1737 w &= ~PCI_COMMAND_INTX_DISABLE;
1738 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1739
1740 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
1741
1742 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
f85ec187
AV
1743 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
1744 pcix_set_mmrbc(ha->pdev, 2048);
0107109e
AV
1745
1746 /* PCIe -- adjust Maximum Read Request Size (2048). */
e67f1321 1747 if (pci_is_pcie(ha->pdev))
5ffd3a52 1748 pcie_set_readrq(ha->pdev, 4096);
0107109e 1749
737faece 1750 pci_disable_rom(ha->pdev);
0107109e 1751
44c10138 1752 ha->chip_revision = ha->pdev->revision;
a8488abe 1753
0107109e
AV
1754 /* Get PCI bus information. */
1755 spin_lock_irqsave(&ha->hardware_lock, flags);
1756 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
1757 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1758
1759 return QLA_SUCCESS;
1760}
1761
c3a2f0df
AV
1762/**
1763 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
1764 * @ha: HA context
1765 *
1766 * Returns 0 on success.
1767 */
1768int
e315cd28 1769qla25xx_pci_config(scsi_qla_host_t *vha)
c3a2f0df
AV
1770{
1771 uint16_t w;
e315cd28 1772 struct qla_hw_data *ha = vha->hw;
c3a2f0df
AV
1773
1774 pci_set_master(ha->pdev);
1775 pci_try_set_mwi(ha->pdev);
1776
1777 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
1778 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1779 w &= ~PCI_COMMAND_INTX_DISABLE;
1780 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1781
1782 /* PCIe -- adjust Maximum Read Request Size (2048). */
e67f1321 1783 if (pci_is_pcie(ha->pdev))
5ffd3a52 1784 pcie_set_readrq(ha->pdev, 4096);
c3a2f0df 1785
737faece 1786 pci_disable_rom(ha->pdev);
c3a2f0df
AV
1787
1788 ha->chip_revision = ha->pdev->revision;
1789
1790 return QLA_SUCCESS;
1791}
1792
1da177e4
LT
1793/**
1794 * qla2x00_isp_firmware() - Choose firmware image.
1795 * @ha: HA context
1796 *
1797 * Returns 0 on success.
1798 */
1799static int
e315cd28 1800qla2x00_isp_firmware(scsi_qla_host_t *vha)
1da177e4
LT
1801{
1802 int rval;
42e421b1
AV
1803 uint16_t loop_id, topo, sw_cap;
1804 uint8_t domain, area, al_pa;
e315cd28 1805 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
1806
1807 /* Assume loading risc code */
fa2a1ce5 1808 rval = QLA_FUNCTION_FAILED;
1da177e4
LT
1809
1810 if (ha->flags.disable_risc_code_load) {
7c3df132 1811 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
1da177e4
LT
1812
1813 /* Verify checksum of loaded RISC code. */
e315cd28 1814 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
42e421b1
AV
1815 if (rval == QLA_SUCCESS) {
1816 /* And, verify we are not in ROM code. */
e315cd28 1817 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
42e421b1
AV
1818 &area, &domain, &topo, &sw_cap);
1819 }
1da177e4
LT
1820 }
1821
7c3df132
SK
1822 if (rval)
1823 ql_dbg(ql_dbg_init, vha, 0x007a,
1824 "**** Load RISC code ****.\n");
1da177e4
LT
1825
1826 return (rval);
1827}
1828
1829/**
1830 * qla2x00_reset_chip() - Reset ISP chip.
1831 * @ha: HA context
1832 *
1833 * Returns 0 on success.
1834 */
abbd8870 1835void
e315cd28 1836qla2x00_reset_chip(scsi_qla_host_t *vha)
1da177e4
LT
1837{
1838 unsigned long flags = 0;
e315cd28 1839 struct qla_hw_data *ha = vha->hw;
3d71644c 1840 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 1841 uint32_t cnt;
1da177e4
LT
1842 uint16_t cmd;
1843
85880801
AV
1844 if (unlikely(pci_channel_offline(ha->pdev)))
1845 return;
1846
fd34f556 1847 ha->isp_ops->disable_intrs(ha);
1da177e4
LT
1848
1849 spin_lock_irqsave(&ha->hardware_lock, flags);
1850
1851 /* Turn off master enable */
1852 cmd = 0;
1853 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
1854 cmd &= ~PCI_COMMAND_MASTER;
1855 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
1856
1857 if (!IS_QLA2100(ha)) {
1858 /* Pause RISC. */
1859 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
1860 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
1861 for (cnt = 0; cnt < 30000; cnt++) {
1862 if ((RD_REG_WORD(&reg->hccr) &
1863 HCCR_RISC_PAUSE) != 0)
1864 break;
1865 udelay(100);
1866 }
1867 } else {
1868 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
1869 udelay(10);
1870 }
1871
1872 /* Select FPM registers. */
1873 WRT_REG_WORD(&reg->ctrl_status, 0x20);
1874 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1875
1876 /* FPM Soft Reset. */
1877 WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
1878 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
1879
1880 /* Toggle Fpm Reset. */
1881 if (!IS_QLA2200(ha)) {
1882 WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
1883 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
1884 }
1885
1886 /* Select frame buffer registers. */
1887 WRT_REG_WORD(&reg->ctrl_status, 0x10);
1888 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1889
1890 /* Reset frame buffer FIFOs. */
1891 if (IS_QLA2200(ha)) {
1892 WRT_FB_CMD_REG(ha, reg, 0xa000);
1893 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
1894 } else {
1895 WRT_FB_CMD_REG(ha, reg, 0x00fc);
1896
1897 /* Read back fb_cmd until zero or 3 seconds max */
1898 for (cnt = 0; cnt < 3000; cnt++) {
1899 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
1900 break;
1901 udelay(100);
1902 }
1903 }
1904
1905 /* Select RISC module registers. */
1906 WRT_REG_WORD(&reg->ctrl_status, 0);
1907 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1908
1909 /* Reset RISC processor. */
1910 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1911 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
1912
1913 /* Release RISC processor. */
1914 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1915 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
1916 }
1917
1918 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
1919 WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
1920
1921 /* Reset ISP chip. */
1922 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1923
1924 /* Wait for RISC to recover from reset. */
1925 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1926 /*
1927 		 * It is necessary to have a delay here since the card doesn't
1928 * respond to PCI reads during a reset. On some architectures
1929 * this will result in an MCA.
1930 */
1931 udelay(20);
1932 for (cnt = 30000; cnt; cnt--) {
1933 if ((RD_REG_WORD(&reg->ctrl_status) &
1934 CSR_ISP_SOFT_RESET) == 0)
1935 break;
1936 udelay(100);
1937 }
1938 } else
1939 udelay(10);
1940
1941 /* Reset RISC processor. */
1942 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1943
1944 WRT_REG_WORD(&reg->semaphore, 0);
1945
1946 /* Release RISC processor. */
1947 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1948 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
1949
1950 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1951 for (cnt = 0; cnt < 30000; cnt++) {
ffb39f03 1952 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
1da177e4 1953 break;
1da177e4
LT
1954
1955 udelay(100);
1956 }
1957 } else
1958 udelay(100);
1959
1960 /* Turn on master enable */
1961 cmd |= PCI_COMMAND_MASTER;
1962 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
1963
1964 /* Disable RISC pause on FPM parity error. */
1965 if (!IS_QLA2100(ha)) {
1966 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
1967 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
1968 }
1969
1970 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1971}
1972
b1d46989
MI
1973/**
1974 * qla81xx_reset_mpi() - Resets MPI FW via Write MPI Register MBC.
1975 *
1976 * Returns 0 on success.
1977 */
fa492630 1978static int
b1d46989
MI
1979qla81xx_reset_mpi(scsi_qla_host_t *vha)
1980{
1981 uint16_t mb[4] = {0x1010, 0, 1, 0};
1982
6246b8a1
GM
1983 if (!IS_QLA81XX(vha->hw))
1984 return QLA_SUCCESS;
1985
b1d46989
MI
1986 return qla81xx_write_mpi_register(vha, mb);
1987}
1988
0107109e 1989/**
88c26663 1990 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
0107109e
AV
1991 * @ha: HA context
1992 *
1993 * Returns 0 on success.
1994 */
d14e72fb 1995static inline int
e315cd28 1996qla24xx_reset_risc(scsi_qla_host_t *vha)
0107109e
AV
1997{
1998 unsigned long flags = 0;
e315cd28 1999 struct qla_hw_data *ha = vha->hw;
0107109e 2000 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
52c82823 2001 uint32_t cnt;
335a1cc9 2002 uint16_t wd;
b1d46989 2003 static int abts_cnt; /* ISP abort retry counts */
d14e72fb 2004 int rval = QLA_SUCCESS;
0107109e 2005
0107109e
AV
2006 spin_lock_irqsave(&ha->hardware_lock, flags);
2007
2008 /* Reset RISC. */
2009 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
2010 for (cnt = 0; cnt < 30000; cnt++) {
2011 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
2012 break;
2013
2014 udelay(10);
2015 }
2016
d14e72fb
HM
2017 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
2018 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
2019
2020 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
2021 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
2022 RD_REG_DWORD(&reg->hccr),
2023 RD_REG_DWORD(&reg->ctrl_status),
2024 (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
2025
0107109e
AV
2026 WRT_REG_DWORD(&reg->ctrl_status,
2027 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
335a1cc9 2028 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
88c26663 2029
335a1cc9 2030 udelay(100);
d14e72fb 2031
88c26663 2032 /* Wait for firmware to complete NVRAM accesses. */
52c82823 2033 RD_REG_WORD(&reg->mailbox0);
d14e72fb
HM
2034 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
2035 rval == QLA_SUCCESS; cnt--) {
88c26663 2036 barrier();
d14e72fb
HM
2037 if (cnt)
2038 udelay(5);
2039 else
2040 rval = QLA_FUNCTION_TIMEOUT;
88c26663
AV
2041 }
2042
d14e72fb
HM
2043 if (rval == QLA_SUCCESS)
2044 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
2045
2046 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
2047 "HCCR: 0x%x, MailBox0 Status 0x%x\n",
2048 RD_REG_DWORD(&reg->hccr),
2049 RD_REG_DWORD(&reg->mailbox0));
2050
335a1cc9 2051 /* Wait for soft-reset to complete. */
52c82823 2052 RD_REG_DWORD(&reg->ctrl_status);
200ffb15 2053 for (cnt = 0; cnt < 60; cnt++) {
0107109e 2054 barrier();
d14e72fb
HM
2055 if ((RD_REG_DWORD(&reg->ctrl_status) &
2056 CSRX_ISP_SOFT_RESET) == 0)
2057 break;
2058
2059 udelay(5);
0107109e 2060 }
d14e72fb
HM
2061 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
2062 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
2063
2064 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
2065 "HCCR: 0x%x, Soft Reset status: 0x%x\n",
2066 RD_REG_DWORD(&reg->hccr),
2067 RD_REG_DWORD(&reg->ctrl_status));
0107109e 2068
b1d46989
MI
2069 /* If required, do an MPI FW reset now */
2070 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
2071 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
2072 if (++abts_cnt < 5) {
2073 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2074 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
2075 } else {
2076 /*
2077 * We exhausted the ISP abort retries. We have to
2078 * set the board offline.
2079 */
2080 abts_cnt = 0;
2081 vha->flags.online = 0;
2082 }
2083 }
2084 }
2085
0107109e
AV
2086 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2087 RD_REG_DWORD(&reg->hccr);
2088
2089 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2090 RD_REG_DWORD(&reg->hccr);
2091
2092 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2093 RD_REG_DWORD(&reg->hccr);
2094
52c82823 2095 RD_REG_WORD(&reg->mailbox0);
200ffb15 2096 for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
d14e72fb 2097 rval == QLA_SUCCESS; cnt--) {
0107109e 2098 barrier();
d14e72fb
HM
2099 if (cnt)
2100 udelay(5);
2101 else
2102 rval = QLA_FUNCTION_TIMEOUT;
0107109e 2103 }
d14e72fb
HM
2104 if (rval == QLA_SUCCESS)
2105 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2106
2107 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
2108 "Host Risc 0x%x, mailbox0 0x%x\n",
2109 RD_REG_DWORD(&reg->hccr),
2110 RD_REG_WORD(&reg->mailbox0));
0107109e
AV
2111
2112 spin_unlock_irqrestore(&ha->hardware_lock, flags);
124f85e6 2113
d14e72fb
HM
2114 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
2115 "Driver in %s mode\n",
2116 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
2117
124f85e6
AV
2118 if (IS_NOPOLLING_TYPE(ha))
2119 ha->isp_ops->enable_intrs(ha);
d14e72fb
HM
2120
2121 return rval;
0107109e
AV
2122}
2123
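
/*
 * Editor's note -- qla24xx_reset_risc() above leans on one recurring idiom:
 * poll a status register in a bounded loop, delaying between reads and giving
 * up after a fixed iteration count. A minimal sketch of that idiom follows,
 * assuming a hypothetical memory-mapped status register and DONE bit (these
 * are not qla2xxx definitions).
 */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define EXAMPLE_DONE	0x1	/* hypothetical completion bit */

/* Poll a status register until the DONE bit sets, or time out. */
static int example_poll_done(void __iomem *status_reg)
{
	unsigned int cnt;

	for (cnt = 0; cnt < 30000; cnt++) {
		if (readl(status_reg) & EXAMPLE_DONE)
			return 0;	/* hardware finished */
		udelay(10);		/* brief back-off before re-reading */
	}

	return -ETIMEDOUT;		/* bounded: never spin forever */
}
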
4ea2c9c7
JC
2124static void
2125qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
2126{
2127 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2128
2129 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2130 *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
2131
2132}
2133
2134static void
2135qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
2136{
2137 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2138
2139 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2140 WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
2141}
2142
2143static void
2144qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
2145{
4ea2c9c7
JC
2146 uint32_t wd32 = 0;
2147 uint delta_msec = 100;
2148 uint elapsed_msec = 0;
2149 uint timeout_msec;
2150 ulong n;
2151
cc790764
JC
2152 if (vha->hw->pdev->subsystem_device != 0x0175 &&
2153 vha->hw->pdev->subsystem_device != 0x0240)
4ea2c9c7
JC
2154 return;
2155
8dd7e3a5
JC
2156 WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
2157 udelay(100);
2158
4ea2c9c7
JC
2159attempt:
2160 timeout_msec = TIMEOUT_SEMAPHORE;
2161 n = timeout_msec / delta_msec;
2162 while (n--) {
2163 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
2164 qla25xx_read_risc_sema_reg(vha, &wd32);
2165 if (wd32 & RISC_SEMAPHORE)
2166 break;
2167 msleep(delta_msec);
2168 elapsed_msec += delta_msec;
2169 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2170 goto force;
2171 }
2172
2173 if (!(wd32 & RISC_SEMAPHORE))
2174 goto force;
2175
2176 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2177 goto acquired;
2178
2179 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
2180 timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
2181 n = timeout_msec / delta_msec;
2182 while (n--) {
2183 qla25xx_read_risc_sema_reg(vha, &wd32);
2184 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2185 break;
2186 msleep(delta_msec);
2187 elapsed_msec += delta_msec;
2188 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2189 goto force;
2190 }
2191
2192 if (wd32 & RISC_SEMAPHORE_FORCE)
2193 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
2194
2195 goto attempt;
2196
2197force:
2198 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
2199
2200acquired:
2201 return;
2202}
2203
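
/*
 * Editor's note -- qla25xx_manipulate_risc_semaphore() above implements an
 * acquire-or-force policy: keep trying to take the hardware semaphore, and
 * once an overall deadline passes, seize it so the reset can proceed. A
 * compact sketch of that policy follows, with the hardware bit replaced by a
 * plain flag purely for illustration; none of these names exist in qla2xxx.
 */
#include <linux/delay.h>
#include <linux/types.h>

static bool example_sema_owned;		/* stand-in for the hardware bit */

static bool example_sema_trylock(void)
{
	if (example_sema_owned)
		return false;		/* someone else holds it */
	example_sema_owned = true;
	return true;
}

/* Try politely for up to ~3 seconds, then force ownership. */
static void example_acquire_or_force(void)
{
	unsigned int elapsed_msec = 0;

	while (!example_sema_trylock()) {
		msleep(100);
		elapsed_msec += 100;
		if (elapsed_msec > 3000) {
			example_sema_owned = true;	/* forced takeover */
			break;
		}
	}
}
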
88c26663
AV
2204/**
2205 * qla24xx_reset_chip() - Reset ISP24xx chip.
2206 * @ha: HA context
2207 *
2208 * Returns 0 on success.
2209 */
2210void
e315cd28 2211qla24xx_reset_chip(scsi_qla_host_t *vha)
88c26663 2212{
e315cd28 2213 struct qla_hw_data *ha = vha->hw;
85880801
AV
2214
2215 if (pci_channel_offline(ha->pdev) &&
2216 ha->flags.pci_channel_io_perm_failure) {
2217 return;
2218 }
2219
fd34f556 2220 ha->isp_ops->disable_intrs(ha);
88c26663 2221
4ea2c9c7
JC
2222 qla25xx_manipulate_risc_semaphore(vha);
2223
88c26663 2224 /* Perform RISC reset. */
e315cd28 2225 qla24xx_reset_risc(vha);
88c26663
AV
2226}
2227
1da177e4
LT
2228/**
2229 * qla2x00_chip_diag() - Test chip for proper operation.
2230 * @ha: HA context
2231 *
2232 * Returns 0 on success.
2233 */
abbd8870 2234int
e315cd28 2235qla2x00_chip_diag(scsi_qla_host_t *vha)
1da177e4
LT
2236{
2237 int rval;
e315cd28 2238 struct qla_hw_data *ha = vha->hw;
3d71644c 2239 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
2240 unsigned long flags = 0;
2241 uint16_t data;
2242 uint32_t cnt;
2243 uint16_t mb[5];
73208dfd 2244 struct req_que *req = ha->req_q_map[0];
1da177e4
LT
2245
2246 /* Assume a failed state */
2247 rval = QLA_FUNCTION_FAILED;
2248
7c3df132
SK
2249 ql_dbg(ql_dbg_init, vha, 0x007b,
2250 "Testing device at %lx.\n", (u_long)&reg->flash_address);
1da177e4
LT
2251
2252 spin_lock_irqsave(&ha->hardware_lock, flags);
2253
2254 /* Reset ISP chip. */
2255 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
2256
2257 /*
2258 * We need to have a delay here since the card will not respond while
2259 	 * in reset, which can cause an MCA on some architectures.
2260 */
2261 udelay(20);
2262 data = qla2x00_debounce_register(&reg->ctrl_status);
2263 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
2264 udelay(5);
2265 data = RD_REG_WORD(&reg->ctrl_status);
2266 barrier();
2267 }
2268
2269 if (!cnt)
2270 goto chip_diag_failed;
2271
7c3df132
SK
2272 ql_dbg(ql_dbg_init, vha, 0x007c,
2273 "Reset register cleared by chip reset.\n");
1da177e4
LT
2274
2275 /* Reset RISC processor. */
2276 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2277 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2278
2279 /* Workaround for QLA2312 PCI parity error */
2280 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2281 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
2282 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
2283 udelay(5);
2284 data = RD_MAILBOX_REG(ha, reg, 0);
fa2a1ce5 2285 barrier();
1da177e4
LT
2286 }
2287 } else
2288 udelay(10);
2289
2290 if (!cnt)
2291 goto chip_diag_failed;
2292
2293 /* Check product ID of chip */
7c3df132 2294 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n");
1da177e4
LT
2295
2296 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
2297 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
2298 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
2299 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
2300 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
2301 mb[3] != PROD_ID_3) {
7c3df132
SK
2302 ql_log(ql_log_warn, vha, 0x0062,
2303 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
2304 mb[1], mb[2], mb[3]);
1da177e4
LT
2305
2306 goto chip_diag_failed;
2307 }
2308 ha->product_id[0] = mb[1];
2309 ha->product_id[1] = mb[2];
2310 ha->product_id[2] = mb[3];
2311 ha->product_id[3] = mb[4];
2312
2313 /* Adjust fw RISC transfer size */
73208dfd 2314 if (req->length > 1024)
1da177e4
LT
2315 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
2316 else
2317 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
73208dfd 2318 req->length;
1da177e4
LT
2319
2320 if (IS_QLA2200(ha) &&
2321 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
2322 /* Limit firmware transfer size with a 2200A */
7c3df132 2323 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1da177e4 2324
ea5b6382 2325 ha->device_type |= DT_ISP2200A;
1da177e4
LT
2326 ha->fw_transfer_size = 128;
2327 }
2328
2329 /* Wrap Incoming Mailboxes Test. */
2330 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2331
7c3df132 2332 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
e315cd28 2333 rval = qla2x00_mbx_reg_test(vha);
7c3df132
SK
2334 if (rval)
2335 ql_log(ql_log_warn, vha, 0x0080,
2336 "Failed mailbox send register test.\n");
2337 else
1da177e4
LT
2338 /* Flag a successful rval */
2339 rval = QLA_SUCCESS;
1da177e4
LT
2340 spin_lock_irqsave(&ha->hardware_lock, flags);
2341
2342chip_diag_failed:
2343 if (rval)
7c3df132
SK
2344 ql_log(ql_log_info, vha, 0x0081,
2345 "Chip diagnostics **** FAILED ****.\n");
1da177e4
LT
2346
2347 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2348
2349 return (rval);
2350}
2351
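
/*
 * Editor's note -- qla2x00_chip_diag() above reads several mailbox registers
 * through qla2x00_debounce_register(). Below is a plausible sketch of the
 * debounce idea only -- re-read the register until two consecutive samples
 * agree so a value caught mid-update is never acted on. It illustrates the
 * concept and is not the qla2xxx implementation.
 */
#include <linux/io.h>
#include <linux/types.h>

/* Read a 16-bit register until two consecutive reads return the same value. */
static u16 example_debounce_readw(void __iomem *addr)
{
	u16 first, again;

	first = readw(addr);
	do {
		again = readw(addr);
		if (again == first)
			break;		/* stable value observed twice */
		first = again;
	} while (1);

	return first;
}
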
0107109e
AV
2352/**
2353 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
2354 * @ha: HA context
2355 *
2356 * Returns 0 on success.
2357 */
2358int
e315cd28 2359qla24xx_chip_diag(scsi_qla_host_t *vha)
0107109e
AV
2360{
2361 int rval;
e315cd28 2362 struct qla_hw_data *ha = vha->hw;
73208dfd 2363 struct req_que *req = ha->req_q_map[0];
0107109e 2364
7ec0effd 2365 if (IS_P3P_TYPE(ha))
a9083016
GM
2366 return QLA_SUCCESS;
2367
73208dfd 2368 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
0107109e 2369
e315cd28 2370 rval = qla2x00_mbx_reg_test(vha);
0107109e 2371 if (rval) {
7c3df132
SK
2372 ql_log(ql_log_warn, vha, 0x0082,
2373 "Failed mailbox send register test.\n");
0107109e
AV
2374 } else {
2375 /* Flag a successful rval */
2376 rval = QLA_SUCCESS;
2377 }
2378
2379 return rval;
2380}
2381
a7a167bf 2382void
e315cd28 2383qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
0107109e 2384{
a7a167bf
AV
2385 int rval;
2386 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
73208dfd 2387 eft_size, fce_size, mq_size;
df613b96
AV
2388 dma_addr_t tc_dma;
2389 void *tc;
e315cd28 2390 struct qla_hw_data *ha = vha->hw;
73208dfd
AC
2391 struct req_que *req = ha->req_q_map[0];
2392 struct rsp_que *rsp = ha->rsp_q_map[0];
a7a167bf
AV
2393
2394 if (ha->fw_dump) {
7c3df132
SK
2395 ql_dbg(ql_dbg_init, vha, 0x00bd,
2396 "Firmware dump already allocated.\n");
a7a167bf
AV
2397 return;
2398 }
d4e3e04d 2399
0107109e 2400 ha->fw_dumped = 0;
61f098dd 2401 ha->fw_dump_cap_flags = 0;
f73cb695
CD
2402 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
2403 req_q_size = rsp_q_size = 0;
2404
2405 if (IS_QLA27XX(ha))
2406 goto try_fce;
2407
d4e3e04d 2408 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
a7a167bf 2409 fixed_size = sizeof(struct qla2100_fw_dump);
d4e3e04d 2410 } else if (IS_QLA23XX(ha)) {
a7a167bf
AV
2411 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
2412 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
2413 sizeof(uint16_t);
e428924c 2414 } else if (IS_FWI2_CAPABLE(ha)) {
b20f02e1 2415 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
6246b8a1
GM
2416 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
2417 else if (IS_QLA81XX(ha))
3a03eb79
AV
2418 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
2419 else if (IS_QLA25XX(ha))
2420 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
2421 else
2422 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
f73cb695 2423
a7a167bf
AV
2424 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
2425 sizeof(uint32_t);
050c9bb1 2426 if (ha->mqenable) {
b20f02e1 2427 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
6246b8a1 2428 mq_size = sizeof(struct qla2xxx_mq_chain);
050c9bb1
GM
2429 /*
2430 * Allocate maximum buffer size for all queues.
2431 * Resizing must be done at end-of-dump processing.
2432 */
2433 mq_size += ha->max_req_queues *
2434 (req->length * sizeof(request_t));
2435 mq_size += ha->max_rsp_queues *
2436 (rsp->length * sizeof(response_t));
2437 }
00876ae8 2438 if (ha->tgt.atio_ring)
2d70c103 2439 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
df613b96 2440 /* Allocate memory for Fibre Channel Event Buffer. */
f73cb695
CD
2441 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
2442 !IS_QLA27XX(ha))
436a7b11 2443 goto try_eft;
df613b96 2444
f73cb695
CD
2445try_fce:
2446 if (ha->fce)
2447 dma_free_coherent(&ha->pdev->dev,
2448 FCE_SIZE, ha->fce, ha->fce_dma);
2449
2450 /* Allocate memory for Fibre Channel Event Buffer. */
0ea85b50
JP
2451 tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
2452 GFP_KERNEL);
df613b96 2453 if (!tc) {
7c3df132
SK
2454 ql_log(ql_log_warn, vha, 0x00be,
2455 "Unable to allocate (%d KB) for FCE.\n",
2456 FCE_SIZE / 1024);
17d98630 2457 goto try_eft;
df613b96
AV
2458 }
2459
e315cd28 2460 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
df613b96
AV
2461 ha->fce_mb, &ha->fce_bufs);
2462 if (rval) {
7c3df132
SK
2463 ql_log(ql_log_warn, vha, 0x00bf,
2464 "Unable to initialize FCE (%d).\n", rval);
df613b96
AV
2465 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
2466 tc_dma);
2467 ha->flags.fce_enabled = 0;
17d98630 2468 goto try_eft;
df613b96 2469 }
cfb0919c 2470 ql_dbg(ql_dbg_init, vha, 0x00c0,
7c3df132 2471 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
df613b96 2472
7d9dade3 2473 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
df613b96
AV
2474 ha->flags.fce_enabled = 1;
2475 ha->fce_dma = tc_dma;
2476 ha->fce = tc;
f73cb695 2477
436a7b11 2478try_eft:
f73cb695
CD
2479 if (ha->eft)
2480 dma_free_coherent(&ha->pdev->dev,
2481 EFT_SIZE, ha->eft, ha->eft_dma);
2482
436a7b11 2483 /* Allocate memory for Extended Trace Buffer. */
0ea85b50
JP
2484 tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
2485 GFP_KERNEL);
436a7b11 2486 if (!tc) {
7c3df132
SK
2487 ql_log(ql_log_warn, vha, 0x00c1,
2488 "Unable to allocate (%d KB) for EFT.\n",
2489 EFT_SIZE / 1024);
436a7b11
AV
2490 goto cont_alloc;
2491 }
2492
e315cd28 2493 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
436a7b11 2494 if (rval) {
7c3df132
SK
2495 ql_log(ql_log_warn, vha, 0x00c2,
2496 "Unable to initialize EFT (%d).\n", rval);
436a7b11
AV
2497 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
2498 tc_dma);
2499 goto cont_alloc;
2500 }
cfb0919c 2501 ql_dbg(ql_dbg_init, vha, 0x00c3,
7c3df132 2502 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
436a7b11
AV
2503
2504 eft_size = EFT_SIZE;
2505 ha->eft_dma = tc_dma;
2506 ha->eft = tc;
d4e3e04d 2507 }
f73cb695 2508
a7a167bf 2509cont_alloc:
f73cb695
CD
2510 if (IS_QLA27XX(ha)) {
2511 if (!ha->fw_dump_template) {
2512 ql_log(ql_log_warn, vha, 0x00ba,
2513 			    "Failed: missing fwdump template\n");
2514 return;
2515 }
2516 dump_size = qla27xx_fwdt_calculate_dump_size(vha);
2517 ql_dbg(ql_dbg_init, vha, 0x00fa,
2518 "-> allocating fwdump (%x bytes)...\n", dump_size);
2519 goto allocate;
2520 }
2521
73208dfd
AC
2522 req_q_size = req->length * sizeof(request_t);
2523 rsp_q_size = rsp->length * sizeof(response_t);
a7a167bf 2524 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
2afa19a9 2525 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
bb99de67
AV
2526 ha->chain_offset = dump_size;
2527 dump_size += mq_size + fce_size;
d4e3e04d 2528
f73cb695 2529allocate:
d4e3e04d 2530 ha->fw_dump = vmalloc(dump_size);
a7a167bf 2531 if (!ha->fw_dump) {
7c3df132
SK
2532 ql_log(ql_log_warn, vha, 0x00c4,
2533 "Unable to allocate (%d KB) for firmware dump.\n",
2534 dump_size / 1024);
a7a167bf 2535
e30d1756
MI
2536 if (ha->fce) {
2537 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2538 ha->fce_dma);
2539 ha->fce = NULL;
2540 ha->fce_dma = 0;
2541 }
2542
a7a167bf
AV
2543 if (ha->eft) {
2544 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
2545 ha->eft_dma);
2546 ha->eft = NULL;
2547 ha->eft_dma = 0;
2548 }
2549 return;
2550 }
f73cb695 2551 ha->fw_dump_len = dump_size;
cfb0919c 2552 ql_dbg(ql_dbg_init, vha, 0x00c5,
7c3df132 2553 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
a7a167bf 2554
f73cb695
CD
2555 if (IS_QLA27XX(ha))
2556 return;
2557
a7a167bf
AV
2558 ha->fw_dump->signature[0] = 'Q';
2559 ha->fw_dump->signature[1] = 'L';
2560 ha->fw_dump->signature[2] = 'G';
2561 ha->fw_dump->signature[3] = 'C';
ad950360 2562 ha->fw_dump->version = htonl(1);
a7a167bf
AV
2563
2564 ha->fw_dump->fixed_size = htonl(fixed_size);
2565 ha->fw_dump->mem_size = htonl(mem_size);
2566 ha->fw_dump->req_q_size = htonl(req_q_size);
2567 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
2568
2569 ha->fw_dump->eft_size = htonl(eft_size);
2570 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
2571 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
2572
2573 ha->fw_dump->header_size =
2574 htonl(offsetof(struct qla2xxx_fw_dump, isp));
0107109e
AV
2575}
2576
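
/*
 * Editor's note -- the FCE and EFT setup in qla2x00_alloc_fw_dump() above
 * follows an allocate/enable/rollback shape: obtain a coherent DMA buffer,
 * hand it to the firmware, and free it again if the firmware declines so the
 * feature is simply left disabled. A hedged sketch of that shape follows;
 * example_enable_trace() is a hypothetical stand-in for the enable mailbox
 * command and EXAMPLE_TRACE_SIZE is arbitrary.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#define EXAMPLE_TRACE_SIZE	(64 * 1024)

static void *example_setup_trace(struct device *dev, dma_addr_t *dma,
				 int (*enable_trace)(dma_addr_t))
{
	void *buf;

	buf = dma_alloc_coherent(dev, EXAMPLE_TRACE_SIZE, dma, GFP_KERNEL);
	if (!buf)
		return NULL;			/* feature stays disabled */

	if (enable_trace(*dma)) {
		/* Firmware refused the buffer; roll back the allocation. */
		dma_free_coherent(dev, EXAMPLE_TRACE_SIZE, buf, *dma);
		return NULL;
	}

	return buf;
}
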
18e7555a
AV
2577static int
2578qla81xx_mpi_sync(scsi_qla_host_t *vha)
2579{
2580#define MPS_MASK 0xe0
2581 int rval;
2582 uint16_t dc;
2583 uint32_t dw;
18e7555a
AV
2584
2585 if (!IS_QLA81XX(vha->hw))
2586 return QLA_SUCCESS;
2587
2588 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
2589 if (rval != QLA_SUCCESS) {
7c3df132
SK
2590 ql_log(ql_log_warn, vha, 0x0105,
2591 "Unable to acquire semaphore.\n");
18e7555a
AV
2592 goto done;
2593 }
2594
2595 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
2596 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
2597 if (rval != QLA_SUCCESS) {
7c3df132 2598 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
18e7555a
AV
2599 goto done_release;
2600 }
2601
2602 dc &= MPS_MASK;
2603 if (dc == (dw & MPS_MASK))
2604 goto done_release;
2605
2606 dw &= ~MPS_MASK;
2607 dw |= dc;
2608 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
2609 if (rval != QLA_SUCCESS) {
7c3df132 2610 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
18e7555a
AV
2611 }
2612
2613done_release:
2614 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
2615 if (rval != QLA_SUCCESS) {
7c3df132
SK
2616 ql_log(ql_log_warn, vha, 0x006d,
2617 "Unable to release semaphore.\n");
18e7555a
AV
2618 }
2619
2620done:
2621 return rval;
2622}
2623
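
/*
 * Editor's note -- qla81xx_mpi_sync() above is a masked read-modify-write:
 * only the MPS bits of the RAM word are brought in line with the PCI config
 * value, and the word is rewritten only when they actually differ. A minimal
 * sketch of just that bit manipulation follows; the mask value mirrors the
 * MPS_MASK define above, while the helper itself is purely illustrative.
 */
#define EXAMPLE_MPS_MASK	0xe0

/* Return the RAM word with its MPS field synced to the config value. */
static unsigned int example_sync_mps(unsigned int config_val,
				     unsigned int ram_word)
{
	unsigned int want = config_val & EXAMPLE_MPS_MASK;

	if ((ram_word & EXAMPLE_MPS_MASK) == want)
		return ram_word;		/* already consistent, no write */

	ram_word &= ~EXAMPLE_MPS_MASK;		/* clear the field... */
	return ram_word | want;			/* ...and insert the new value */
}
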
8d93f550
CD
2624int
2625qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
2626{
2627 /* Don't try to reallocate the array */
2628 if (req->outstanding_cmds)
2629 return QLA_SUCCESS;
2630
d7459527 2631 if (!IS_FWI2_CAPABLE(ha))
8d93f550
CD
2632 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
2633 else {
03e8c680
QT
2634 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
2635 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
8d93f550 2636 else
03e8c680 2637 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
8d93f550
CD
2638 }
2639
2640 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
2641 req->num_outstanding_cmds, GFP_KERNEL);
2642
2643 if (!req->outstanding_cmds) {
2644 /*
2645 * Try to allocate a minimal size just so we can get through
2646 * initialization.
2647 */
2648 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
2649 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
2650 req->num_outstanding_cmds, GFP_KERNEL);
2651
2652 if (!req->outstanding_cmds) {
2653 ql_log(ql_log_fatal, NULL, 0x0126,
2654 "Failed to allocate memory for "
2655 "outstanding_cmds for req_que %p.\n", req);
2656 req->num_outstanding_cmds = 0;
2657 return QLA_FUNCTION_FAILED;
2658 }
2659 }
2660
2661 return QLA_SUCCESS;
2662}
2663
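
/*
 * Editor's note -- qla2x00_alloc_outstanding_cmds() above sizes the array from
 * the firmware's exchange/IOCB counts and, if that allocation fails, retries
 * with a minimal count so initialization can still get through. A short sketch
 * of the same fall-back allocation pattern using kcalloc() follows;
 * EXAMPLE_MIN_CMDS and the element type are placeholders, not driver values.
 */
#include <linux/slab.h>

#define EXAMPLE_MIN_CMDS	128

/* Allocate a command array, falling back to a minimal size under pressure. */
static void **example_alloc_cmd_array(unsigned int *num_cmds)
{
	void **cmds;

	cmds = kcalloc(*num_cmds, sizeof(*cmds), GFP_KERNEL);
	if (!cmds) {
		*num_cmds = EXAMPLE_MIN_CMDS;
		cmds = kcalloc(*num_cmds, sizeof(*cmds), GFP_KERNEL);
	}
	if (!cmds)
		*num_cmds = 0;		/* caller treats this as fatal */

	return cmds;
}
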
1da177e4
LT
2664/**
2665 * qla2x00_setup_chip() - Load and start RISC firmware.
2666 * @ha: HA context
2667 *
2668 * Returns 0 on success.
2669 */
2670static int
e315cd28 2671qla2x00_setup_chip(scsi_qla_host_t *vha)
1da177e4 2672{
0107109e
AV
2673 int rval;
2674 uint32_t srisc_address = 0;
e315cd28 2675 struct qla_hw_data *ha = vha->hw;
3db0652e
AV
2676 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2677 unsigned long flags;
dda772e8 2678 uint16_t fw_major_version;
3db0652e 2679
7ec0effd 2680 if (IS_P3P_TYPE(ha)) {
a9083016 2681 rval = ha->isp_ops->load_risc(vha, &srisc_address);
14e303d9
AV
2682 if (rval == QLA_SUCCESS) {
2683 qla2x00_stop_firmware(vha);
a9083016 2684 goto enable_82xx_npiv;
14e303d9 2685 } else
b963752f 2686 goto failed;
a9083016
GM
2687 }
2688
3db0652e
AV
2689 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
2690 /* Disable SRAM, Instruction RAM and GP RAM parity. */
2691 spin_lock_irqsave(&ha->hardware_lock, flags);
2692 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
2693 RD_REG_WORD(&reg->hccr);
2694 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2695 }
1da177e4 2696
18e7555a
AV
2697 qla81xx_mpi_sync(vha);
2698
1da177e4 2699 /* Load firmware sequences */
e315cd28 2700 rval = ha->isp_ops->load_risc(vha, &srisc_address);
0107109e 2701 if (rval == QLA_SUCCESS) {
7c3df132
SK
2702 ql_dbg(ql_dbg_init, vha, 0x00c9,
2703 "Verifying Checksum of loaded RISC code.\n");
1da177e4 2704
e315cd28 2705 rval = qla2x00_verify_checksum(vha, srisc_address);
1da177e4
LT
2706 if (rval == QLA_SUCCESS) {
2707 /* Start firmware execution. */
7c3df132
SK
2708 ql_dbg(ql_dbg_init, vha, 0x00ca,
2709 "Starting firmware.\n");
1da177e4 2710
b0d6cabd
HM
2711 if (ql2xexlogins)
2712 ha->flags.exlogins_enabled = 1;
2713
2f56a7f1
HM
2714 if (ql2xexchoffld)
2715 ha->flags.exchoffld_enabled = 1;
2716
e315cd28 2717 rval = qla2x00_execute_fw(vha, srisc_address);
1da177e4 2718 /* Retrieve firmware information. */
dda772e8 2719 if (rval == QLA_SUCCESS) {
b0d6cabd
HM
2720 rval = qla2x00_set_exlogins_buffer(vha);
2721 if (rval != QLA_SUCCESS)
2722 goto failed;
2723
2f56a7f1
HM
2724 rval = qla2x00_set_exchoffld_buffer(vha);
2725 if (rval != QLA_SUCCESS)
2726 goto failed;
2727
a9083016 2728enable_82xx_npiv:
dda772e8 2729 fw_major_version = ha->fw_major_version;
7ec0effd 2730 if (IS_P3P_TYPE(ha))
3173167f 2731 qla82xx_check_md_needed(vha);
6246b8a1
GM
2732 else
2733 rval = qla2x00_get_fw_version(vha);
ca9e9c3e
AV
2734 if (rval != QLA_SUCCESS)
2735 goto failed;
2c3dfe3f 2736 ha->flags.npiv_supported = 0;
e315cd28 2737 if (IS_QLA2XXX_MIDTYPE(ha) &&
946fb891 2738 (ha->fw_attributes & BIT_2)) {
2c3dfe3f 2739 ha->flags.npiv_supported = 1;
4d0ea247
SJ
2740 if ((!ha->max_npiv_vports) ||
2741 ((ha->max_npiv_vports + 1) %
eb66dc60 2742 MIN_MULTI_ID_FABRIC))
4d0ea247 2743 ha->max_npiv_vports =
eb66dc60 2744 MIN_MULTI_ID_FABRIC - 1;
4d0ea247 2745 }
03e8c680 2746 qla2x00_get_resource_cnts(vha);
d743de66 2747
8d93f550
CD
2748 /*
2749 * Allocate the array of outstanding commands
2750 * now that we know the firmware resources.
2751 */
2752 rval = qla2x00_alloc_outstanding_cmds(ha,
2753 vha->req);
2754 if (rval != QLA_SUCCESS)
2755 goto failed;
2756
be5ea3cf 2757 if (!fw_major_version && ql2xallocfwdump
7ec0effd 2758 && !(IS_P3P_TYPE(ha)))
08de2844 2759 qla2x00_alloc_fw_dump(vha);
3b6e5b9d
CD
2760 } else {
2761 goto failed;
1da177e4
LT
2762 }
2763 } else {
7c3df132
SK
2764 ql_log(ql_log_fatal, vha, 0x00cd,
2765 "ISP Firmware failed checksum.\n");
2766 goto failed;
1da177e4 2767 }
c74d88a4
AV
2768 } else
2769 goto failed;
1da177e4 2770
3db0652e
AV
2771 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
2772 /* Enable proper parity. */
2773 spin_lock_irqsave(&ha->hardware_lock, flags);
2774 if (IS_QLA2300(ha))
2775 /* SRAM parity */
2776 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
2777 else
2778 /* SRAM, Instruction RAM and GP RAM parity */
2779 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
2780 RD_REG_WORD(&reg->hccr);
2781 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2782 }
2783
f3982d89
CD
2784 if (IS_QLA27XX(ha))
2785 ha->flags.fac_supported = 1;
2786 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1d2874de
JC
2787 uint32_t size;
2788
2789 rval = qla81xx_fac_get_sector_size(vha, &size);
2790 if (rval == QLA_SUCCESS) {
2791 ha->flags.fac_supported = 1;
2792 ha->fdt_block_size = size << 2;
2793 } else {
7c3df132 2794 ql_log(ql_log_warn, vha, 0x00ce,
1d2874de
JC
2795 "Unsupported FAC firmware (%d.%02d.%02d).\n",
2796 ha->fw_major_version, ha->fw_minor_version,
2797 ha->fw_subminor_version);
1ca60e3b 2798
f73cb695 2799 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
6246b8a1
GM
2800 ha->flags.fac_supported = 0;
2801 rval = QLA_SUCCESS;
2802 }
1d2874de
JC
2803 }
2804 }
ca9e9c3e 2805failed:
1da177e4 2806 if (rval) {
7c3df132
SK
2807 ql_log(ql_log_fatal, vha, 0x00cf,
2808 "Setup chip ****FAILED****.\n");
1da177e4
LT
2809 }
2810
2811 return (rval);
2812}
2813
2814/**
2815 * qla2x00_init_response_q_entries() - Initializes response queue entries.
2816 * @ha: HA context
2817 *
2818 * Beginning of request ring has initialization control block already built
2819 * by nvram config routine.
2820 *
2821 * Returns 0 on success.
2822 */
73208dfd
AC
2823void
2824qla2x00_init_response_q_entries(struct rsp_que *rsp)
1da177e4
LT
2825{
2826 uint16_t cnt;
2827 response_t *pkt;
2828
2afa19a9
AC
2829 rsp->ring_ptr = rsp->ring;
2830 rsp->ring_index = 0;
2831 rsp->status_srb = NULL;
e315cd28
AC
2832 pkt = rsp->ring_ptr;
2833 for (cnt = 0; cnt < rsp->length; cnt++) {
1da177e4
LT
2834 pkt->signature = RESPONSE_PROCESSED;
2835 pkt++;
2836 }
1da177e4
LT
2837}
2838
2839/**
2840 * qla2x00_update_fw_options() - Read and process firmware options.
2841 * @ha: HA context
2842 *
2843 * Returns 0 on success.
2844 */
abbd8870 2845void
e315cd28 2846qla2x00_update_fw_options(scsi_qla_host_t *vha)
1da177e4
LT
2847{
2848 uint16_t swing, emphasis, tx_sens, rx_sens;
e315cd28 2849 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
2850
2851 memset(ha->fw_options, 0, sizeof(ha->fw_options));
e315cd28 2852 qla2x00_get_fw_options(vha, ha->fw_options);
1da177e4
LT
2853
2854 if (IS_QLA2100(ha) || IS_QLA2200(ha))
2855 return;
2856
2857 /* Serial Link options. */
7c3df132
SK
2858 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
2859 "Serial link options.\n");
2860 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
2861 (uint8_t *)&ha->fw_seriallink_options,
2862 sizeof(ha->fw_seriallink_options));
1da177e4
LT
2863
2864 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
2865 if (ha->fw_seriallink_options[3] & BIT_2) {
2866 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
2867
2868 /* 1G settings */
2869 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
2870 emphasis = (ha->fw_seriallink_options[2] &
2871 (BIT_4 | BIT_3)) >> 3;
2872 tx_sens = ha->fw_seriallink_options[0] &
fa2a1ce5 2873 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
2874 rx_sens = (ha->fw_seriallink_options[0] &
2875 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
2876 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
2877 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
2878 if (rx_sens == 0x0)
2879 rx_sens = 0x3;
2880 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
2881 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
2882 ha->fw_options[10] |= BIT_5 |
2883 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
2884 (tx_sens & (BIT_1 | BIT_0));
2885
2886 /* 2G settings */
2887 swing = (ha->fw_seriallink_options[2] &
2888 (BIT_7 | BIT_6 | BIT_5)) >> 5;
2889 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
2890 tx_sens = ha->fw_seriallink_options[1] &
fa2a1ce5 2891 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
2892 rx_sens = (ha->fw_seriallink_options[1] &
2893 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
2894 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
2895 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
2896 if (rx_sens == 0x0)
2897 rx_sens = 0x3;
2898 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
2899 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
2900 ha->fw_options[11] |= BIT_5 |
2901 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
2902 (tx_sens & (BIT_1 | BIT_0));
2903 }
2904
2905 /* FCP2 options. */
2906 /* Return command IOCBs without waiting for an ABTS to complete. */
2907 ha->fw_options[3] |= BIT_13;
2908
2909 /* LED scheme. */
2910 if (ha->flags.enable_led_scheme)
2911 ha->fw_options[2] |= BIT_12;
2912
48c02fde
AV
2913 /* Detect ISP6312. */
2914 if (IS_QLA6312(ha))
2915 ha->fw_options[2] |= BIT_13;
2916
088d09d4
GM
2917 /* Set Retry FLOGI in case of P2P connection */
2918 if (ha->operating_mode == P2P) {
2919 ha->fw_options[2] |= BIT_3;
2920 ql_dbg(ql_dbg_disc, vha, 0x2100,
2921 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
2922 __func__, ha->fw_options[2]);
2923 }
2924
1da177e4 2925 /* Update firmware options. */
e315cd28 2926 qla2x00_set_fw_options(vha, ha->fw_options);
1da177e4
LT
2927}
2928
0107109e 2929void
e315cd28 2930qla24xx_update_fw_options(scsi_qla_host_t *vha)
0107109e
AV
2931{
2932 int rval;
e315cd28 2933 struct qla_hw_data *ha = vha->hw;
0107109e 2934
7ec0effd 2935 if (IS_P3P_TYPE(ha))
a9083016
GM
2936 return;
2937
f198cafa
HM
2938 /* Hold status IOCBs until ABTS response received. */
2939 if (ql2xfwholdabts)
2940 ha->fw_options[3] |= BIT_12;
2941
088d09d4
GM
2942 /* Set Retry FLOGI in case of P2P connection */
2943 if (ha->operating_mode == P2P) {
2944 ha->fw_options[2] |= BIT_3;
2945 ql_dbg(ql_dbg_disc, vha, 0x2101,
2946 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
2947 __func__, ha->fw_options[2]);
2948 }
2949
0107109e 2950 /* Update Serial Link options. */
f94097ed 2951 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
0107109e
AV
2952 return;
2953
e315cd28 2954 rval = qla2x00_set_serdes_params(vha,
f94097ed
AV
2955 le16_to_cpu(ha->fw_seriallink_options24[1]),
2956 le16_to_cpu(ha->fw_seriallink_options24[2]),
2957 le16_to_cpu(ha->fw_seriallink_options24[3]));
0107109e 2958 if (rval != QLA_SUCCESS) {
7c3df132 2959 ql_log(ql_log_warn, vha, 0x0104,
0107109e
AV
2960 "Unable to update Serial Link options (%x).\n", rval);
2961 }
2962}
2963
abbd8870 2964void
e315cd28 2965qla2x00_config_rings(struct scsi_qla_host *vha)
abbd8870 2966{
e315cd28 2967 struct qla_hw_data *ha = vha->hw;
3d71644c 2968 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
73208dfd
AC
2969 struct req_que *req = ha->req_q_map[0];
2970 struct rsp_que *rsp = ha->rsp_q_map[0];
abbd8870
AV
2971
2972 /* Setup ring parameters in initialization control block. */
ad950360
BVA
2973 ha->init_cb->request_q_outpointer = cpu_to_le16(0);
2974 ha->init_cb->response_q_inpointer = cpu_to_le16(0);
e315cd28
AC
2975 ha->init_cb->request_q_length = cpu_to_le16(req->length);
2976 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
2977 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
2978 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
2979 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
2980 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
abbd8870
AV
2981
2982 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
2983 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
2984 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
2985 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
2986 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
2987}
2988
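
/*
 * Editor's note -- qla2x00_config_rings() above programs ring addresses into
 * the init control block by splitting each 64-bit DMA address into two
 * little-endian 32-bit words (the LSD()/MSD() plus cpu_to_le32() usage). A
 * small sketch of that split with the generic kernel helpers follows;
 * struct example_q_desc is a hypothetical layout, not the qla2xxx ICB.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>

struct example_q_desc {
	__le32 addr_lo;
	__le32 addr_hi;
	__le16 length;
};

/* Fill a queue descriptor with a bus address and length for the firmware. */
static void example_fill_q_desc(struct example_q_desc *d,
				dma_addr_t ring_dma, u16 ring_len)
{
	d->addr_lo = cpu_to_le32(lower_32_bits(ring_dma));
	d->addr_hi = cpu_to_le32(upper_32_bits(ring_dma));
	d->length  = cpu_to_le16(ring_len);
}
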
0107109e 2989void
e315cd28 2990qla24xx_config_rings(struct scsi_qla_host *vha)
0107109e 2991{
e315cd28 2992 struct qla_hw_data *ha = vha->hw;
118e2ef9 2993 device_reg_t *reg = ISP_QUE_REG(ha, 0);
73208dfd
AC
2994 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
2995 struct qla_msix_entry *msix;
0107109e 2996 struct init_cb_24xx *icb;
73208dfd
AC
2997 uint16_t rid = 0;
2998 struct req_que *req = ha->req_q_map[0];
2999 struct rsp_que *rsp = ha->rsp_q_map[0];
0107109e 3000
6246b8a1 3001 /* Setup ring parameters in initialization control block. */
0107109e 3002 icb = (struct init_cb_24xx *)ha->init_cb;
ad950360
BVA
3003 icb->request_q_outpointer = cpu_to_le16(0);
3004 icb->response_q_inpointer = cpu_to_le16(0);
e315cd28
AC
3005 icb->request_q_length = cpu_to_le16(req->length);
3006 icb->response_q_length = cpu_to_le16(rsp->length);
3007 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
3008 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
3009 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
3010 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
0107109e 3011
2d70c103 3012 /* Setup ATIO queue dma pointers for target mode */
ad950360 3013 icb->atio_q_inpointer = cpu_to_le16(0);
2d70c103
NB
3014 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
3015 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
3016 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
3017
7c6300e3 3018 if (IS_SHADOW_REG_CAPABLE(ha))
ad950360 3019 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
7c6300e3 3020
f73cb695 3021 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ad950360
BVA
3022 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
3023 icb->rid = cpu_to_le16(rid);
73208dfd
AC
3024 if (ha->flags.msix_enabled) {
3025 msix = &ha->msix_entries[1];
7c3df132
SK
3026 ql_dbg(ql_dbg_init, vha, 0x00fd,
3027 "Registering vector 0x%x for base que.\n",
3028 msix->entry);
73208dfd
AC
3029 icb->msix = cpu_to_le16(msix->entry);
3030 }
3031 /* Use alternate PCI bus number */
3032 if (MSB(rid))
ad950360 3033 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
73208dfd
AC
3034 /* Use alternate PCI devfn */
3035 if (LSB(rid))
ad950360 3036 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
73208dfd 3037
3155754a 3038 /* Use Disable MSIX Handshake mode for capable adapters */
6246b8a1
GM
3039 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
3040 (ha->flags.msix_enabled)) {
ad950360 3041 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
3155754a 3042 ha->flags.disable_msix_handshake = 1;
7c3df132
SK
3043 ql_dbg(ql_dbg_init, vha, 0x00fe,
3044 "MSIX Handshake Disable Mode turned on.\n");
3155754a 3045 } else {
ad950360 3046 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
3155754a 3047 }
ad950360 3048 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
73208dfd
AC
3049
3050 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
3051 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
3052 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
3053 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
3054 } else {
3055 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
3056 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
3057 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
3058 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
3059 }
aa230bc5 3060 qlt_24xx_config_rings(vha);
2d70c103 3061
73208dfd
AC
3062 /* PCI posting */
3063 RD_REG_DWORD(&ioreg->hccr);
0107109e
AV
3064}
3065
1da177e4
LT
3066/**
3067 * qla2x00_init_rings() - Initializes firmware.
3068 * @ha: HA context
3069 *
3070 * Beginning of request ring has initialization control block already built
3071 * by nvram config routine.
3072 *
3073 * Returns 0 on success.
3074 */
8ae6d9c7 3075int
e315cd28 3076qla2x00_init_rings(scsi_qla_host_t *vha)
1da177e4
LT
3077{
3078 int rval;
3079 unsigned long flags = 0;
29bdccbe 3080 int cnt, que;
e315cd28 3081 struct qla_hw_data *ha = vha->hw;
29bdccbe
AC
3082 struct req_que *req;
3083 struct rsp_que *rsp;
2c3dfe3f
SJ
3084 struct mid_init_cb_24xx *mid_init_cb =
3085 (struct mid_init_cb_24xx *) ha->init_cb;
1da177e4
LT
3086
3087 spin_lock_irqsave(&ha->hardware_lock, flags);
3088
3089 /* Clear outstanding commands array. */
2afa19a9 3090 for (que = 0; que < ha->max_req_queues; que++) {
29bdccbe 3091 req = ha->req_q_map[que];
cb43285f 3092 if (!req || !test_bit(que, ha->req_qid_map))
29bdccbe 3093 continue;
7c6300e3
JC
3094 req->out_ptr = (void *)(req->ring + req->length);
3095 *req->out_ptr = 0;
8d93f550 3096 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
29bdccbe 3097 req->outstanding_cmds[cnt] = NULL;
1da177e4 3098
2afa19a9 3099 req->current_outstanding_cmd = 1;
1da177e4 3100
29bdccbe
AC
3101 /* Initialize firmware. */
3102 req->ring_ptr = req->ring;
3103 req->ring_index = 0;
3104 req->cnt = req->length;
3105 }
1da177e4 3106
2afa19a9 3107 for (que = 0; que < ha->max_rsp_queues; que++) {
29bdccbe 3108 rsp = ha->rsp_q_map[que];
cb43285f 3109 if (!rsp || !test_bit(que, ha->rsp_qid_map))
29bdccbe 3110 continue;
7c6300e3
JC
3111 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
3112 *rsp->in_ptr = 0;
29bdccbe 3113 /* Initialize response queue entries */
8ae6d9c7
GM
3114 if (IS_QLAFX00(ha))
3115 qlafx00_init_response_q_entries(rsp);
3116 else
3117 qla2x00_init_response_q_entries(rsp);
29bdccbe 3118 }
1da177e4 3119
2d70c103
NB
3120 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
3121 ha->tgt.atio_ring_index = 0;
3122 /* Initialize ATIO queue entries */
3123 qlt_init_atio_q_entries(vha);
3124
e315cd28 3125 ha->isp_ops->config_rings(vha);
1da177e4
LT
3126
3127 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3128
8ae6d9c7
GM
3129 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
3130
3131 if (IS_QLAFX00(ha)) {
3132 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
3133 goto next_check;
3134 }
3135
1da177e4 3136 /* Update any ISP specific firmware options before initialization. */
e315cd28 3137 ha->isp_ops->update_fw_options(vha);
1da177e4 3138
605aa2bc 3139 if (ha->flags.npiv_supported) {
45980cc2 3140 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
605aa2bc 3141 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
c48339de 3142 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
605aa2bc
LC
3143 }
3144
24a08138 3145 if (IS_FWI2_CAPABLE(ha)) {
ad950360 3146 mid_init_cb->options = cpu_to_le16(BIT_1);
24a08138 3147 mid_init_cb->init_cb.execution_throttle =
03e8c680 3148 cpu_to_le16(ha->cur_fw_xcb_count);
40f3862b
JC
3149 ha->flags.dport_enabled =
3150 (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
3151 ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
3152 (ha->flags.dport_enabled) ? "enabled" : "disabled");
3153 /* FA-WWPN Status */
2486c627 3154 ha->flags.fawwpn_enabled =
40f3862b 3155 (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
2486c627
HM
3156 ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n",
3157 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
24a08138 3158 }
2c3dfe3f 3159
e315cd28 3160 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
8ae6d9c7 3161next_check:
1da177e4 3162 if (rval) {
7c3df132
SK
3163 ql_log(ql_log_fatal, vha, 0x00d2,
3164 "Init Firmware **** FAILED ****.\n");
1da177e4 3165 } else {
7c3df132
SK
3166 ql_dbg(ql_dbg_init, vha, 0x00d3,
3167 "Init Firmware -- success.\n");
1da177e4
LT
3168 }
3169
3170 return (rval);
3171}
3172
3173/**
3174 * qla2x00_fw_ready() - Waits for firmware ready.
3175 * @ha: HA context
3176 *
3177 * Returns 0 on success.
3178 */
3179static int
e315cd28 3180qla2x00_fw_ready(scsi_qla_host_t *vha)
1da177e4
LT
3181{
3182 int rval;
4d4df193 3183 unsigned long wtime, mtime, cs84xx_time;
1da177e4
LT
3184 uint16_t min_wait; /* Minimum wait time if loop is down */
3185 uint16_t wait_time; /* Wait time if loop is coming ready */
b5a340dd 3186 uint16_t state[6];
e315cd28 3187 struct qla_hw_data *ha = vha->hw;
1da177e4 3188
8ae6d9c7
GM
3189 if (IS_QLAFX00(vha->hw))
3190 return qlafx00_fw_ready(vha);
3191
1da177e4
LT
3192 rval = QLA_SUCCESS;
3193
33461491
CD
3194 /* Time to wait for loop down */
3195 if (IS_P3P_TYPE(ha))
3196 min_wait = 30;
3197 else
3198 min_wait = 20;
1da177e4
LT
3199
3200 /*
3201 * Firmware should take at most one RATOV to login, plus 5 seconds for
3202 * our own processing.
3203 */
3204 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
3205 wait_time = min_wait;
3206 }
3207
3208 /* Min wait time if loop down */
3209 mtime = jiffies + (min_wait * HZ);
3210
3211 /* wait time before firmware ready */
3212 wtime = jiffies + (wait_time * HZ);
3213
3214 /* Wait for ISP to finish LIP */
e315cd28 3215 if (!vha->flags.init_done)
7c3df132
SK
3216 ql_log(ql_log_info, vha, 0x801e,
3217 "Waiting for LIP to complete.\n");
1da177e4
LT
3218
3219 do {
5b939038 3220 memset(state, -1, sizeof(state));
e315cd28 3221 rval = qla2x00_get_firmware_state(vha, state);
1da177e4 3222 if (rval == QLA_SUCCESS) {
4d4df193 3223 if (state[0] < FSTATE_LOSS_OF_SYNC) {
e315cd28 3224 vha->device_flags &= ~DFLG_NO_CABLE;
1da177e4 3225 }
4d4df193 3226 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
7c3df132
SK
3227 ql_dbg(ql_dbg_taskm, vha, 0x801f,
3228 "fw_state=%x 84xx=%x.\n", state[0],
3229 state[2]);
4d4df193
HK
3230 if ((state[2] & FSTATE_LOGGED_IN) &&
3231 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
7c3df132
SK
3232 ql_dbg(ql_dbg_taskm, vha, 0x8028,
3233 "Sending verify iocb.\n");
4d4df193
HK
3234
3235 cs84xx_time = jiffies;
e315cd28 3236 rval = qla84xx_init_chip(vha);
7c3df132
SK
3237 if (rval != QLA_SUCCESS) {
3238 ql_log(ql_log_warn,
cfb0919c 3239 vha, 0x8007,
7c3df132 3240 "Init chip failed.\n");
4d4df193 3241 break;
7c3df132 3242 }
4d4df193
HK
3243
3244 /* Add time taken to initialize. */
3245 cs84xx_time = jiffies - cs84xx_time;
3246 wtime += cs84xx_time;
3247 mtime += cs84xx_time;
cfb0919c 3248 ql_dbg(ql_dbg_taskm, vha, 0x8008,
7c3df132
SK
3249 "Increasing wait time by %ld. "
3250 "New time %ld.\n", cs84xx_time,
3251 wtime);
4d4df193
HK
3252 }
3253 } else if (state[0] == FSTATE_READY) {
7c3df132
SK
3254 ql_dbg(ql_dbg_taskm, vha, 0x8037,
3255 "F/W Ready - OK.\n");
1da177e4 3256
e315cd28 3257 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1da177e4
LT
3258 &ha->login_timeout, &ha->r_a_tov);
3259
3260 rval = QLA_SUCCESS;
3261 break;
3262 }
3263
3264 rval = QLA_FUNCTION_FAILED;
3265
e315cd28 3266 if (atomic_read(&vha->loop_down_timer) &&
4d4df193 3267 state[0] != FSTATE_READY) {
1da177e4 3268 /* Loop down. Timeout on min_wait for states
fa2a1ce5
AV
3269 * other than Wait for Login.
3270 */
1da177e4 3271 if (time_after_eq(jiffies, mtime)) {
7c3df132 3272 ql_log(ql_log_info, vha, 0x8038,
1da177e4
LT
3273 "Cable is unplugged...\n");
3274
e315cd28 3275 vha->device_flags |= DFLG_NO_CABLE;
1da177e4
LT
3276 break;
3277 }
3278 }
3279 } else {
3280 /* Mailbox cmd failed. Timeout on min_wait. */
cdbb0a4f 3281 if (time_after_eq(jiffies, mtime) ||
7190575f 3282 ha->flags.isp82xx_fw_hung)
1da177e4
LT
3283 break;
3284 }
3285
3286 if (time_after_eq(jiffies, wtime))
3287 break;
3288
3289 /* Delay for a while */
3290 msleep(500);
1da177e4
LT
3291 } while (1);
3292
7c3df132 3293 ql_dbg(ql_dbg_taskm, vha, 0x803a,
b5a340dd
JC
3294 "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
3295 state[1], state[2], state[3], state[4], state[5], jiffies);
1da177e4 3296
cfb0919c 3297 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
7c3df132
SK
3298 ql_log(ql_log_warn, vha, 0x803b,
3299 "Firmware ready **** FAILED ****.\n");
1da177e4
LT
3300 }
3301
3302 return (rval);
3303}
3304
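
/*
 * Editor's note -- qla2x00_fw_ready() above waits against two jiffies
 * deadlines: a short one (mtime) that applies while the loop is down and a
 * longer overall one (wtime). A hedged sketch of that two-deadline loop
 * follows, with hypothetical hw_ready()/link_down() predicates supplied by
 * the caller; it is not qla2xxx code.
 */
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Wait for readiness with a short link-down deadline and a long overall one. */
static int example_wait_ready(bool (*hw_ready)(void), bool (*link_down)(void),
			      unsigned int min_wait_s, unsigned int max_wait_s)
{
	unsigned long mtime = jiffies + min_wait_s * HZ;	/* link-down cap */
	unsigned long wtime = jiffies + max_wait_s * HZ;	/* overall cap */

	do {
		if (hw_ready())
			return 0;
		/* With the link down, give up at the shorter deadline. */
		if (link_down() && time_after_eq(jiffies, mtime))
			return -ETIMEDOUT;
		msleep(500);				/* poll twice a second */
	} while (!time_after_eq(jiffies, wtime));

	return -ETIMEDOUT;
}
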
3305/*
3306* qla2x00_configure_hba
3307* Setup adapter context.
3308*
3309* Input:
3310* ha = adapter state pointer.
3311*
3312* Returns:
3313* 0 = success
3314*
3315* Context:
3316* Kernel context.
3317*/
3318static int
e315cd28 3319qla2x00_configure_hba(scsi_qla_host_t *vha)
1da177e4
LT
3320{
3321 int rval;
3322 uint16_t loop_id;
3323 uint16_t topo;
2c3dfe3f 3324 uint16_t sw_cap;
1da177e4
LT
3325 uint8_t al_pa;
3326 uint8_t area;
3327 uint8_t domain;
3328 char connect_type[22];
e315cd28 3329 struct qla_hw_data *ha = vha->hw;
f24b5cb8 3330 unsigned long flags;
61e1b269 3331 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1da177e4
LT
3332
3333 /* Get host addresses. */
e315cd28 3334 rval = qla2x00_get_adapter_id(vha,
2c3dfe3f 3335 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1da177e4 3336 if (rval != QLA_SUCCESS) {
e315cd28 3337 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
6246b8a1 3338 IS_CNA_CAPABLE(ha) ||
33135aa2 3339 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
7c3df132
SK
3340 ql_dbg(ql_dbg_disc, vha, 0x2008,
3341 "Loop is in a transition state.\n");
33135aa2 3342 } else {
7c3df132
SK
3343 ql_log(ql_log_warn, vha, 0x2009,
3344 "Unable to get host loop ID.\n");
61e1b269
JC
3345 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
3346 (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
3347 ql_log(ql_log_warn, vha, 0x1151,
3348 "Doing link init.\n");
3349 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
3350 return rval;
3351 }
e315cd28 3352 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
33135aa2 3353 }
1da177e4
LT
3354 return (rval);
3355 }
3356
3357 if (topo == 4) {
7c3df132
SK
3358 ql_log(ql_log_info, vha, 0x200a,
3359 "Cannot get topology - retrying.\n");
1da177e4
LT
3360 return (QLA_FUNCTION_FAILED);
3361 }
3362
e315cd28 3363 vha->loop_id = loop_id;
1da177e4
LT
3364
3365 /* initialize */
3366 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
3367 ha->operating_mode = LOOP;
2c3dfe3f 3368 ha->switch_cap = 0;
1da177e4
LT
3369
3370 switch (topo) {
3371 case 0:
7c3df132 3372 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
1da177e4
LT
3373 ha->current_topology = ISP_CFG_NL;
3374 strcpy(connect_type, "(Loop)");
3375 break;
3376
3377 case 1:
7c3df132 3378 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2c3dfe3f 3379 ha->switch_cap = sw_cap;
1da177e4
LT
3380 ha->current_topology = ISP_CFG_FL;
3381 strcpy(connect_type, "(FL_Port)");
3382 break;
3383
3384 case 2:
7c3df132 3385 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
1da177e4
LT
3386 ha->operating_mode = P2P;
3387 ha->current_topology = ISP_CFG_N;
3388 strcpy(connect_type, "(N_Port-to-N_Port)");
3389 break;
3390
3391 case 3:
7c3df132 3392 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2c3dfe3f 3393 ha->switch_cap = sw_cap;
1da177e4
LT
3394 ha->operating_mode = P2P;
3395 ha->current_topology = ISP_CFG_F;
3396 strcpy(connect_type, "(F_Port)");
3397 break;
3398
3399 default:
7c3df132
SK
3400 ql_dbg(ql_dbg_disc, vha, 0x200f,
3401 "HBA in unknown topology %x, using NL.\n", topo);
1da177e4
LT
3402 ha->current_topology = ISP_CFG_NL;
3403 strcpy(connect_type, "(Loop)");
3404 break;
3405 }
3406
3407 /* Save Host port and loop ID. */
3408 /* byte order - Big Endian */
e315cd28
AC
3409 vha->d_id.b.domain = domain;
3410 vha->d_id.b.area = area;
3411 vha->d_id.b.al_pa = al_pa;
1da177e4 3412
f24b5cb8 3413 spin_lock_irqsave(&ha->vport_slock, flags);
2d70c103 3414 qlt_update_vp_map(vha, SET_AL_PA);
f24b5cb8 3415 spin_unlock_irqrestore(&ha->vport_slock, flags);
2d70c103 3416
e315cd28 3417 if (!vha->flags.init_done)
7c3df132
SK
3418 ql_log(ql_log_info, vha, 0x2010,
3419 "Topology - %s, Host Loop address 0x%x.\n",
e315cd28 3420 connect_type, vha->loop_id);
1da177e4 3421
1da177e4
LT
3422 return(rval);
3423}
3424
a9083016 3425inline void
e315cd28
AC
3426qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
3427 char *def)
9bb9fcf2
AV
3428{
3429 char *st, *en;
3430 uint16_t index;
e315cd28 3431 struct qla_hw_data *ha = vha->hw;
ab671149 3432 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
6246b8a1 3433 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
9bb9fcf2
AV
3434
3435 if (memcmp(model, BINZERO, len) != 0) {
3436 strncpy(ha->model_number, model, len);
3437 st = en = ha->model_number;
3438 en += len - 1;
3439 while (en > st) {
3440 if (*en != 0x20 && *en != 0x00)
3441 break;
3442 *en-- = '\0';
3443 }
3444
3445 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
3446 if (use_tbl &&
3447 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2 3448 index < QLA_MODEL_NAMES)
1ee27146
JC
3449 strncpy(ha->model_desc,
3450 qla2x00_model_name[index * 2 + 1],
3451 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
3452 } else {
3453 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
3454 if (use_tbl &&
3455 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2
AV
3456 index < QLA_MODEL_NAMES) {
3457 strcpy(ha->model_number,
3458 qla2x00_model_name[index * 2]);
1ee27146
JC
3459 strncpy(ha->model_desc,
3460 qla2x00_model_name[index * 2 + 1],
3461 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
3462 } else {
3463 strcpy(ha->model_number, def);
3464 }
3465 }
1ee27146 3466 if (IS_FWI2_CAPABLE(ha))
e315cd28 3467 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1ee27146 3468 sizeof(ha->model_desc));
9bb9fcf2
AV
3469}
3470
4e08df3f
DM
3471/* On sparc systems, obtain port and node WWN from firmware
3472 * properties.
3473 */
e315cd28 3474static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
4e08df3f
DM
3475{
3476#ifdef CONFIG_SPARC
e315cd28 3477 struct qla_hw_data *ha = vha->hw;
4e08df3f 3478 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
3479 struct device_node *dp = pci_device_to_OF_node(pdev);
3480 const u8 *val;
4e08df3f
DM
3481 int len;
3482
3483 val = of_get_property(dp, "port-wwn", &len);
3484 if (val && len >= WWN_SIZE)
3485 memcpy(nv->port_name, val, WWN_SIZE);
3486
3487 val = of_get_property(dp, "node-wwn", &len);
3488 if (val && len >= WWN_SIZE)
3489 memcpy(nv->node_name, val, WWN_SIZE);
3490#endif
3491}
3492
1da177e4
LT
3493/*
3494* NVRAM configuration for ISP 2xxx
3495*
3496* Input:
3497* ha = adapter block pointer.
3498*
3499* Output:
3500* initialization control block in response_ring
3501* host adapters parameters in host adapter block
3502*
3503* Returns:
3504* 0 = success.
3505*/
abbd8870 3506int
e315cd28 3507qla2x00_nvram_config(scsi_qla_host_t *vha)
1da177e4 3508{
4e08df3f 3509 int rval;
0107109e
AV
3510 uint8_t chksum = 0;
3511 uint16_t cnt;
3512 uint8_t *dptr1, *dptr2;
e315cd28 3513 struct qla_hw_data *ha = vha->hw;
0107109e 3514 init_cb_t *icb = ha->init_cb;
281afe19
SJ
3515 nvram_t *nv = ha->nvram;
3516 uint8_t *ptr = ha->nvram;
3d71644c 3517 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 3518
4e08df3f
DM
3519 rval = QLA_SUCCESS;
3520
1da177e4 3521 /* Determine NVRAM starting address. */
0107109e 3522 ha->nvram_size = sizeof(nvram_t);
1da177e4
LT
3523 ha->nvram_base = 0;
3524 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
3525 if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
3526 ha->nvram_base = 0x80;
3527
3528 /* Get NVRAM data and calculate checksum. */
e315cd28 3529 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
0107109e
AV
3530 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
3531 chksum += *ptr++;
1da177e4 3532
7c3df132
SK
3533 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
3534 "Contents of NVRAM.\n");
3535 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
3536 (uint8_t *)nv, ha->nvram_size);
1da177e4
LT
3537
3538 /* Bad NVRAM data, set defaults parameters. */
3539 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
3540 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
3541 /* Reset NVRAM data. */
7c3df132 3542 ql_log(ql_log_warn, vha, 0x0064,
9e336520 3543 "Inconsistent NVRAM "
7c3df132
SK
3544 "detected: checksum=0x%x id=%c version=0x%x.\n",
3545 chksum, nv->id[0], nv->nvram_version);
3546 ql_log(ql_log_warn, vha, 0x0065,
3547 "Falling back to "
3548 "functioning (yet invalid -- WWPN) defaults.\n");
4e08df3f
DM
3549
3550 /*
3551 * Set default initialization control block.
3552 */
3553 memset(nv, 0, ha->nvram_size);
3554 nv->parameter_block_version = ICB_VERSION;
3555
3556 if (IS_QLA23XX(ha)) {
3557 nv->firmware_options[0] = BIT_2 | BIT_1;
3558 nv->firmware_options[1] = BIT_7 | BIT_5;
3559 nv->add_firmware_options[0] = BIT_5;
3560 nv->add_firmware_options[1] = BIT_5 | BIT_4;
98aee70d 3561 nv->frame_payload_size = 2048;
4e08df3f
DM
3562 nv->special_options[1] = BIT_7;
3563 } else if (IS_QLA2200(ha)) {
3564 nv->firmware_options[0] = BIT_2 | BIT_1;
3565 nv->firmware_options[1] = BIT_7 | BIT_5;
3566 nv->add_firmware_options[0] = BIT_5;
3567 nv->add_firmware_options[1] = BIT_5 | BIT_4;
98aee70d 3568 nv->frame_payload_size = 1024;
4e08df3f
DM
3569 } else if (IS_QLA2100(ha)) {
3570 nv->firmware_options[0] = BIT_3 | BIT_1;
3571 nv->firmware_options[1] = BIT_5;
98aee70d 3572 nv->frame_payload_size = 1024;
4e08df3f
DM
3573 }
3574
ad950360
BVA
3575 nv->max_iocb_allocation = cpu_to_le16(256);
3576 nv->execution_throttle = cpu_to_le16(16);
4e08df3f
DM
3577 nv->retry_count = 8;
3578 nv->retry_delay = 1;
3579
3580 nv->port_name[0] = 33;
3581 nv->port_name[3] = 224;
3582 nv->port_name[4] = 139;
3583
e315cd28 3584 qla2xxx_nvram_wwn_from_ofw(vha, nv);
4e08df3f
DM
3585
3586 nv->login_timeout = 4;
3587
3588 /*
3589 * Set default host adapter parameters
3590 */
3591 nv->host_p[1] = BIT_2;
3592 nv->reset_delay = 5;
3593 nv->port_down_retry_count = 8;
ad950360 3594 nv->max_luns_per_target = cpu_to_le16(8);
4e08df3f
DM
3595 nv->link_down_timeout = 60;
3596
3597 rval = 1;
1da177e4
LT
3598 }
3599
3600#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
3601 /*
3602 * The SN2 does not provide BIOS emulation which means you can't change
3603 * potentially bogus BIOS settings. Force the use of default settings
3604 * for link rate and frame size. Hope that the rest of the settings
3605 * are valid.
3606 */
3607 if (ia64_platform_is("sn2")) {
98aee70d 3608 nv->frame_payload_size = 2048;
1da177e4
LT
3609 if (IS_QLA23XX(ha))
3610 nv->special_options[1] = BIT_7;
3611 }
3612#endif
3613
3614 /* Reset Initialization control block */
0107109e 3615 memset(icb, 0, ha->init_cb_size);
1da177e4
LT
3616
3617 /*
3618 * Setup driver NVRAM options.
3619 */
3620 nv->firmware_options[0] |= (BIT_6 | BIT_1);
3621 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
3622 nv->firmware_options[1] |= (BIT_5 | BIT_0);
3623 nv->firmware_options[1] &= ~BIT_4;
3624
3625 if (IS_QLA23XX(ha)) {
3626 nv->firmware_options[0] |= BIT_2;
3627 nv->firmware_options[0] &= ~BIT_3;
2d70c103 3628 nv->special_options[0] &= ~BIT_6;
0107109e 3629 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
1da177e4
LT
3630
3631 if (IS_QLA2300(ha)) {
3632 if (ha->fb_rev == FPM_2310) {
3633 strcpy(ha->model_number, "QLA2310");
3634 } else {
3635 strcpy(ha->model_number, "QLA2300");
3636 }
3637 } else {
e315cd28 3638 qla2x00_set_model_info(vha, nv->model_number,
9bb9fcf2 3639 sizeof(nv->model_number), "QLA23xx");
1da177e4
LT
3640 }
3641 } else if (IS_QLA2200(ha)) {
3642 nv->firmware_options[0] |= BIT_2;
3643 /*
3644 * 'Point-to-point preferred, else loop' is not a safe
3645 * connection mode setting.
3646 */
3647 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
3648 (BIT_5 | BIT_4)) {
3649 /* Force 'loop preferred, else point-to-point'. */
3650 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
3651 nv->add_firmware_options[0] |= BIT_5;
3652 }
3653 strcpy(ha->model_number, "QLA22xx");
3654 } else /*if (IS_QLA2100(ha))*/ {
3655 strcpy(ha->model_number, "QLA2100");
3656 }
3657
3658 /*
3659 * Copy over NVRAM RISC parameter block to initialization control block.
3660 */
3661 dptr1 = (uint8_t *)icb;
3662 dptr2 = (uint8_t *)&nv->parameter_block_version;
3663 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
3664 while (cnt--)
3665 *dptr1++ = *dptr2++;
3666
3667 /* Copy 2nd half. */
3668 dptr1 = (uint8_t *)icb->add_firmware_options;
3669 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
3670 while (cnt--)
3671 *dptr1++ = *dptr2++;
3672
5341e868
AV
3673 /* Use alternate WWN? */
3674 if (nv->host_p[1] & BIT_7) {
3675 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
3676 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
3677 }
3678
1da177e4
LT
3679 /* Prepare nodename */
3680 if ((icb->firmware_options[1] & BIT_6) == 0) {
3681 /*
3682 * Firmware will apply the following mask if the nodename was
3683 * not provided.
3684 */
3685 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
3686 icb->node_name[0] &= 0xF0;
3687 }
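	/*
	 * The masking above turns, for example, a port name of
	 * 21:00:00:e0:8b:01:02:03 into the derived node name
	 * 20:00:00:e0:8b:01:02:03 (only the low nibble of byte 0 changes).
	 */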
3688
3689 /*
3690 * Set host adapter parameters.
3691 */
3ce8866c
SK
3692
3693 /*
3694 * BIT_7 in the host-parameters section allows for modification to
3695 * internal driver logging.
3696 */
0181944f 3697 if (nv->host_p[0] & BIT_7)
cfb0919c 3698 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
1da177e4
LT
3699 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
3700 /* Always load RISC code on non ISP2[12]00 chips. */
3701 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
3702 ha->flags.disable_risc_code_load = 0;
3703 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
3704 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
3705 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
06c22bd1 3706 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
d4c760c2 3707 ha->flags.disable_serdes = 0;
1da177e4
LT
3708
3709 ha->operating_mode =
3710 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
3711
3712 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
3713 sizeof(ha->fw_seriallink_options));
3714
3715 /* save HBA serial number */
3716 ha->serial0 = icb->port_name[5];
3717 ha->serial1 = icb->port_name[6];
3718 ha->serial2 = icb->port_name[7];
e315cd28
AC
3719 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
3720 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1da177e4 3721
ad950360 3722 icb->execution_throttle = cpu_to_le16(0xFFFF);
1da177e4
LT
3723
3724 ha->retry_count = nv->retry_count;
3725
3726 /* Set minimum login_timeout to 4 seconds. */
5b91490e 3727 if (nv->login_timeout != ql2xlogintimeout)
1da177e4
LT
3728 nv->login_timeout = ql2xlogintimeout;
3729 if (nv->login_timeout < 4)
3730 nv->login_timeout = 4;
3731 ha->login_timeout = nv->login_timeout;
1da177e4 3732
00a537b8
AV
3733 /* Set minimum RATOV to 100 tenths of a second. */
3734 ha->r_a_tov = 100;
1da177e4 3735
1da177e4
LT
3736 ha->loop_reset_delay = nv->reset_delay;
3737
1da177e4
LT
3738 /* Link Down Timeout = 0:
3739 *
3740 * When Port Down timer expires we will start returning
3741 * I/O's to OS with "DID_NO_CONNECT".
3742 *
3743 * Link Down Timeout != 0:
3744 *
3745 * The driver waits for the link to come up after link down
3746 * before returning I/Os to OS with "DID_NO_CONNECT".
fa2a1ce5 3747 */
1da177e4
LT
3748 if (nv->link_down_timeout == 0) {
3749 ha->loop_down_abort_time =
354d6b21 3750 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
1da177e4
LT
3751 } else {
3752 ha->link_down_timeout = nv->link_down_timeout;
3753 ha->loop_down_abort_time =
3754 (LOOP_DOWN_TIME - ha->link_down_timeout);
fa2a1ce5 3755 }
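	/*
	 * Worked example for the branch above: with the default
	 * nv->link_down_timeout of 60 seconds, loop_down_abort_time becomes
	 * LOOP_DOWN_TIME - 60; with link_down_timeout == 0 it falls back to
	 * LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT.
	 */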
1da177e4 3756
1da177e4
LT
3757 /*
3758 * Need enough time to try and get the port back.
3759 */
3760 ha->port_down_retry_count = nv->port_down_retry_count;
3761 if (qlport_down_retry)
3762 ha->port_down_retry_count = qlport_down_retry;
3763 /* Set login_retry_count */
3764 ha->login_retry_count = nv->retry_count;
3765 if (ha->port_down_retry_count == nv->port_down_retry_count &&
3766 ha->port_down_retry_count > 3)
3767 ha->login_retry_count = ha->port_down_retry_count;
3768 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
3769 ha->login_retry_count = ha->port_down_retry_count;
3770 if (ql2xloginretrycount)
3771 ha->login_retry_count = ql2xloginretrycount;
3772
ad950360 3773 icb->lun_enables = cpu_to_le16(0);
1da177e4
LT
3774 icb->command_resource_count = 0;
3775 icb->immediate_notify_resource_count = 0;
ad950360 3776 icb->timeout = cpu_to_le16(0);
1da177e4
LT
3777
3778 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3779 /* Enable RIO */
3780 icb->firmware_options[0] &= ~BIT_3;
3781 icb->add_firmware_options[0] &=
3782 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
3783 icb->add_firmware_options[0] |= BIT_2;
3784 icb->response_accumulation_timer = 3;
3785 icb->interrupt_delay_timer = 5;
3786
e315cd28 3787 vha->flags.process_response_queue = 1;
1da177e4 3788 } else {
4fdfefe5 3789 /* Enable ZIO. */
e315cd28 3790 if (!vha->flags.init_done) {
4fdfefe5
AV
3791 ha->zio_mode = icb->add_firmware_options[0] &
3792 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
3793 ha->zio_timer = icb->interrupt_delay_timer ?
3794 icb->interrupt_delay_timer: 2;
3795 }
1da177e4
LT
3796 icb->add_firmware_options[0] &=
3797 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
e315cd28 3798 vha->flags.process_response_queue = 0;
4fdfefe5 3799 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d
AV
3800 ha->zio_mode = QLA_ZIO_MODE_6;
3801
7c3df132 3802 ql_log(ql_log_info, vha, 0x0068,
4fdfefe5
AV
3803 "ZIO mode %d enabled; timer delay (%d us).\n",
3804 ha->zio_mode, ha->zio_timer * 100);
1da177e4 3805
4fdfefe5
AV
3806 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
3807 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
e315cd28 3808 vha->flags.process_response_queue = 1;
1da177e4
LT
3809 }
3810 }
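	/*
	 * Note on units: ha->zio_timer is kept in 100-microsecond increments,
	 * which is why the log message above multiplies it by 100; the default
	 * value of 2 therefore corresponds to a 200 us interrupt delay.
	 */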
3811
4e08df3f 3812 if (rval) {
7c3df132
SK
3813 ql_log(ql_log_warn, vha, 0x0069,
3814 "NVRAM configuration failed.\n");
4e08df3f
DM
3815 }
3816 return (rval);
1da177e4
LT
3817}
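/*
 * Illustrative sketch, not part of the original driver: the validity rule
 * used by qla2x00_nvram_config() above is that the byte-wise sum of the whole
 * NVRAM image is zero (mod 256), the image starts with the "ISP " signature
 * and the version field is non-zero. Assuming the signature sits at the start
 * of the image (as nv->id does in nvram_t), a hypothetical standalone checker
 * for the signature and checksum could look like this:
 */
static inline int qla2x00_nvram_image_looks_valid(const uint8_t *img,
    uint32_t size)
{
	uint8_t chksum = 0;
	uint32_t cnt;

	for (cnt = 0; cnt < size; cnt++)
		chksum += img[cnt];	/* sum of all bytes must wrap to zero */

	return chksum == 0 && img[0] == 'I' && img[1] == 'S' &&
	    img[2] == 'P' && img[3] == ' ';
}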
3818
19a7b4ae
JSEC
3819static void
3820qla2x00_rport_del(void *data)
3821{
3822 fc_port_t *fcport = data;
d97994dc 3823 struct fc_rport *rport;
044d78e1 3824 unsigned long flags;
d97994dc 3825
044d78e1 3826 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
ac280b67 3827 rport = fcport->drport ? fcport->drport: fcport->rport;
d97994dc 3828 fcport->drport = NULL;
044d78e1 3829 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
726b8548
QT
3830 if (rport) {
3831 ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
3832 "%s %8phN. rport %p roles %x \n",
3833 __func__, fcport->port_name, rport,
3834 rport->roles);
3835
d97994dc 3836 fc_remote_port_delete(rport);
726b8548 3837 }
19a7b4ae
JSEC
3838}
3839
1da177e4
LT
3840/**
3841 * qla2x00_alloc_fcport() - Allocate a generic fcport.
3842 * @vha: HA context
3843 * @flags: allocation flags
3844 *
3845 * Returns a pointer to the allocated fcport, or NULL, if none available.
3846 */
9a069e19 3847fc_port_t *
e315cd28 3848qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1da177e4
LT
3849{
3850 fc_port_t *fcport;
3851
bbfbbbc1
MK
3852 fcport = kzalloc(sizeof(fc_port_t), flags);
3853 if (!fcport)
3854 return NULL;
1da177e4
LT
3855
3856 /* Setup fcport template structure. */
e315cd28 3857 fcport->vha = vha;
1da177e4
LT
3858 fcport->port_type = FCT_UNKNOWN;
3859 fcport->loop_id = FC_NO_LOOP_ID;
ec426e10 3860 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
ad3e0eda 3861 fcport->supported_classes = FC_COS_UNSPECIFIED;
1da177e4 3862
726b8548
QT
3863 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
3864 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
3865 GFP_ATOMIC);
3866 fcport->disc_state = DSC_DELETED;
3867 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
3868 fcport->deleted = QLA_SESS_DELETED;
3869 fcport->login_retry = vha->hw->login_retry_count;
3870 fcport->login_retry = 5;
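	/* NB: the hard-coded value above overrides the login_retry_count read
	 * from the HBA settings on the previous line; the effective initial
	 * retry count here is always 5. */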
3871 fcport->logout_on_delete = 1;
3872
3873 if (!fcport->ct_desc.ct_sns) {
3874 ql_log(ql_log_warn, vha, 0xffff,
3875 "Failed to allocate ct_sns request.\n");
3876 kfree(fcport);
3877 return NULL;
3878 }
3879 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
3880 INIT_LIST_HEAD(&fcport->gnl_entry);
3881 INIT_LIST_HEAD(&fcport->list);
3882
bbfbbbc1 3883 return fcport;
1da177e4
LT
3884}
3885
726b8548
QT
3886void
3887qla2x00_free_fcport(fc_port_t *fcport)
3888{
3889 if (fcport->ct_desc.ct_sns) {
3890 dma_free_coherent(&fcport->vha->hw->pdev->dev,
3891 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
3892 fcport->ct_desc.ct_sns_dma);
3893
3894 fcport->ct_desc.ct_sns = NULL;
3895 }
3896 kfree(fcport);
3897}
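/*
 * Illustrative usage sketch, not taken from the driver: callers are expected
 * to pair qla2x00_alloc_fcport() with qla2x00_free_fcport() (rather than a
 * bare kfree()) so that the ct_sns DMA buffer allocated above is released as
 * well, e.g.:
 *
 *	fc_port_t *tmp = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return QLA_MEMORY_ALLOC_FAILED;
 *	...use tmp for a one-off query...
 *	qla2x00_free_fcport(tmp);
 */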
3898
1da177e4
LT
3899/*
3900 * qla2x00_configure_loop
3901 * Updates Fibre Channel Device Database with what is actually on loop.
3902 *
3903 * Input:
3904 * ha = adapter block pointer.
3905 *
3906 * Returns:
3907 * 0 = success.
3908 * 1 = error.
3909 * 2 = database was full and device was not configured.
3910 */
3911static int
e315cd28 3912qla2x00_configure_loop(scsi_qla_host_t *vha)
1da177e4
LT
3913{
3914 int rval;
3915 unsigned long flags, save_flags;
e315cd28 3916 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
3917 rval = QLA_SUCCESS;
3918
3919 /* Get Initiator ID */
e315cd28
AC
3920 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
3921 rval = qla2x00_configure_hba(vha);
1da177e4 3922 if (rval != QLA_SUCCESS) {
7c3df132
SK
3923 ql_dbg(ql_dbg_disc, vha, 0x2013,
3924 "Unable to configure HBA.\n");
1da177e4
LT
3925 return (rval);
3926 }
3927 }
3928
e315cd28 3929 save_flags = flags = vha->dpc_flags;
7c3df132
SK
3930 ql_dbg(ql_dbg_disc, vha, 0x2014,
3931 "Configure loop -- dpc flags = 0x%lx.\n", flags);
1da177e4
LT
3932
3933 /*
3934 * If we have both an RSCN and PORT UPDATE pending then handle them
3935 * both at the same time.
3936 */
e315cd28
AC
3937 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3938 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
1da177e4 3939
3064ff39
MH
3940 qla2x00_get_data_rate(vha);
3941
1da177e4
LT
3942 /* Determine what we need to do */
3943 if (ha->current_topology == ISP_CFG_FL &&
3944 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
3945
1da177e4
LT
3946 set_bit(RSCN_UPDATE, &flags);
3947
3948 } else if (ha->current_topology == ISP_CFG_F &&
3949 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
3950
1da177e4
LT
3951 set_bit(RSCN_UPDATE, &flags);
3952 clear_bit(LOCAL_LOOP_UPDATE, &flags);
21333b48
AV
3953
3954 } else if (ha->current_topology == ISP_CFG_N) {
3955 clear_bit(RSCN_UPDATE, &flags);
1da177e4 3956
e315cd28 3957 } else if (!vha->flags.online ||
1da177e4
LT
3958 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
3959
1da177e4
LT
3960 set_bit(RSCN_UPDATE, &flags);
3961 set_bit(LOCAL_LOOP_UPDATE, &flags);
3962 }
3963
3964 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
7c3df132
SK
3965 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
3966 ql_dbg(ql_dbg_disc, vha, 0x2015,
3967 "Loop resync needed, failing.\n");
1da177e4 3968 rval = QLA_FUNCTION_FAILED;
642ef983 3969 } else
e315cd28 3970 rval = qla2x00_configure_local_loop(vha);
1da177e4
LT
3971 }
3972
3973 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
7c3df132
SK
3974 if (LOOP_TRANSITION(vha)) {
3975 ql_dbg(ql_dbg_disc, vha, 0x201e,
3976 "Needs RSCN update and loop transition.\n");
1da177e4 3977 rval = QLA_FUNCTION_FAILED;
7c3df132 3978 }
e315cd28
AC
3979 else
3980 rval = qla2x00_configure_fabric(vha);
1da177e4
LT
3981 }
3982
3983 if (rval == QLA_SUCCESS) {
e315cd28
AC
3984 if (atomic_read(&vha->loop_down_timer) ||
3985 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4
LT
3986 rval = QLA_FUNCTION_FAILED;
3987 } else {
e315cd28 3988 atomic_set(&vha->loop_state, LOOP_READY);
7c3df132
SK
3989 ql_dbg(ql_dbg_disc, vha, 0x2069,
3990 "LOOP READY.\n");
3bb67df5
DKU
3991
3992 /*
3993 * Process any ATIO queue entries that came in
3994 * while we weren't online.
3995 */
ead03855
QT
3996 if (qla_tgt_mode_enabled(vha) ||
3997 qla_dual_mode_enabled(vha)) {
3bb67df5
DKU
3998 if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
3999 spin_lock_irqsave(&ha->tgt.atio_lock,
4000 flags);
4001 qlt_24xx_process_atio_queue(vha, 0);
4002 spin_unlock_irqrestore(
4003 &ha->tgt.atio_lock, flags);
4004 } else {
4005 spin_lock_irqsave(&ha->hardware_lock,
4006 flags);
4007 qlt_24xx_process_atio_queue(vha, 1);
4008 spin_unlock_irqrestore(
4009 &ha->hardware_lock, flags);
4010 }
4011 }
1da177e4
LT
4012 }
4013 }
4014
4015 if (rval) {
7c3df132
SK
4016 ql_dbg(ql_dbg_disc, vha, 0x206a,
4017 "%s *** FAILED ***.\n", __func__);
1da177e4 4018 } else {
7c3df132
SK
4019 ql_dbg(ql_dbg_disc, vha, 0x206b,
4020 "%s: exiting normally.\n", __func__);
1da177e4
LT
4021 }
4022
cc3ef7bc 4023 /* Restore state if a resync event occurred during processing */
e315cd28 4024 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4 4025 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
e315cd28 4026 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
f4658b6c 4027 if (test_bit(RSCN_UPDATE, &save_flags)) {
e315cd28 4028 set_bit(RSCN_UPDATE, &vha->dpc_flags);
f4658b6c 4029 }
1da177e4
LT
4030 }
4031
4032 return (rval);
4033}
4034
4035
4036
4037/*
4038 * qla2x00_configure_local_loop
4039 * Updates Fibre Channel Device Database with local loop devices.
4040 *
4041 * Input:
4042 * ha = adapter block pointer.
4043 *
4044 * Returns:
4045 * 0 = success.
4046 */
4047static int
e315cd28 4048qla2x00_configure_local_loop(scsi_qla_host_t *vha)
1da177e4
LT
4049{
4050 int rval, rval2;
4051 int found_devs;
4052 int found;
4053 fc_port_t *fcport, *new_fcport;
4054
4055 uint16_t index;
4056 uint16_t entries;
4057 char *id_iter;
4058 uint16_t loop_id;
4059 uint8_t domain, area, al_pa;
e315cd28 4060 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
4061
4062 found_devs = 0;
4063 new_fcport = NULL;
642ef983 4064 entries = MAX_FIBRE_DEVICES_LOOP;
1da177e4 4065
1da177e4 4066 /* Get list of logged in devices. */
642ef983 4067 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
e315cd28 4068 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
1da177e4
LT
4069 &entries);
4070 if (rval != QLA_SUCCESS)
4071 goto cleanup_allocation;
4072
7c3df132
SK
4073 ql_dbg(ql_dbg_disc, vha, 0x2017,
4074 "Entries in ID list (%d).\n", entries);
4075 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
4076 (uint8_t *)ha->gid_list,
4077 entries * sizeof(struct gid_list_info));
1da177e4
LT
4078
4079 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 4080 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 4081 if (new_fcport == NULL) {
7c3df132
SK
4082 ql_log(ql_log_warn, vha, 0x2018,
4083 "Memory allocation failed for fcport.\n");
1da177e4
LT
4084 rval = QLA_MEMORY_ALLOC_FAILED;
4085 goto cleanup_allocation;
4086 }
4087 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
4088
4089 /*
4090 * Mark local devices that were present with FCS_DEVICE_LOST for now.
4091 */
e315cd28 4092 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
4093 if (atomic_read(&fcport->state) == FCS_ONLINE &&
4094 fcport->port_type != FCT_BROADCAST &&
4095 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
4096
7c3df132
SK
4097 ql_dbg(ql_dbg_disc, vha, 0x2019,
4098 "Marking port lost loop_id=0x%04x.\n",
4099 fcport->loop_id);
1da177e4 4100
ec426e10 4101 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
1da177e4
LT
4102 }
4103 }
4104
4105 /* Add devices to port list. */
4106 id_iter = (char *)ha->gid_list;
4107 for (index = 0; index < entries; index++) {
4108 domain = ((struct gid_list_info *)id_iter)->domain;
4109 area = ((struct gid_list_info *)id_iter)->area;
4110 al_pa = ((struct gid_list_info *)id_iter)->al_pa;
abbd8870 4111 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1da177e4
LT
4112 loop_id = (uint16_t)
4113 ((struct gid_list_info *)id_iter)->loop_id_2100;
abbd8870 4114 else
1da177e4
LT
4115 loop_id = le16_to_cpu(
4116 ((struct gid_list_info *)id_iter)->loop_id);
abbd8870 4117 id_iter += ha->gid_list_info_size;
1da177e4
LT
4118
4119 /* Bypass reserved domain fields. */
4120 if ((domain & 0xf0) == 0xf0)
4121 continue;
4122
4123 /* Bypass if not same domain and area of adapter. */
f7d289f6 4124 if (area && domain &&
e315cd28 4125 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
1da177e4
LT
4126 continue;
4127
4128 /* Bypass invalid local loop ID. */
4129 if (loop_id > LAST_LOCAL_LOOP_ID)
4130 continue;
4131
370d550e
AE
4132 memset(new_fcport, 0, sizeof(fc_port_t));
4133
1da177e4
LT
4134 /* Fill in member data. */
4135 new_fcport->d_id.b.domain = domain;
4136 new_fcport->d_id.b.area = area;
4137 new_fcport->d_id.b.al_pa = al_pa;
4138 new_fcport->loop_id = loop_id;
e315cd28 4139 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
1da177e4 4140 if (rval2 != QLA_SUCCESS) {
7c3df132
SK
4141 ql_dbg(ql_dbg_disc, vha, 0x201a,
4142 "Failed to retrieve fcport information "
4143 "-- get_port_database=%x, loop_id=0x%04x.\n",
4144 rval2, new_fcport->loop_id);
4145 ql_dbg(ql_dbg_disc, vha, 0x201b,
4146 "Scheduling resync.\n");
e315cd28 4147 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4
LT
4148 continue;
4149 }
4150
4151 /* Check for matching device in port list. */
4152 found = 0;
4153 fcport = NULL;
e315cd28 4154 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
4155 if (memcmp(new_fcport->port_name, fcport->port_name,
4156 WWN_SIZE))
4157 continue;
4158
ddb9b126 4159 fcport->flags &= ~FCF_FABRIC_DEVICE;
1da177e4
LT
4160 fcport->loop_id = new_fcport->loop_id;
4161 fcport->port_type = new_fcport->port_type;
4162 fcport->d_id.b24 = new_fcport->d_id.b24;
4163 memcpy(fcport->node_name, new_fcport->node_name,
4164 WWN_SIZE);
4165
4166 found++;
4167 break;
4168 }
4169
4170 if (!found) {
4171 /* New device, add to fcports list. */
e315cd28 4172 list_add_tail(&new_fcport->list, &vha->vp_fcports);
1da177e4
LT
4173
4174 /* Allocate a new replacement fcport. */
4175 fcport = new_fcport;
e315cd28 4176 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 4177 if (new_fcport == NULL) {
7c3df132
SK
4178 ql_log(ql_log_warn, vha, 0x201c,
4179 "Failed to allocate memory for fcport.\n");
1da177e4
LT
4180 rval = QLA_MEMORY_ALLOC_FAILED;
4181 goto cleanup_allocation;
4182 }
4183 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
4184 }
4185
d8b45213 4186 /* Base iIDMA settings on HBA port speed. */
a3cbdfad 4187 fcport->fp_speed = ha->link_data_rate;
d8b45213 4188
e315cd28 4189 qla2x00_update_fcport(vha, fcport);
1da177e4
LT
4190
4191 found_devs++;
4192 }
4193
4194cleanup_allocation:
c9475cb0 4195 kfree(new_fcport);
1da177e4
LT
4196
4197 if (rval != QLA_SUCCESS) {
7c3df132
SK
4198 ql_dbg(ql_dbg_disc, vha, 0x201d,
4199 "Configure local loop error exit: rval=%x.\n", rval);
1da177e4
LT
4200 }
4201
1da177e4
LT
4202 return (rval);
4203}
4204
d8b45213 4205static void
e315cd28 4206qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
d8b45213 4207{
d8b45213 4208 int rval;
93f2bd67 4209 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 4210 struct qla_hw_data *ha = vha->hw;
d8b45213 4211
c76f2c01 4212 if (!IS_IIDMA_CAPABLE(ha))
d8b45213
AV
4213 return;
4214
c9afb9a2
GM
4215 if (atomic_read(&fcport->state) != FCS_ONLINE)
4216 return;
4217
39bd9622
AV
4218 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
4219 fcport->fp_speed > ha->link_data_rate)
d8b45213
AV
4220 return;
4221
e315cd28 4222 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
a3cbdfad 4223 mb);
d8b45213 4224 if (rval != QLA_SUCCESS) {
7c3df132 4225 ql_dbg(ql_dbg_disc, vha, 0x2004,
7b833558
OK
4226 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
4227 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
d8b45213 4228 } else {
7c3df132 4229 ql_dbg(ql_dbg_disc, vha, 0x2005,
7b833558 4230 "iIDMA adjusted to %s GB/s on %8phN.\n",
d0297c9a 4231 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
7b833558 4232 fcport->port_name);
d8b45213
AV
4233 }
4234}
4235
726b8548 4236/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
23be331d 4237static void
e315cd28 4238qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
8482e118
AV
4239{
4240 struct fc_rport_identifiers rport_ids;
bdf79621 4241 struct fc_rport *rport;
044d78e1 4242 unsigned long flags;
8482e118 4243
f8b02a85
AV
4244 rport_ids.node_name = wwn_to_u64(fcport->node_name);
4245 rport_ids.port_name = wwn_to_u64(fcport->port_name);
8482e118
AV
4246 rport_ids.port_id = fcport->d_id.b.domain << 16 |
4247 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
77d74143 4248 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
e315cd28 4249 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
77d74143 4250 if (!rport) {
7c3df132
SK
4251 ql_log(ql_log_warn, vha, 0x2006,
4252 "Unable to allocate fc remote port.\n");
77d74143
AV
4253 return;
4254 }
2d70c103 4255
044d78e1 4256 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
19a7b4ae 4257 *((fc_port_t **)rport->dd_data) = fcport;
044d78e1 4258 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
d97994dc 4259
ad3e0eda 4260 rport->supported_classes = fcport->supported_classes;
77d74143 4261
8482e118
AV
4262 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4263 if (fcport->port_type == FCT_INITIATOR)
4264 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
4265 if (fcport->port_type == FCT_TARGET)
4266 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
726b8548
QT
4267
4268 ql_dbg(ql_dbg_disc, vha, 0xffff,
4269 "%s %8phN. rport %p is %s mode \n",
4270 __func__, fcport->port_name, rport,
4271 (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
4272
77d74143 4273 fc_remote_port_rolechg(rport, rport_ids.roles);
1da177e4
LT
4274}
4275
23be331d
AB
4276/*
4277 * qla2x00_update_fcport
4278 * Updates device on list.
4279 *
4280 * Input:
4281 * ha = adapter block pointer.
4282 * fcport = port structure pointer.
4283 *
4284 * Return:
4285 * 0 - Success
4286 * BIT_0 - error
4287 *
4288 * Context:
4289 * Kernel context.
4290 */
4291void
e315cd28 4292qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
23be331d 4293{
e315cd28 4294 fcport->vha = vha;
8ae6d9c7 4295
726b8548
QT
4296 if (IS_SW_RESV_ADDR(fcport->d_id))
4297 return;
4298
4299 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC \n",
4300 __func__, fcport->port_name);
4301
8ae6d9c7
GM
4302 if (IS_QLAFX00(vha->hw)) {
4303 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
d20ed91b 4304 goto reg_port;
8ae6d9c7 4305 }
23be331d 4306 fcport->login_retry = 0;
5ff1d584 4307 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
726b8548
QT
4308 fcport->disc_state = DSC_LOGIN_COMPLETE;
4309 fcport->deleted = 0;
4310 fcport->logout_on_delete = 1;
23be331d 4311
1f93da52 4312 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
e315cd28 4313 qla2x00_iidma_fcport(vha, fcport);
21090cbe 4314 qla24xx_update_fcport_fcp_prio(vha, fcport);
d20ed91b
AP
4315
4316reg_port:
726b8548
QT
4317 switch (vha->host->active_mode) {
4318 case MODE_INITIATOR:
4319 qla2x00_reg_remote_port(vha, fcport);
4320 break;
4321 case MODE_TARGET:
4322 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
4323 !vha->vha_tgt.qla_tgt->tgt_stopped)
4324 qlt_fc_port_added(vha, fcport);
4325 break;
4326 case MODE_DUAL:
d20ed91b 4327 qla2x00_reg_remote_port(vha, fcport);
726b8548
QT
4328 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
4329 !vha->vha_tgt.qla_tgt->tgt_stopped)
4330 qlt_fc_port_added(vha, fcport);
4331 break;
4332 default:
4333 break;
4334 }
23be331d
AB
4335}
4336
1da177e4
LT
4337/*
4338 * qla2x00_configure_fabric
4339 * Setup SNS devices with loop ID's.
4340 *
4341 * Input:
4342 * ha = adapter block pointer.
4343 *
4344 * Returns:
4345 * 0 = success.
4346 * BIT_0 = error
4347 */
4348static int
e315cd28 4349qla2x00_configure_fabric(scsi_qla_host_t *vha)
1da177e4 4350{
b3b02e6e 4351 int rval;
726b8548 4352 fc_port_t *fcport;
1da177e4 4353 uint16_t mb[MAILBOX_REGISTER_COUNT];
0107109e 4354 uint16_t loop_id;
1da177e4 4355 LIST_HEAD(new_fcports);
e315cd28 4356 struct qla_hw_data *ha = vha->hw;
df673274 4357 int discovery_gen;
1da177e4
LT
4358
4359 /* If FL port exists, then SNS is present */
e428924c 4360 if (IS_FWI2_CAPABLE(ha))
0107109e
AV
4361 loop_id = NPH_F_PORT;
4362 else
4363 loop_id = SNS_FL_PORT;
e315cd28 4364 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
1da177e4 4365 if (rval != QLA_SUCCESS) {
7c3df132
SK
4366 ql_dbg(ql_dbg_disc, vha, 0x201f,
4367 "MBX_GET_PORT_NAME failed, No FL Port.\n");
1da177e4 4368
e315cd28 4369 vha->device_flags &= ~SWITCH_FOUND;
1da177e4
LT
4370 return (QLA_SUCCESS);
4371 }
e315cd28 4372 vha->device_flags |= SWITCH_FOUND;
1da177e4 4373
1da177e4 4374 do {
726b8548
QT
4375 qla2x00_mgmt_svr_login(vha);
4376
cca5335c
AV
4377 /* FDMI support. */
4378 if (ql2xfdmienable &&
e315cd28
AC
4379 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
4380 qla2x00_fdmi_register(vha);
cca5335c 4381
1da177e4 4382 /* Ensure we are logged into the SNS. */
e428924c 4383 if (IS_FWI2_CAPABLE(ha))
0107109e
AV
4384 loop_id = NPH_SNS;
4385 else
4386 loop_id = SIMPLE_NAME_SERVER;
0b91d116
CD
4387 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
4388 0xfc, mb, BIT_1|BIT_0);
4389 if (rval != QLA_SUCCESS) {
4390 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
e452ceb6 4391 return rval;
0b91d116 4392 }
1da177e4 4393 if (mb[0] != MBS_COMMAND_COMPLETE) {
7c3df132
SK
4394 ql_dbg(ql_dbg_disc, vha, 0x2042,
4395 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
4396 "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
4397 mb[2], mb[6], mb[7]);
1da177e4
LT
4398 return (QLA_SUCCESS);
4399 }
4400
e315cd28
AC
4401 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
4402 if (qla2x00_rft_id(vha)) {
1da177e4 4403 /* EMPTY */
7c3df132
SK
4404 ql_dbg(ql_dbg_disc, vha, 0x2045,
4405 "Register FC-4 TYPE failed.\n");
1da177e4 4406 }
e315cd28 4407 if (qla2x00_rff_id(vha)) {
1da177e4 4408 /* EMPTY */
7c3df132
SK
4409 ql_dbg(ql_dbg_disc, vha, 0x2049,
4410 "Register FC-4 Features failed.\n");
1da177e4 4411 }
e315cd28 4412 if (qla2x00_rnn_id(vha)) {
1da177e4 4413 /* EMPTY */
7c3df132
SK
4414 ql_dbg(ql_dbg_disc, vha, 0x204f,
4415 "Register Node Name failed.\n");
e315cd28 4416 } else if (qla2x00_rsnn_nn(vha)) {
1da177e4 4417 /* EMPTY */
7c3df132
SK
4418 ql_dbg(ql_dbg_disc, vha, 0x2053,
4419 "Register Symobilic Node Name failed.\n");
1da177e4
LT
4420 }
4421 }
4422
827210ba
JC
4423 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4424 fcport->scan_state = QLA_FCPORT_SCAN;
4425 }
4426
df673274
AP
4427 /* Mark the time right before querying FW for connected ports.
4428 * This process is long, asynchronous and by the time it's done,
4429 * collected information might not be accurate anymore. E.g.
4430 * disconnected port might have re-connected and a brand new
4431 * session has been created. In that case the session's generation
4432 * will be newer than discovery_gen. */
4433 qlt_do_generation_tick(vha, &discovery_gen);
4434
726b8548 4435 rval = qla2x00_find_all_fabric_devs(vha);
1da177e4
LT
4436 if (rval != QLA_SUCCESS)
4437 break;
1da177e4
LT
4438 } while (0);
4439
726b8548 4440 if (rval)
7c3df132
SK
4441 ql_dbg(ql_dbg_disc, vha, 0x2068,
4442 "Configure fabric error exit rval=%d.\n", rval);
1da177e4
LT
4443
4444 return (rval);
4445}
4446
1da177e4
LT
4447/*
4448 * qla2x00_find_all_fabric_devs
4449 *
4450 * Input:
4451 * ha = adapter block pointer.
4452 * dev = database device entry pointer.
4453 *
4454 * Returns:
4455 * 0 = success.
4456 *
4457 * Context:
4458 * Kernel context.
4459 */
4460static int
726b8548 4461qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
1da177e4
LT
4462{
4463 int rval;
4464 uint16_t loop_id;
726b8548 4465 fc_port_t *fcport, *new_fcport;
1da177e4
LT
4466 int found;
4467
4468 sw_info_t *swl;
4469 int swl_idx;
4470 int first_dev, last_dev;
1516ef44 4471 port_id_t wrap = {}, nxt_d_id;
e315cd28 4472 struct qla_hw_data *ha = vha->hw;
bb4cf5b7 4473 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
726b8548 4474 unsigned long flags;
1da177e4
LT
4475
4476 rval = QLA_SUCCESS;
4477
4478 /* Try GID_PT to get device list, else GAN. */
7a67735b 4479 if (!ha->swl)
642ef983 4480 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
7a67735b
AV
4481 GFP_KERNEL);
4482 swl = ha->swl;
bbfbbbc1 4483 if (!swl) {
1da177e4 4484 /*EMPTY*/
7c3df132
SK
4485 ql_dbg(ql_dbg_disc, vha, 0x2054,
4486 "GID_PT allocations failed, fallback on GA_NXT.\n");
1da177e4 4487 } else {
642ef983 4488 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
e315cd28 4489 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
1da177e4 4490 swl = NULL;
e315cd28 4491 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 4492 swl = NULL;
e315cd28 4493 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 4494 swl = NULL;
726b8548
QT
4495 } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
4496 swl = NULL;
1da177e4 4497 }
e8c72ba5
CD
4498
4499 /* If other queries succeeded probe for FC-4 type */
4500 if (swl)
4501 qla2x00_gff_id(vha, swl);
1da177e4
LT
4502 }
4503 swl_idx = 0;
4504
4505 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 4506 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 4507 if (new_fcport == NULL) {
7c3df132
SK
4508 ql_log(ql_log_warn, vha, 0x205e,
4509 "Failed to allocate memory for fcport.\n");
1da177e4
LT
4510 return (QLA_MEMORY_ALLOC_FAILED);
4511 }
4512 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
1da177e4
LT
4513 /* Set start port ID scan at adapter ID. */
4514 first_dev = 1;
4515 last_dev = 0;
4516
4517 /* Starting free loop ID. */
e315cd28
AC
4518 loop_id = ha->min_external_loopid;
4519 for (; loop_id <= ha->max_loop_id; loop_id++) {
4520 if (qla2x00_is_reserved_id(vha, loop_id))
1da177e4
LT
4521 continue;
4522
3a6478df
GM
4523 if (ha->current_topology == ISP_CFG_FL &&
4524 (atomic_read(&vha->loop_down_timer) ||
4525 LOOP_TRANSITION(vha))) {
bb2d52b2
AV
4526 atomic_set(&vha->loop_down_timer, 0);
4527 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4528 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4 4529 break;
bb2d52b2 4530 }
1da177e4
LT
4531
4532 if (swl != NULL) {
4533 if (last_dev) {
4534 wrap.b24 = new_fcport->d_id.b24;
4535 } else {
4536 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
4537 memcpy(new_fcport->node_name,
4538 swl[swl_idx].node_name, WWN_SIZE);
4539 memcpy(new_fcport->port_name,
4540 swl[swl_idx].port_name, WWN_SIZE);
d8b45213
AV
4541 memcpy(new_fcport->fabric_port_name,
4542 swl[swl_idx].fabric_port_name, WWN_SIZE);
4543 new_fcport->fp_speed = swl[swl_idx].fp_speed;
e8c72ba5 4544 new_fcport->fc4_type = swl[swl_idx].fc4_type;
1da177e4
LT
4545
4546 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
4547 last_dev = 1;
4548 }
4549 swl_idx++;
4550 }
4551 } else {
4552 /* Send GA_NXT to the switch */
e315cd28 4553 rval = qla2x00_ga_nxt(vha, new_fcport);
1da177e4 4554 if (rval != QLA_SUCCESS) {
7c3df132
SK
4555 ql_log(ql_log_warn, vha, 0x2064,
4556 "SNS scan failed -- assuming "
4557 "zero-entry result.\n");
1da177e4
LT
4558 rval = QLA_SUCCESS;
4559 break;
4560 }
4561 }
4562
4563 /* If wrap on switch device list, exit. */
4564 if (first_dev) {
4565 wrap.b24 = new_fcport->d_id.b24;
4566 first_dev = 0;
4567 } else if (new_fcport->d_id.b24 == wrap.b24) {
7c3df132
SK
4568 ql_dbg(ql_dbg_disc, vha, 0x2065,
4569 "Device wrap (%02x%02x%02x).\n",
4570 new_fcport->d_id.b.domain,
4571 new_fcport->d_id.b.area,
4572 new_fcport->d_id.b.al_pa);
1da177e4
LT
4573 break;
4574 }
4575
2c3dfe3f 4576 /* Bypass if same physical adapter. */
e315cd28 4577 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
1da177e4
LT
4578 continue;
4579
2c3dfe3f 4580 /* Bypass virtual ports of the same host. */
bb4cf5b7
CD
4581 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
4582 continue;
2c3dfe3f 4583
f7d289f6
AV
4584 /* Bypass if same domain and area of adapter. */
4585 if (((new_fcport->d_id.b24 & 0xffff00) ==
e315cd28 4586 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
f7d289f6
AV
4587 ISP_CFG_FL)
4588 continue;
4589
1da177e4
LT
4590 /* Bypass reserved domain fields. */
4591 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
4592 continue;
4593
e8c72ba5 4594 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
4da26e16
CD
4595 if (ql2xgffidenable &&
4596 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
4597 new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
e8c72ba5
CD
4598 continue;
4599
726b8548
QT
4600 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4601
1da177e4
LT
4602 /* Locate matching device in database. */
4603 found = 0;
e315cd28 4604 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
4605 if (memcmp(new_fcport->port_name, fcport->port_name,
4606 WWN_SIZE))
4607 continue;
4608
827210ba 4609 fcport->scan_state = QLA_FCPORT_FOUND;
b3b02e6e 4610
1da177e4
LT
4611 found++;
4612
d8b45213
AV
4613 /* Update port state. */
4614 memcpy(fcport->fabric_port_name,
4615 new_fcport->fabric_port_name, WWN_SIZE);
4616 fcport->fp_speed = new_fcport->fp_speed;
4617
1da177e4 4618 /*
b2032fd5
RD
4619 * If address the same and state FCS_ONLINE
4620 * (or in target mode), nothing changed.
1da177e4
LT
4621 */
4622 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
b2032fd5 4623 (atomic_read(&fcport->state) == FCS_ONLINE ||
726b8548 4624 (vha->host->active_mode == MODE_TARGET))) {
1da177e4
LT
4625 break;
4626 }
4627
4628 /*
4629 * If device was not a fabric device before.
4630 */
4631 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
4632 fcport->d_id.b24 = new_fcport->d_id.b24;
5f16b331 4633 qla2x00_clear_loop_id(fcport);
1da177e4
LT
4634 fcport->flags |= (FCF_FABRIC_DEVICE |
4635 FCF_LOGIN_NEEDED);
1da177e4
LT
4636 break;
4637 }
4638
4639 /*
4640 * Port ID changed or device was marked to be updated;
4641 * Log it out if still logged in and mark it for
4642 * relogin later.
4643 */
726b8548 4644 if (qla_tgt_mode_enabled(base_vha)) {
b2032fd5
RD
4645 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
4646 "port changed FC ID, %8phC"
4647 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
4648 fcport->port_name,
4649 fcport->d_id.b.domain,
4650 fcport->d_id.b.area,
4651 fcport->d_id.b.al_pa,
4652 fcport->loop_id,
4653 new_fcport->d_id.b.domain,
4654 new_fcport->d_id.b.area,
4655 new_fcport->d_id.b.al_pa);
4656 fcport->d_id.b24 = new_fcport->d_id.b24;
4657 break;
4658 }
4659
1da177e4
LT
4660 fcport->d_id.b24 = new_fcport->d_id.b24;
4661 fcport->flags |= FCF_LOGIN_NEEDED;
1da177e4
LT
4662 break;
4663 }
4664
726b8548
QT
4665 if (found) {
4666 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1da177e4 4667 continue;
726b8548 4668 }
1da177e4 4669 /* If device was not in our fcports list, then add it. */
b2032fd5 4670 new_fcport->scan_state = QLA_FCPORT_FOUND;
726b8548
QT
4671 list_add_tail(&new_fcport->list, &vha->vp_fcports);
4672
4673 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4674
1da177e4
LT
4675
4676 /* Allocate a new replacement fcport. */
4677 nxt_d_id.b24 = new_fcport->d_id.b24;
e315cd28 4678 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 4679 if (new_fcport == NULL) {
7c3df132
SK
4680 ql_log(ql_log_warn, vha, 0x2066,
4681 "Memory allocation failed for fcport.\n");
1da177e4
LT
4682 return (QLA_MEMORY_ALLOC_FAILED);
4683 }
4684 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
4685 new_fcport->d_id.b24 = nxt_d_id.b24;
4686 }
4687
726b8548
QT
4688 qla2x00_free_fcport(new_fcport);
4689
4690 /*
4691 * Logout all previous fabric dev marked lost, except FCP2 devices.
4692 */
4693 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4694 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4695 break;
4696
4697 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
4698 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
4699 continue;
4700
4701 if (fcport->scan_state == QLA_FCPORT_SCAN) {
4702 if ((qla_dual_mode_enabled(vha) ||
4703 qla_ini_mode_enabled(vha)) &&
4704 atomic_read(&fcport->state) == FCS_ONLINE) {
4705 qla2x00_mark_device_lost(vha, fcport,
4706 ql2xplogiabsentdevice, 0);
4707 if (fcport->loop_id != FC_NO_LOOP_ID &&
4708 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
4709 fcport->port_type != FCT_INITIATOR &&
4710 fcport->port_type != FCT_BROADCAST) {
4711 ql_dbg(ql_dbg_disc, vha, 0xffff,
4712 "%s %d %8phC post del sess\n",
4713 __func__, __LINE__,
4714 fcport->port_name);
4715
4716 qlt_schedule_sess_for_deletion_lock
4717 (fcport);
4718 continue;
4719 }
4720 }
4721 }
1da177e4 4722
726b8548
QT
4723 if (fcport->scan_state == QLA_FCPORT_FOUND)
4724 qla24xx_fcport_handle_login(vha, fcport);
4725 }
1da177e4
LT
4726 return (rval);
4727}
4728
4729/*
4730 * qla2x00_find_new_loop_id
4731 * Scan through our port list and find a new usable loop ID.
4732 *
4733 * Input:
4734 * ha: adapter state pointer.
4735 * dev: port structure pointer.
4736 *
4737 * Returns:
4738 * qla2x00 local function return status code.
4739 *
4740 * Context:
4741 * Kernel context.
4742 */
03bcfb57 4743int
e315cd28 4744qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
1da177e4
LT
4745{
4746 int rval;
e315cd28 4747 struct qla_hw_data *ha = vha->hw;
feafb7b1 4748 unsigned long flags = 0;
1da177e4
LT
4749
4750 rval = QLA_SUCCESS;
4751
5f16b331 4752 spin_lock_irqsave(&ha->vport_slock, flags);
1da177e4 4753
5f16b331
CD
4754 dev->loop_id = find_first_zero_bit(ha->loop_id_map,
4755 LOOPID_MAP_SIZE);
4756 if (dev->loop_id >= LOOPID_MAP_SIZE ||
4757 qla2x00_is_reserved_id(vha, dev->loop_id)) {
4758 dev->loop_id = FC_NO_LOOP_ID;
4759 rval = QLA_FUNCTION_FAILED;
4760 } else
4761 set_bit(dev->loop_id, ha->loop_id_map);
1da177e4 4762
5f16b331 4763 spin_unlock_irqrestore(&ha->vport_slock, flags);
1da177e4 4764
5f16b331
CD
4765 if (rval == QLA_SUCCESS)
4766 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
4767 "Assigning new loopid=%x, portid=%x.\n",
4768 dev->loop_id, dev->d_id.b24);
4769 else
4770 ql_log(ql_log_warn, dev->vha, 0x2087,
4771 "No loop_id's available, portid=%x.\n",
4772 dev->d_id.b24);
1da177e4
LT
4773
4774 return (rval);
4775}
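/*
 * Illustrative sketch, not part of the driver: loop IDs are handed out from
 * the shared ha->loop_id_map bitmap under ha->vport_slock, as shown above.
 * Releasing an ID follows the mirror-image pattern; a hypothetical helper
 * doing only that would look roughly like this (for fcports the driver's own
 * qla2x00_clear_loop_id() serves this purpose):
 */
static inline void qla2x00_release_loop_id_sketch(struct qla_hw_data *ha,
    uint16_t loop_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	if (loop_id < LOOPID_MAP_SIZE)
		clear_bit(loop_id, ha->loop_id_map);	/* mark the ID free */
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}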
4776
1da177e4
LT
4777
4778/*
4779 * qla2x00_fabric_login
4780 * Issue fabric login command.
4781 *
4782 * Input:
4783 * ha = adapter block pointer.
4784 * device = pointer to FC device type structure.
4785 *
4786 * Returns:
4787 * 0 - Login successfully
4788 * 1 - Login failed
4789 * 2 - Initiator device
4790 * 3 - Fatal error
4791 */
4792int
e315cd28 4793qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
1da177e4
LT
4794 uint16_t *next_loopid)
4795{
4796 int rval;
4797 int retry;
4798 uint16_t tmp_loopid;
4799 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 4800 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
4801
4802 retry = 0;
4803 tmp_loopid = 0;
4804
4805 for (;;) {
7c3df132
SK
4806 ql_dbg(ql_dbg_disc, vha, 0x2000,
4807 "Trying Fabric Login w/loop id 0x%04x for port "
4808 "%02x%02x%02x.\n",
4809 fcport->loop_id, fcport->d_id.b.domain,
4810 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
4811
4812 /* Login fcport on switch. */
0b91d116 4813 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
1da177e4
LT
4814 fcport->d_id.b.domain, fcport->d_id.b.area,
4815 fcport->d_id.b.al_pa, mb, BIT_0);
0b91d116
CD
4816 if (rval != QLA_SUCCESS) {
4817 return rval;
4818 }
1da177e4
LT
4819 if (mb[0] == MBS_PORT_ID_USED) {
4820 /*
4821 * Device has another loop ID. The firmware team
0107109e
AV
4822 * recommends the driver perform an implicit login with
4823 * the specified ID again. The ID we just used is saved
4824 * here so we return with an ID that can be tried by
4825 * the next login.
1da177e4
LT
4826 */
4827 retry++;
4828 tmp_loopid = fcport->loop_id;
4829 fcport->loop_id = mb[1];
4830
7c3df132
SK
4831 ql_dbg(ql_dbg_disc, vha, 0x2001,
4832 "Fabric Login: port in use - next loop "
4833 "id=0x%04x, port id= %02x%02x%02x.\n",
1da177e4 4834 fcport->loop_id, fcport->d_id.b.domain,
7c3df132 4835 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
4836
4837 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
4838 /*
4839 * Login succeeded.
4840 */
4841 if (retry) {
4842 /* A retry occurred before. */
4843 *next_loopid = tmp_loopid;
4844 } else {
4845 /*
4846 * No retry occurred before. Just increment the
4847 * ID value for next login.
4848 */
4849 *next_loopid = (fcport->loop_id + 1);
4850 }
4851
4852 if (mb[1] & BIT_0) {
4853 fcport->port_type = FCT_INITIATOR;
4854 } else {
4855 fcport->port_type = FCT_TARGET;
4856 if (mb[1] & BIT_1) {
8474f3a0 4857 fcport->flags |= FCF_FCP2_DEVICE;
1da177e4
LT
4858 }
4859 }
4860
ad3e0eda
AV
4861 if (mb[10] & BIT_0)
4862 fcport->supported_classes |= FC_COS_CLASS2;
4863 if (mb[10] & BIT_1)
4864 fcport->supported_classes |= FC_COS_CLASS3;
4865
2d70c103
NB
4866 if (IS_FWI2_CAPABLE(ha)) {
4867 if (mb[10] & BIT_7)
4868 fcport->flags |=
4869 FCF_CONF_COMP_SUPPORTED;
4870 }
4871
1da177e4
LT
4872 rval = QLA_SUCCESS;
4873 break;
4874 } else if (mb[0] == MBS_LOOP_ID_USED) {
4875 /*
4876 * Loop ID already used, try next loop ID.
4877 */
4878 fcport->loop_id++;
e315cd28 4879 rval = qla2x00_find_new_loop_id(vha, fcport);
1da177e4
LT
4880 if (rval != QLA_SUCCESS) {
4881 /* Ran out of loop IDs to use */
4882 break;
4883 }
4884 } else if (mb[0] == MBS_COMMAND_ERROR) {
4885 /*
4886 * Firmware possibly timed out during login. If NO
4887 * retries are left to do then the device is declared
4888 * dead.
4889 */
4890 *next_loopid = fcport->loop_id;
e315cd28 4891 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
4892 fcport->d_id.b.domain, fcport->d_id.b.area,
4893 fcport->d_id.b.al_pa);
e315cd28 4894 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1da177e4
LT
4895
4896 rval = 1;
4897 break;
4898 } else {
4899 /*
4900 * unrecoverable / not handled error
4901 */
7c3df132
SK
4902 ql_dbg(ql_dbg_disc, vha, 0x2002,
4903 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
4904 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
4905 fcport->d_id.b.area, fcport->d_id.b.al_pa,
4906 fcport->loop_id, jiffies);
1da177e4
LT
4907
4908 *next_loopid = fcport->loop_id;
e315cd28 4909 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
4910 fcport->d_id.b.domain, fcport->d_id.b.area,
4911 fcport->d_id.b.al_pa);
5f16b331 4912 qla2x00_clear_loop_id(fcport);
0eedfcf0 4913 fcport->login_retry = 0;
1da177e4
LT
4914
4915 rval = 3;
4916 break;
4917 }
4918 }
4919
4920 return (rval);
4921}
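/*
 * Illustrative usage sketch, not part of the driver: a caller dispatches on
 * the return codes documented above (0 = logged in, 1 = retryable failure,
 * 2 = initiator device, 3 = fatal). The function name below and the choice of
 * recovery flag are hypothetical; they only show the shape of such a caller.
 */
static void qla2x00_fabric_login_dispatch_sketch(scsi_qla_host_t *vha,
    fc_port_t *fcport)
{
	uint16_t next_loopid = 0;

	switch (qla2x00_fabric_login(vha, fcport, &next_loopid)) {
	case 0:		/* logged in -- publish the port */
		qla2x00_update_fcport(vha, fcport);
		break;
	case 1:		/* retryable failure -- ask the DPC thread to retry */
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		break;
	case 2:		/* remote port is an initiator -- nothing to register */
	case 3:		/* fatal -- the port has already been marked lost */
	default:
		break;
	}
}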
4922
4923/*
4924 * qla2x00_local_device_login
4925 * Issue local device login command.
4926 *
4927 * Input:
4928 * ha = adapter block pointer.
4929 * loop_id = loop id of device to login to.
4930 *
4931 * Returns (Where's the #define!!!!):
4932 * 0 - Login successfully
4933 * 1 - Login failed
4934 * 3 - Fatal error
4935 */
4936int
e315cd28 4937qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
1da177e4
LT
4938{
4939 int rval;
4940 uint16_t mb[MAILBOX_REGISTER_COUNT];
4941
4942 memset(mb, 0, sizeof(mb));
e315cd28 4943 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
1da177e4
LT
4944 if (rval == QLA_SUCCESS) {
4945 /* Interrogate mailbox registers for any errors */
4946 if (mb[0] == MBS_COMMAND_ERROR)
4947 rval = 1;
4948 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
4949 /* device not in PCB table */
4950 rval = 3;
4951 }
4952
4953 return (rval);
4954}
4955
4956/*
4957 * qla2x00_loop_resync
4958 * Resync with fibre channel devices.
4959 *
4960 * Input:
4961 * ha = adapter block pointer.
4962 *
4963 * Returns:
4964 * 0 = success
4965 */
4966int
e315cd28 4967qla2x00_loop_resync(scsi_qla_host_t *vha)
1da177e4 4968{
73208dfd 4969 int rval = QLA_SUCCESS;
1da177e4 4970 uint32_t wait_time;
67c2e93a
AC
4971 struct req_que *req;
4972 struct rsp_que *rsp;
4973
d7459527 4974 req = vha->req;
67c2e93a 4975 rsp = req->rsp;
1da177e4 4976
e315cd28
AC
4977 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4978 if (vha->flags.online) {
4979 if (!(rval = qla2x00_fw_ready(vha))) {
1da177e4
LT
4980 /* Wait at most MAX_TARGET RSCNs for a stable link. */
4981 wait_time = 256;
4982 do {
8ae6d9c7
GM
4983 if (!IS_QLAFX00(vha->hw)) {
4984 /*
4985 * Issue a marker after FW becomes
4986 * ready.
4987 */
4988 qla2x00_marker(vha, req, rsp, 0, 0,
4989 MK_SYNC_ALL);
4990 vha->marker_needed = 0;
4991 }
1da177e4
LT
4992
4993 /* Remap devices on Loop. */
e315cd28 4994 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4 4995
8ae6d9c7
GM
4996 if (IS_QLAFX00(vha->hw))
4997 qlafx00_configure_devices(vha);
4998 else
4999 qla2x00_configure_loop(vha);
5000
1da177e4 5001 wait_time--;
e315cd28
AC
5002 } while (!atomic_read(&vha->loop_down_timer) &&
5003 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
5004 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
5005 &vha->dpc_flags)));
1da177e4 5006 }
1da177e4
LT
5007 }
5008
e315cd28 5009 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1da177e4 5010 return (QLA_FUNCTION_FAILED);
1da177e4 5011
e315cd28 5012 if (rval)
7c3df132
SK
5013 ql_dbg(ql_dbg_disc, vha, 0x206c,
5014 "%s *** FAILED ***.\n", __func__);
1da177e4
LT
5015
5016 return (rval);
5017}
5018
579d12b5
SK
5019/*
5020* qla2x00_perform_loop_resync
5021* Description: This function will set the appropriate flags and call
5022* qla2x00_loop_resync. If successful, the loop will be resynced.
5023* Arguments : scsi_qla_host_t pointer
5024* return : Success or Failure
5025*/
5026
5027int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
5028{
5029 int32_t rval = 0;
5030
5031 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
5032 /* Configure the flags so that resync happens properly. */
5033 atomic_set(&ha->loop_down_timer, 0);
5034 if (!(ha->device_flags & DFLG_NO_CABLE)) {
5035 atomic_set(&ha->loop_state, LOOP_UP);
5036 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
5037 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
5038 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
5039
5040 rval = qla2x00_loop_resync(ha);
5041 } else
5042 atomic_set(&ha->loop_state, LOOP_DEAD);
5043
5044 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
5045 }
5046
5047 return rval;
5048}
5049
d97994dc 5050void
67becc00 5051qla2x00_update_fcports(scsi_qla_host_t *base_vha)
d97994dc
AV
5052{
5053 fc_port_t *fcport;
feafb7b1
AE
5054 struct scsi_qla_host *vha;
5055 struct qla_hw_data *ha = base_vha->hw;
5056 unsigned long flags;
d97994dc 5057
feafb7b1 5058 spin_lock_irqsave(&ha->vport_slock, flags);
d97994dc 5059 /* Go with deferred removal of rport references. */
feafb7b1
AE
5060 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
5061 atomic_inc(&vha->vref_count);
5062 list_for_each_entry(fcport, &vha->vp_fcports, list) {
8ae598d0 5063 if (fcport->drport &&
feafb7b1
AE
5064 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
5065 spin_unlock_irqrestore(&ha->vport_slock, flags);
67becc00 5066 qla2x00_rport_del(fcport);
df673274 5067
feafb7b1
AE
5068 spin_lock_irqsave(&ha->vport_slock, flags);
5069 }
5070 }
5071 atomic_dec(&vha->vref_count);
5072 }
5073 spin_unlock_irqrestore(&ha->vport_slock, flags);
d97994dc
AV
5074}
5075
7d613ac6
SV
5076/* Assumes idc_lock always held on entry */
5077void
5078qla83xx_reset_ownership(scsi_qla_host_t *vha)
5079{
5080 struct qla_hw_data *ha = vha->hw;
5081 uint32_t drv_presence, drv_presence_mask;
5082 uint32_t dev_part_info1, dev_part_info2, class_type;
5083 uint32_t class_type_mask = 0x3;
5084 uint16_t fcoe_other_function = 0xffff, i;
5085
7ec0effd
AD
5086 if (IS_QLA8044(ha)) {
5087 drv_presence = qla8044_rd_direct(vha,
5088 QLA8044_CRB_DRV_ACTIVE_INDEX);
5089 dev_part_info1 = qla8044_rd_direct(vha,
5090 QLA8044_CRB_DEV_PART_INFO_INDEX);
5091 dev_part_info2 = qla8044_rd_direct(vha,
5092 QLA8044_CRB_DEV_PART_INFO2);
5093 } else {
5094 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5095 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
5096 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
5097 }
7d613ac6
SV
5098 for (i = 0; i < 8; i++) {
5099 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
5100 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5101 (i != ha->portnum)) {
5102 fcoe_other_function = i;
5103 break;
5104 }
5105 }
5106 if (fcoe_other_function == 0xffff) {
5107 for (i = 0; i < 8; i++) {
5108 class_type = ((dev_part_info2 >> (i * 4)) &
5109 class_type_mask);
5110 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5111 ((i + 8) != ha->portnum)) {
5112 fcoe_other_function = i + 8;
5113 break;
5114 }
5115 }
5116 }
5117 /*
5118 * Prepare drv-presence mask based on fcoe functions present.
5119 * However consider only valid physical fcoe function numbers (0-15).
5120 */
5121 drv_presence_mask = ~((1 << (ha->portnum)) |
5122 ((fcoe_other_function == 0xffff) ?
5123 0 : (1 << (fcoe_other_function))));
5124
5125 /* We are the reset owner iff:
5126 * - No other protocol drivers present.
5127 * - This is the lowest among fcoe functions. */
5128 if (!(drv_presence & drv_presence_mask) &&
5129 (ha->portnum < fcoe_other_function)) {
5130 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
5131 "This host is Reset owner.\n");
5132 ha->flags.nic_core_reset_owner = 1;
5133 }
5134}
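/*
 * Worked example for the mask computed above: with ha->portnum == 2 and
 * fcoe_other_function == 6, drv_presence_mask is ~((1 << 2) | (1 << 6)),
 * i.e. ~0x44. Ownership then requires that no function outside {2, 6} is
 * present in drv_presence and that 2 < 6, so the lower-numbered FCoE
 * function becomes the reset owner.
 */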
5135
fa492630 5136static int
7d613ac6
SV
5137__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
5138{
5139 int rval = QLA_SUCCESS;
5140 struct qla_hw_data *ha = vha->hw;
5141 uint32_t drv_ack;
5142
5143 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5144 if (rval == QLA_SUCCESS) {
5145 drv_ack |= (1 << ha->portnum);
5146 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
5147 }
5148
5149 return rval;
5150}
5151
fa492630 5152static int
7d613ac6
SV
5153__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
5154{
5155 int rval = QLA_SUCCESS;
5156 struct qla_hw_data *ha = vha->hw;
5157 uint32_t drv_ack;
5158
5159 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5160 if (rval == QLA_SUCCESS) {
5161 drv_ack &= ~(1 << ha->portnum);
5162 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
5163 }
5164
5165 return rval;
5166}
5167
fa492630 5168static const char *
7d613ac6
SV
5169qla83xx_dev_state_to_string(uint32_t dev_state)
5170{
5171 switch (dev_state) {
5172 case QLA8XXX_DEV_COLD:
5173 return "COLD/RE-INIT";
5174 case QLA8XXX_DEV_INITIALIZING:
5175 return "INITIALIZING";
5176 case QLA8XXX_DEV_READY:
5177 return "READY";
5178 case QLA8XXX_DEV_NEED_RESET:
5179 return "NEED RESET";
5180 case QLA8XXX_DEV_NEED_QUIESCENT:
5181 return "NEED QUIESCENT";
5182 case QLA8XXX_DEV_FAILED:
5183 return "FAILED";
5184 case QLA8XXX_DEV_QUIESCENT:
5185 return "QUIESCENT";
5186 default:
5187 return "Unknown";
5188 }
5189}
5190
5191/* Assumes idc-lock always held on entry */
5192void
5193qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
5194{
5195 struct qla_hw_data *ha = vha->hw;
5196 uint32_t idc_audit_reg = 0, duration_secs = 0;
5197
5198 switch (audit_type) {
5199 case IDC_AUDIT_TIMESTAMP:
5200 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
5201 idc_audit_reg = (ha->portnum) |
5202 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
5203 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
5204 break;
5205
5206 case IDC_AUDIT_COMPLETION:
5207 duration_secs = ((jiffies_to_msecs(jiffies) -
5208 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
5209 idc_audit_reg = (ha->portnum) |
5210 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
5211 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
5212 break;
5213
5214 default:
5215 ql_log(ql_log_warn, vha, 0xb078,
5216 "Invalid audit type specified.\n");
5217 break;
5218 }
5219}
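/*
 * Layout note for the register written above: the port number occupies the
 * low bits, the audit type is placed at bit 7 and the timestamp (or the
 * duration in seconds) starts at bit 8. For example, port 3 completing a
 * reset that took 42 seconds writes
 * 3 | (IDC_AUDIT_COMPLETION << 7) | (42 << 8).
 */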
5220
5221/* Assumes idc_lock always held on entry */
fa492630 5222static int
7d613ac6
SV
5223qla83xx_initiating_reset(scsi_qla_host_t *vha)
5224{
5225 struct qla_hw_data *ha = vha->hw;
5226 uint32_t idc_control, dev_state;
5227
5228 __qla83xx_get_idc_control(vha, &idc_control);
5229 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
5230 ql_log(ql_log_info, vha, 0xb080,
5231 "NIC Core reset has been disabled. idc-control=0x%x\n",
5232 idc_control);
5233 return QLA_FUNCTION_FAILED;
5234 }
5235
5236 /* Set NEED-RESET iff in READY state and we are the reset-owner */
5237 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5238 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
5239 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
5240 QLA8XXX_DEV_NEED_RESET);
5241 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
5242 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
5243 } else {
5244 const char *state = qla83xx_dev_state_to_string(dev_state);
5245 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
5246
5247 /* SV: XXX: Is timeout required here? */
5248 /* Wait for IDC state change READY -> NEED_RESET */
5249 while (dev_state == QLA8XXX_DEV_READY) {
5250 qla83xx_idc_unlock(vha, 0);
5251 msleep(200);
5252 qla83xx_idc_lock(vha, 0);
5253 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5254 }
5255 }
5256
5257 /* Send IDC ack by writing to drv-ack register */
5258 __qla83xx_set_drv_ack(vha);
5259
5260 return QLA_SUCCESS;
5261}
5262
5263int
5264__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
5265{
5266 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
5267}
5268
7d613ac6
SV
5269int
5270__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
5271{
5272 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
5273}
5274
fa492630 5275static int
7d613ac6
SV
5276qla83xx_check_driver_presence(scsi_qla_host_t *vha)
5277{
5278 uint32_t drv_presence = 0;
5279 struct qla_hw_data *ha = vha->hw;
5280
5281 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5282 if (drv_presence & (1 << ha->portnum))
5283 return QLA_SUCCESS;
5284 else
5285 return QLA_TEST_FAILED;
5286}
5287
5288int
5289qla83xx_nic_core_reset(scsi_qla_host_t *vha)
5290{
5291 int rval = QLA_SUCCESS;
5292 struct qla_hw_data *ha = vha->hw;
5293
5294 ql_dbg(ql_dbg_p3p, vha, 0xb058,
5295 "Entered %s().\n", __func__);
5296
5297 if (vha->device_flags & DFLG_DEV_FAILED) {
5298 ql_log(ql_log_warn, vha, 0xb059,
5299 "Device in unrecoverable FAILED state.\n");
5300 return QLA_FUNCTION_FAILED;
5301 }
5302
5303 qla83xx_idc_lock(vha, 0);
5304
5305 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
5306 ql_log(ql_log_warn, vha, 0xb05a,
5307 "Function=0x%x has been removed from IDC participation.\n",
5308 ha->portnum);
5309 rval = QLA_FUNCTION_FAILED;
5310 goto exit;
5311 }
5312
5313 qla83xx_reset_ownership(vha);
5314
5315 rval = qla83xx_initiating_reset(vha);
5316
5317 /*
5318 * Perform reset if we are the reset-owner,
5319 * else wait till IDC state changes to READY/FAILED.
5320 */
5321 if (rval == QLA_SUCCESS) {
5322 rval = qla83xx_idc_state_handler(vha);
5323
5324 if (rval == QLA_SUCCESS)
5325 ha->flags.nic_core_hung = 0;
5326 __qla83xx_clear_drv_ack(vha);
5327 }
5328
5329exit:
5330 qla83xx_idc_unlock(vha, 0);
5331
5332 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
5333
5334 return rval;
5335}
5336
81178772
SK
5337int
5338qla2xxx_mctp_dump(scsi_qla_host_t *vha)
5339{
5340 struct qla_hw_data *ha = vha->hw;
5341 int rval = QLA_FUNCTION_FAILED;
5342
5343 if (!IS_MCTP_CAPABLE(ha)) {
5344 /* This message can be removed from the final version */
5345 ql_log(ql_log_info, vha, 0x506d,
5346 "This board is not MCTP capable\n");
5347 return rval;
5348 }
5349
5350 if (!ha->mctp_dump) {
5351 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
5352 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
5353
5354 if (!ha->mctp_dump) {
5355 ql_log(ql_log_warn, vha, 0x506e,
5356 "Failed to allocate memory for mctp dump\n");
5357 return rval;
5358 }
5359 }
5360
5361#define MCTP_DUMP_STR_ADDR 0x00000000
5362 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
5363 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
5364 if (rval != QLA_SUCCESS) {
5365 ql_log(ql_log_warn, vha, 0x506f,
5366 "Failed to capture mctp dump\n");
5367 } else {
5368 ql_log(ql_log_info, vha, 0x5070,
5369 "Mctp dump capture for host (%ld/%p).\n",
5370 vha->host_no, ha->mctp_dump);
5371 ha->mctp_dumped = 1;
5372 }
5373
409ee0fe 5374 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
81178772
SK
5375 ha->flags.nic_core_reset_hdlr_active = 1;
5376 rval = qla83xx_restart_nic_firmware(vha);
5377 if (rval)
5378 /* NIC Core reset failed. */
5379 ql_log(ql_log_warn, vha, 0x5071,
5380 "Failed to restart nic firmware\n");
5381 else
5382 ql_dbg(ql_dbg_p3p, vha, 0xb084,
5383 "Restarted NIC firmware successfully.\n");
5384 ha->flags.nic_core_reset_hdlr_active = 0;
5385 }
5386
5387 return rval;
5388
5389}
5390
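/*
 * Editor's note: qla2xxx_mctp_dump() above allocates its coherent DMA
 * buffer lazily -- only on first use -- and keeps it cached for later
 * captures.  The standalone sketch below models that allocate-once idiom
 * with plain malloc(); the size is assumed and this is not driver code.
 */
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_MCTP_DUMP_SIZE (64 * 1024)	/* assumed size, for the sketch */

static void *sketch_mctp_buf;

static void *sketch_get_mctp_buf(void)
{
	if (!sketch_mctp_buf) {
		sketch_mctp_buf = malloc(SKETCH_MCTP_DUMP_SIZE);
		if (!sketch_mctp_buf)
			fprintf(stderr, "allocation failed\n");
	}
	return sketch_mctp_buf;		/* reused on every later call */
}

int main(void)
{
	void *first = sketch_get_mctp_buf();
	void *second = sketch_get_mctp_buf();

	printf("same buffer reused: %s\n", first == second ? "yes" : "no");
	free(sketch_mctp_buf);
	return 0;
}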
579d12b5 5391/*
8fcd6b8b 5392* qla2x00_quiesce_io
579d12b5
SK
5393* Description: This function blocks new I/Os.
5394* It does not abort any I/Os, as the context
5395* is not destroyed during quiescence.
5396* Arguments: scsi_qla_host_t
5397* Return: void
5398*/
5399void
8fcd6b8b 5400qla2x00_quiesce_io(scsi_qla_host_t *vha)
579d12b5
SK
5401{
5402 struct qla_hw_data *ha = vha->hw;
5403 struct scsi_qla_host *vp;
5404
8fcd6b8b
CD
5405 ql_dbg(ql_dbg_dpc, vha, 0x401d,
5406 "Quiescing I/O - ha=%p.\n", ha);
579d12b5
SK
5407
5408 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
5409 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
5410 atomic_set(&vha->loop_state, LOOP_DOWN);
5411 qla2x00_mark_all_devices_lost(vha, 0);
5412 list_for_each_entry(vp, &ha->vp_list, list)
8fcd6b8b 5413 qla2x00_mark_all_devices_lost(vp, 0);
579d12b5
SK
5414 } else {
5415 if (!atomic_read(&vha->loop_down_timer))
5416 atomic_set(&vha->loop_down_timer,
5417 LOOP_DOWN_TIME);
5418 }
5419 /* Wait for pending cmds to complete */
5420 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
5421}
5422
a9083016
GM
5423void
5424qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
5425{
5426 struct qla_hw_data *ha = vha->hw;
579d12b5 5427 struct scsi_qla_host *vp;
feafb7b1 5428 unsigned long flags;
6aef87be 5429 fc_port_t *fcport;
a9083016 5430
e46ef004
SK
5431 /* For ISP82XX, the driver waits for completion of the commands,
5432 * so the online flag should remain set.
5433 */
7ec0effd 5434 if (!(IS_P3P_TYPE(ha)))
e46ef004 5435 vha->flags.online = 0;
a9083016
GM
5436 ha->flags.chip_reset_done = 0;
5437 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2be21fa2 5438 vha->qla_stats.total_isp_aborts++;
a9083016 5439
7c3df132
SK
5440 ql_log(ql_log_info, vha, 0x00af,
5441 "Performing ISP error recovery - ha=%p.\n", ha);
a9083016 5442
e46ef004
SK
5443 /* For ISP82XX, reset_chip only disables interrupts.
5444 * Since the driver waits for the completion of the commands,
5445 * the interrupts need to remain enabled.
5446 */
7ec0effd 5447 if (!(IS_P3P_TYPE(ha)))
a9083016
GM
5448 ha->isp_ops->reset_chip(vha);
5449
726b8548
QT
5450 ha->chip_reset++;
5451
a9083016
GM
5452 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
5453 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
5454 atomic_set(&vha->loop_state, LOOP_DOWN);
5455 qla2x00_mark_all_devices_lost(vha, 0);
feafb7b1
AE
5456
5457 spin_lock_irqsave(&ha->vport_slock, flags);
579d12b5 5458 list_for_each_entry(vp, &ha->vp_list, list) {
feafb7b1
AE
5459 atomic_inc(&vp->vref_count);
5460 spin_unlock_irqrestore(&ha->vport_slock, flags);
5461
a9083016 5462 qla2x00_mark_all_devices_lost(vp, 0);
feafb7b1
AE
5463
5464 spin_lock_irqsave(&ha->vport_slock, flags);
5465 atomic_dec(&vp->vref_count);
5466 }
5467 spin_unlock_irqrestore(&ha->vport_slock, flags);
a9083016
GM
5468 } else {
5469 if (!atomic_read(&vha->loop_down_timer))
5470 atomic_set(&vha->loop_down_timer,
5471 LOOP_DOWN_TIME);
5472 }
5473
6aef87be
AV
5474 /* Clear all async request states across all VPs. */
5475 list_for_each_entry(fcport, &vha->vp_fcports, list)
5476 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
5477 spin_lock_irqsave(&ha->vport_slock, flags);
5478 list_for_each_entry(vp, &ha->vp_list, list) {
5479 atomic_inc(&vp->vref_count);
5480 spin_unlock_irqrestore(&ha->vport_slock, flags);
5481
5482 list_for_each_entry(fcport, &vp->vp_fcports, list)
5483 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
5484
5485 spin_lock_irqsave(&ha->vport_slock, flags);
5486 atomic_dec(&vp->vref_count);
5487 }
5488 spin_unlock_irqrestore(&ha->vport_slock, flags);
5489
bddd2d65
LC
5490 if (!ha->flags.eeh_busy) {
5491 /* Make sure for ISP 82XX IO DMA is complete */
7ec0effd 5492 if (IS_P3P_TYPE(ha)) {
7190575f 5493 qla82xx_chip_reset_cleanup(vha);
7c3df132
SK
5494 ql_log(ql_log_info, vha, 0x00b4,
5495 "Done chip reset cleanup.\n");
a9083016 5496
e46ef004
SK
5497 /* Done waiting for pending commands.
5498 * Reset the online flag.
5499 */
5500 vha->flags.online = 0;
4d78c973 5501 }
a9083016 5502
bddd2d65
LC
5503 /* Requeue all commands in outstanding command list. */
5504 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
5505 }
b6a029e1
AE
5506 /* memory barrier */
5507 wmb();
a9083016
GM
5508}
5509
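/*
 * Editor's note: the loops above pin each vport with vref_count before
 * dropping vport_slock, do the work outside the lock, then re-take the
 * lock and drop the reference.  The standalone sketch below models that
 * take-ref / unlock / work / lock / put-ref ordering with a pthread mutex;
 * names are illustrative and this is not driver code.
 */
#include <pthread.h>
#include <stdio.h>

struct sketch_vport {
	int id;
	int vref_count;
};

static pthread_mutex_t sketch_vport_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sketch_vport sketch_vports[3] = { {0}, {1}, {2} };

static void sketch_mark_devices_lost(struct sketch_vport *vp)
{
	/* Stand-in for work that must not run under the list lock. */
	printf("marking devices lost on vport %d\n", vp->id);
}

int main(void)
{
	int i;

	pthread_mutex_lock(&sketch_vport_lock);
	for (i = 0; i < 3; i++) {
		struct sketch_vport *vp = &sketch_vports[i];

		vp->vref_count++;			/* pin the vport */
		pthread_mutex_unlock(&sketch_vport_lock);

		sketch_mark_devices_lost(vp);		/* work outside the lock */

		pthread_mutex_lock(&sketch_vport_lock);
		vp->vref_count--;			/* unpin */
	}
	pthread_mutex_unlock(&sketch_vport_lock);
	return 0;
}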
1da177e4
LT
5510/*
5511* qla2x00_abort_isp
5512* Resets ISP and aborts all outstanding commands.
5513*
5514* Input:
5515* ha = adapter block pointer.
5516*
5517* Returns:
5518* 0 = success
5519*/
5520int
e315cd28 5521qla2x00_abort_isp(scsi_qla_host_t *vha)
1da177e4 5522{
476e8978 5523 int rval;
1da177e4 5524 uint8_t status = 0;
e315cd28
AC
5525 struct qla_hw_data *ha = vha->hw;
5526 struct scsi_qla_host *vp;
73208dfd 5527 struct req_que *req = ha->req_q_map[0];
feafb7b1 5528 unsigned long flags;
1da177e4 5529
e315cd28 5530 if (vha->flags.online) {
a9083016 5531 qla2x00_abort_isp_cleanup(vha);
1da177e4 5532
a6171297
SV
5533 if (IS_QLA8031(ha)) {
5534 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
5535 "Clearing fcoe driver presence.\n");
5536 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
5537 ql_dbg(ql_dbg_p3p, vha, 0xb073,
5538 "Error while clearing DRV-Presence.\n");
5539 }
5540
85880801
AV
5541 if (unlikely(pci_channel_offline(ha->pdev) &&
5542 ha->flags.pci_channel_io_perm_failure)) {
5543 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5544 status = 0;
5545 return status;
5546 }
5547
73208dfd 5548 ha->isp_ops->get_flash_version(vha, req->ring);
30c47662 5549
e315cd28 5550 ha->isp_ops->nvram_config(vha);
1da177e4 5551
e315cd28
AC
5552 if (!qla2x00_restart_isp(vha)) {
5553 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4 5554
e315cd28 5555 if (!atomic_read(&vha->loop_down_timer)) {
1da177e4
LT
5556 /*
5557 * Issue marker command only when we are going
5558 * to start the I/O.
5559 */
e315cd28 5560 vha->marker_needed = 1;
1da177e4
LT
5561 }
5562
e315cd28 5563 vha->flags.online = 1;
1da177e4 5564
fd34f556 5565 ha->isp_ops->enable_intrs(ha);
1da177e4 5566
fa2a1ce5 5567 ha->isp_abort_cnt = 0;
e315cd28 5568 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
476e8978 5569
6246b8a1
GM
5570 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
5571 qla2x00_get_fw_version(vha);
df613b96
AV
5572 if (ha->fce) {
5573 ha->flags.fce_enabled = 1;
5574 memset(ha->fce, 0,
5575 fce_calc_size(ha->fce_bufs));
e315cd28 5576 rval = qla2x00_enable_fce_trace(vha,
df613b96
AV
5577 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5578 &ha->fce_bufs);
5579 if (rval) {
7c3df132 5580 ql_log(ql_log_warn, vha, 0x8033,
df613b96
AV
5581 "Unable to reinitialize FCE "
5582 "(%d).\n", rval);
5583 ha->flags.fce_enabled = 0;
5584 }
5585 }
436a7b11
AV
5586
5587 if (ha->eft) {
5588 memset(ha->eft, 0, EFT_SIZE);
e315cd28 5589 rval = qla2x00_enable_eft_trace(vha,
436a7b11
AV
5590 ha->eft_dma, EFT_NUM_BUFFERS);
5591 if (rval) {
7c3df132 5592 ql_log(ql_log_warn, vha, 0x8034,
436a7b11
AV
5593 "Unable to reinitialize EFT "
5594 "(%d).\n", rval);
5595 }
5596 }
1da177e4 5597 } else { /* failed the ISP abort */
e315cd28
AC
5598 vha->flags.online = 1;
5599 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1da177e4 5600 if (ha->isp_abort_cnt == 0) {
7c3df132
SK
5601 ql_log(ql_log_fatal, vha, 0x8035,
5602 "ISP error recover failed - "
5603 "board disabled.\n");
fa2a1ce5 5604 /*
1da177e4
LT
5605 * The next call disables the board
5606 * completely.
5607 */
e315cd28
AC
5608 ha->isp_ops->reset_adapter(vha);
5609 vha->flags.online = 0;
1da177e4 5610 clear_bit(ISP_ABORT_RETRY,
e315cd28 5611 &vha->dpc_flags);
1da177e4
LT
5612 status = 0;
5613 } else { /* schedule another ISP abort */
5614 ha->isp_abort_cnt--;
7c3df132
SK
5615 ql_dbg(ql_dbg_taskm, vha, 0x8020,
5616 "ISP abort - retry remaining %d.\n",
5617 ha->isp_abort_cnt);
1da177e4
LT
5618 status = 1;
5619 }
5620 } else {
5621 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
7c3df132
SK
5622 ql_dbg(ql_dbg_taskm, vha, 0x8021,
5623 "ISP error recovery - retrying (%d) "
5624 "more times.\n", ha->isp_abort_cnt);
e315cd28 5625 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
1da177e4
LT
5626 status = 1;
5627 }
5628 }
fa2a1ce5 5629
1da177e4
LT
5630 }
5631
e315cd28 5632 if (!status) {
7c3df132 5633 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
feafb7b1
AE
5634
5635 spin_lock_irqsave(&ha->vport_slock, flags);
5636 list_for_each_entry(vp, &ha->vp_list, list) {
5637 if (vp->vp_idx) {
5638 atomic_inc(&vp->vref_count);
5639 spin_unlock_irqrestore(&ha->vport_slock, flags);
5640
e315cd28 5641 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
5642
5643 spin_lock_irqsave(&ha->vport_slock, flags);
5644 atomic_dec(&vp->vref_count);
5645 }
e315cd28 5646 }
feafb7b1
AE
5647 spin_unlock_irqrestore(&ha->vport_slock, flags);
5648
7d613ac6
SV
5649 if (IS_QLA8031(ha)) {
5650 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
5651 "Setting back fcoe driver presence.\n");
5652 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
5653 ql_dbg(ql_dbg_p3p, vha, 0xb074,
5654 "Error while setting DRV-Presence.\n");
5655 }
e315cd28 5656 } else {
d8424f68
JP
5657 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
5658 __func__);
1da177e4
LT
5659 }
5660
5661 return(status);
5662}
5663
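/*
 * Editor's note: a standalone sketch (not driver code) of the retry
 * bookkeeping used in the failure path of qla2x00_abort_isp() above --
 * the first failure arms the retry counter, later failures decrement it,
 * and when it reaches zero the board is disabled.  The retry limit below
 * is an assumed value.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_MAX_RETRIES 5	/* assumed limit, for illustration */

struct sketch_abort_state {
	bool retry_armed;	/* models the ISP_ABORT_RETRY flag */
	int abort_cnt;		/* models ha->isp_abort_cnt */
};

/* Returns true if another ISP abort should be scheduled. */
static bool sketch_handle_abort_failure(struct sketch_abort_state *st)
{
	if (st->retry_armed) {
		if (st->abort_cnt == 0) {
			st->retry_armed = false;	/* give up: disable board */
			return false;
		}
		st->abort_cnt--;			/* retry, one attempt burned */
		return true;
	}
	st->abort_cnt = SKETCH_MAX_RETRIES;		/* first failure: arm retries */
	st->retry_armed = true;
	return true;
}

int main(void)
{
	struct sketch_abort_state st = { false, 0 };
	int failures = 0;

	do {
		failures++;
	} while (sketch_handle_abort_failure(&st));
	printf("gave up after %d failed abort attempts\n", failures);
	return 0;
}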
5664/*
5665* qla2x00_restart_isp
5666* Restarts the ISP after a reset.
5667*
5668* Input:
5669* ha = adapter block pointer.
5670*
5671* Returns:
5672* 0 = success
5673*/
5674static int
e315cd28 5675qla2x00_restart_isp(scsi_qla_host_t *vha)
1da177e4 5676{
c6b2fca8 5677 int status = 0;
e315cd28 5678 struct qla_hw_data *ha = vha->hw;
73208dfd
AC
5679 struct req_que *req = ha->req_q_map[0];
5680 struct rsp_que *rsp = ha->rsp_q_map[0];
1da177e4
LT
5681
5682 /* If firmware needs to be loaded */
e315cd28
AC
5683 if (qla2x00_isp_firmware(vha)) {
5684 vha->flags.online = 0;
5685 status = ha->isp_ops->chip_diag(vha);
5686 if (!status)
5687 status = qla2x00_setup_chip(vha);
1da177e4
LT
5688 }
5689
e315cd28
AC
5690 if (!status && !(status = qla2x00_init_rings(vha))) {
5691 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
2533cf67 5692 ha->flags.chip_reset_done = 1;
7108b76e 5693
73208dfd
AC
5694 /* Initialize the queues in use */
5695 qla25xx_init_queues(ha);
5696
e315cd28
AC
5697 status = qla2x00_fw_ready(vha);
5698 if (!status) {
0107109e 5699 /* Issue a marker after FW becomes ready. */
73208dfd 5700 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
7108b76e 5701 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4
LT
5702 }
5703
5704 /* if no cable then assume it's good */
e315cd28 5705 if ((vha->device_flags & DFLG_NO_CABLE))
1da177e4 5706 status = 0;
1da177e4
LT
5707 }
5708 return (status);
5709}
5710
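/*
 * Editor's note: qla2x00_restart_isp() above chains its bring-up steps so
 * that each one runs only if the previous returned 0.  The standalone
 * sketch below shows the same short-circuit sequencing over a table of
 * steps; the step names are illustrative stubs, not driver routines.
 */
#include <stdio.h>

static int sketch_chip_diag(void)  { puts("chip_diag");  return 0; }
static int sketch_setup_chip(void) { puts("setup_chip"); return 0; }
static int sketch_init_rings(void) { puts("init_rings"); return 0; }
static int sketch_fw_ready(void)   { puts("fw_ready");   return 0; }

int main(void)
{
	int (*const steps[])(void) = {
		sketch_chip_diag, sketch_setup_chip,
		sketch_init_rings, sketch_fw_ready,
	};
	int status = 0;
	unsigned int i;

	for (i = 0; i < sizeof(steps) / sizeof(steps[0]) && !status; i++)
		status = steps[i]();	/* stop at the first non-zero status */

	printf("restart %s\n", status ? "failed" : "succeeded");
	return status;
}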
73208dfd
AC
5711static int
5712qla25xx_init_queues(struct qla_hw_data *ha)
5713{
5714 struct rsp_que *rsp = NULL;
5715 struct req_que *req = NULL;
5716 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
5717 int ret = -1;
5718 int i;
5719
2afa19a9 5720 for (i = 1; i < ha->max_rsp_queues; i++) {
73208dfd 5721 rsp = ha->rsp_q_map[i];
cb43285f 5722 if (rsp && test_bit(i, ha->rsp_qid_map)) {
73208dfd 5723 rsp->options &= ~BIT_0;
618a7523 5724 ret = qla25xx_init_rsp_que(base_vha, rsp);
73208dfd 5725 if (ret != QLA_SUCCESS)
7c3df132
SK
5726 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
5727 "%s Rsp que: %d init failed.\n",
5728 __func__, rsp->id);
73208dfd 5729 else
7c3df132
SK
5730 ql_dbg(ql_dbg_init, base_vha, 0x0100,
5731 "%s Rsp que: %d inited.\n",
5732 __func__, rsp->id);
73208dfd 5733 }
2afa19a9
AC
5734 }
5735 for (i = 1; i < ha->max_req_queues; i++) {
73208dfd 5736 req = ha->req_q_map[i];
cb43285f
QT
5737 if (req && test_bit(i, ha->req_qid_map)) {
5738 /* Clear outstanding commands array. */
73208dfd 5739 req->options &= ~BIT_0;
618a7523 5740 ret = qla25xx_init_req_que(base_vha, req);
73208dfd 5741 if (ret != QLA_SUCCESS)
7c3df132
SK
5742 ql_dbg(ql_dbg_init, base_vha, 0x0101,
5743 "%s Req que: %d init failed.\n",
5744 __func__, req->id);
73208dfd 5745 else
7c3df132
SK
5746 ql_dbg(ql_dbg_init, base_vha, 0x0102,
5747 "%s Req que: %d inited.\n",
5748 __func__, req->id);
73208dfd
AC
5749 }
5750 }
5751 return ret;
5752}
5753
1da177e4
LT
5754/*
5755* qla2x00_reset_adapter
5756* Reset adapter.
5757*
5758* Input:
5759* ha = adapter block pointer.
5760*/
abbd8870 5761void
e315cd28 5762qla2x00_reset_adapter(scsi_qla_host_t *vha)
1da177e4
LT
5763{
5764 unsigned long flags = 0;
e315cd28 5765 struct qla_hw_data *ha = vha->hw;
3d71644c 5766 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 5767
e315cd28 5768 vha->flags.online = 0;
fd34f556 5769 ha->isp_ops->disable_intrs(ha);
1da177e4 5770
1da177e4
LT
5771 spin_lock_irqsave(&ha->hardware_lock, flags);
5772 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
5773 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
5774 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
5775 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
5776 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5777}
0107109e
AV
5778
5779void
e315cd28 5780qla24xx_reset_adapter(scsi_qla_host_t *vha)
0107109e
AV
5781{
5782 unsigned long flags = 0;
e315cd28 5783 struct qla_hw_data *ha = vha->hw;
0107109e
AV
5784 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5785
7ec0effd 5786 if (IS_P3P_TYPE(ha))
a9083016
GM
5787 return;
5788
e315cd28 5789 vha->flags.online = 0;
fd34f556 5790 ha->isp_ops->disable_intrs(ha);
0107109e
AV
5791
5792 spin_lock_irqsave(&ha->hardware_lock, flags);
5793 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
5794 RD_REG_DWORD(&reg->hccr);
5795 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
5796 RD_REG_DWORD(&reg->hccr);
5797 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09ff36d3
AV
5798
5799 if (IS_NOPOLLING_TYPE(ha))
5800 ha->isp_ops->enable_intrs(ha);
0107109e
AV
5801}
5802
4e08df3f
DM
5803/* On sparc systems, obtain port and node WWN from firmware
5804 * properties.
5805 */
e315cd28
AC
5806static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
5807 struct nvram_24xx *nv)
4e08df3f
DM
5808{
5809#ifdef CONFIG_SPARC
e315cd28 5810 struct qla_hw_data *ha = vha->hw;
4e08df3f 5811 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
5812 struct device_node *dp = pci_device_to_OF_node(pdev);
5813 const u8 *val;
4e08df3f
DM
5814 int len;
5815
5816 val = of_get_property(dp, "port-wwn", &len);
5817 if (val && len >= WWN_SIZE)
5818 memcpy(nv->port_name, val, WWN_SIZE);
5819
5820 val = of_get_property(dp, "node-wwn", &len);
5821 if (val && len >= WWN_SIZE)
5822 memcpy(nv->node_name, val, WWN_SIZE);
5823#endif
5824}
5825
0107109e 5826int
e315cd28 5827qla24xx_nvram_config(scsi_qla_host_t *vha)
0107109e 5828{
4e08df3f 5829 int rval;
0107109e
AV
5830 struct init_cb_24xx *icb;
5831 struct nvram_24xx *nv;
5832 uint32_t *dptr;
5833 uint8_t *dptr1, *dptr2;
5834 uint32_t chksum;
5835 uint16_t cnt;
e315cd28 5836 struct qla_hw_data *ha = vha->hw;
0107109e 5837
4e08df3f 5838 rval = QLA_SUCCESS;
0107109e 5839 icb = (struct init_cb_24xx *)ha->init_cb;
281afe19 5840 nv = ha->nvram;
0107109e
AV
5841
5842 /* Determine NVRAM starting address. */
f73cb695 5843 if (ha->port_no == 0) {
e5b68a61
AC
5844 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
5845 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
5846 } else {
0107109e 5847 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
6f641790
AV
5848 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
5849 }
f73cb695 5850
e5b68a61
AC
5851 ha->nvram_size = sizeof(struct nvram_24xx);
5852 ha->vpd_size = FA_NVRAM_VPD_SIZE;
0107109e 5853
281afe19
SJ
5854 /* Get VPD data into cache */
5855 ha->vpd = ha->nvram + VPD_OFFSET;
e315cd28 5856 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
281afe19
SJ
5857 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
5858
5859 /* Get NVRAM data into cache and calculate checksum. */
0107109e 5860 dptr = (uint32_t *)nv;
e315cd28 5861 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
0107109e 5862 ha->nvram_size);
da08ef5c
JC
5863 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
5864 chksum += le32_to_cpu(*dptr);
0107109e 5865
7c3df132
SK
5866 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
5867 "Contents of NVRAM\n");
5868 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
5869 (uint8_t *)nv, ha->nvram_size);
0107109e
AV
5870
5871 /* Bad NVRAM data, set defaults parameters. */
5872 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5873 || nv->id[3] != ' ' ||
ad950360 5874 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
0107109e 5875 /* Reset NVRAM data. */
7c3df132 5876 ql_log(ql_log_warn, vha, 0x006b,
9e336520 5877 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7c3df132
SK
5878 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
5879 ql_log(ql_log_warn, vha, 0x006c,
5880 "Falling back to functioning (yet invalid -- WWPN) "
5881 "defaults.\n");
4e08df3f
DM
5882
5883 /*
5884 * Set default initialization control block.
5885 */
5886 memset(nv, 0, ha->nvram_size);
ad950360
BVA
5887 nv->nvram_version = cpu_to_le16(ICB_VERSION);
5888 nv->version = cpu_to_le16(ICB_VERSION);
98aee70d 5889 nv->frame_payload_size = 2048;
ad950360
BVA
5890 nv->execution_throttle = cpu_to_le16(0xFFFF);
5891 nv->exchange_count = cpu_to_le16(0);
5892 nv->hard_address = cpu_to_le16(124);
4e08df3f 5893 nv->port_name[0] = 0x21;
f73cb695 5894 nv->port_name[1] = 0x00 + ha->port_no + 1;
4e08df3f
DM
5895 nv->port_name[2] = 0x00;
5896 nv->port_name[3] = 0xe0;
5897 nv->port_name[4] = 0x8b;
5898 nv->port_name[5] = 0x1c;
5899 nv->port_name[6] = 0x55;
5900 nv->port_name[7] = 0x86;
5901 nv->node_name[0] = 0x20;
5902 nv->node_name[1] = 0x00;
5903 nv->node_name[2] = 0x00;
5904 nv->node_name[3] = 0xe0;
5905 nv->node_name[4] = 0x8b;
5906 nv->node_name[5] = 0x1c;
5907 nv->node_name[6] = 0x55;
5908 nv->node_name[7] = 0x86;
e315cd28 5909 qla24xx_nvram_wwn_from_ofw(vha, nv);
ad950360
BVA
5910 nv->login_retry_count = cpu_to_le16(8);
5911 nv->interrupt_delay_timer = cpu_to_le16(0);
5912 nv->login_timeout = cpu_to_le16(0);
4e08df3f 5913 nv->firmware_options_1 =
ad950360
BVA
5914 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
5915 nv->firmware_options_2 = cpu_to_le32(2 << 4);
5916 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
5917 nv->firmware_options_3 = cpu_to_le32(2 << 13);
5918 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
5919 nv->efi_parameters = cpu_to_le32(0);
4e08df3f 5920 nv->reset_delay = 5;
ad950360
BVA
5921 nv->max_luns_per_target = cpu_to_le16(128);
5922 nv->port_down_retry_count = cpu_to_le16(30);
5923 nv->link_down_timeout = cpu_to_le16(30);
4e08df3f
DM
5924
5925 rval = 1;
0107109e
AV
5926 }
5927
726b8548 5928 if (qla_tgt_mode_enabled(vha)) {
2d70c103 5929 /* Don't enable full login after initial LIP */
ad950360 5930 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
2d70c103 5931 /* Don't enable LIP full login for initiator */
ad950360 5932 nv->host_p &= cpu_to_le32(~BIT_10);
2d70c103
NB
5933 }
5934
5935 qlt_24xx_config_nvram_stage1(vha, nv);
5936
0107109e 5937 /* Reset Initialization control block */
e315cd28 5938 memset(icb, 0, ha->init_cb_size);
0107109e
AV
5939
5940 /* Copy 1st segment. */
5941 dptr1 = (uint8_t *)icb;
5942 dptr2 = (uint8_t *)&nv->version;
5943 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
5944 while (cnt--)
5945 *dptr1++ = *dptr2++;
5946
5947 icb->login_retry_count = nv->login_retry_count;
3ea66e28 5948 icb->link_down_on_nos = nv->link_down_on_nos;
0107109e
AV
5949
5950 /* Copy 2nd segment. */
5951 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
5952 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
5953 cnt = (uint8_t *)&icb->reserved_3 -
5954 (uint8_t *)&icb->interrupt_delay_timer;
5955 while (cnt--)
5956 *dptr1++ = *dptr2++;
5957
5958 /*
5959 * Setup driver NVRAM options.
5960 */
e315cd28 5961 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
9bb9fcf2 5962 "QLA2462");
0107109e 5963
2d70c103
NB
5964 qlt_24xx_config_nvram_stage2(vha, icb);
5965
ad950360 5966 if (nv->host_p & cpu_to_le32(BIT_15)) {
2d70c103 5967 /* Use alternate WWN? */
5341e868
AV
5968 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5969 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5970 }
5971
0107109e 5972 /* Prepare nodename */
ad950360 5973 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
0107109e
AV
5974 /*
5975 * Firmware will apply the following mask if the nodename was
5976 * not provided.
5977 */
5978 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5979 icb->node_name[0] &= 0xF0;
5980 }
5981
5982 /* Set host adapter parameters. */
5983 ha->flags.disable_risc_code_load = 0;
0c8c39af
AV
5984 ha->flags.enable_lip_reset = 0;
5985 ha->flags.enable_lip_full_login =
5986 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
5987 ha->flags.enable_target_reset =
5988 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
0107109e 5989 ha->flags.enable_led_scheme = 0;
d4c760c2 5990 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
0107109e 5991
fd0e7e4d
AV
5992 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
5993 (BIT_6 | BIT_5 | BIT_4)) >> 4;
0107109e
AV
5994
5995 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
5996 sizeof(ha->fw_seriallink_options24));
5997
5998 /* save HBA serial number */
5999 ha->serial0 = icb->port_name[5];
6000 ha->serial1 = icb->port_name[6];
6001 ha->serial2 = icb->port_name[7];
e315cd28
AC
6002 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
6003 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
0107109e 6004
ad950360 6005 icb->execution_throttle = cpu_to_le16(0xFFFF);
bc8fb3cb 6006
0107109e
AV
6007 ha->retry_count = le16_to_cpu(nv->login_retry_count);
6008
6009 /* Set minimum login_timeout to 4 seconds. */
6010 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
6011 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
6012 if (le16_to_cpu(nv->login_timeout) < 4)
ad950360 6013 nv->login_timeout = cpu_to_le16(4);
0107109e 6014 ha->login_timeout = le16_to_cpu(nv->login_timeout);
0107109e 6015
00a537b8
AV
6016 /* Set minimum RATOV to 100 tenths of a second. */
6017 ha->r_a_tov = 100;
0107109e
AV
6018
6019 ha->loop_reset_delay = nv->reset_delay;
6020
6021 /* Link Down Timeout = 0:
6022 *
6023 * When Port Down timer expires we will start returning
6024 * I/O's to OS with "DID_NO_CONNECT".
6025 *
6026 * Link Down Timeout != 0:
6027 *
6028 * The driver waits for the link to come up after link down
6029 * before returning I/Os to OS with "DID_NO_CONNECT".
6030 */
6031 if (le16_to_cpu(nv->link_down_timeout) == 0) {
6032 ha->loop_down_abort_time =
6033 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
6034 } else {
6035 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
6036 ha->loop_down_abort_time =
6037 (LOOP_DOWN_TIME - ha->link_down_timeout);
6038 }
6039
6040 /* Need enough time to try and get the port back. */
6041 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
6042 if (qlport_down_retry)
6043 ha->port_down_retry_count = qlport_down_retry;
6044
6045 /* Set login_retry_count */
6046 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
6047 if (ha->port_down_retry_count ==
6048 le16_to_cpu(nv->port_down_retry_count) &&
6049 ha->port_down_retry_count > 3)
6050 ha->login_retry_count = ha->port_down_retry_count;
6051 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
6052 ha->login_retry_count = ha->port_down_retry_count;
6053 if (ql2xloginretrycount)
6054 ha->login_retry_count = ql2xloginretrycount;
6055
4fdfefe5 6056 /* Enable ZIO. */
e315cd28 6057 if (!vha->flags.init_done) {
4fdfefe5
AV
6058 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
6059 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
6060 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
6061 le16_to_cpu(icb->interrupt_delay_timer): 2;
6062 }
ad950360 6063 icb->firmware_options_2 &= cpu_to_le32(
4fdfefe5 6064 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
e315cd28 6065 vha->flags.process_response_queue = 0;
4fdfefe5 6066 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d
AV
6067 ha->zio_mode = QLA_ZIO_MODE_6;
6068
7c3df132 6069 ql_log(ql_log_info, vha, 0x006f,
4fdfefe5
AV
6070 "ZIO mode %d enabled; timer delay (%d us).\n",
6071 ha->zio_mode, ha->zio_timer * 100);
6072
6073 icb->firmware_options_2 |= cpu_to_le32(
6074 (uint32_t)ha->zio_mode);
6075 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
e315cd28 6076 vha->flags.process_response_queue = 1;
4fdfefe5
AV
6077 }
6078
4e08df3f 6079 if (rval) {
7c3df132
SK
6080 ql_log(ql_log_warn, vha, 0x0070,
6081 "NVRAM configuration failed.\n");
4e08df3f
DM
6082 }
6083 return (rval);
0107109e
AV
6084}
6085
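/*
 * Editor's note: the NVRAM sanity check above requires that the 32-bit
 * little-endian words of the image sum to zero and that the header begins
 * with "ISP ".  The standalone sketch below re-states the core of that
 * check for an arbitrary buffer (the version comparison is omitted); it is
 * illustrative only, not driver code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t sketch_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Returns 1 if the buffer passes the checksum and "ISP " id check. */
static int sketch_nvram_valid(const uint8_t *buf, size_t len)
{
	uint32_t chksum = 0;
	size_t i;

	if (len < 4 || (len & 3) || memcmp(buf, "ISP ", 4) != 0)
		return 0;
	for (i = 0; i < len; i += 4)
		chksum += sketch_le32(buf + i);	/* wraps modulo 2^32 */
	return chksum == 0;
}

int main(void)
{
	uint8_t nv[8] = { 'I', 'S', 'P', ' ', 0, 0, 0, 0 };
	uint32_t id = sketch_le32(nv);

	/* Make the second word the two's complement of the first. */
	nv[4] = (uint8_t)(-id);
	nv[5] = (uint8_t)(-id >> 8);
	nv[6] = (uint8_t)(-id >> 16);
	nv[7] = (uint8_t)(-id >> 24);
	printf("valid=%d\n", sketch_nvram_valid(nv, sizeof(nv)));
	return 0;
}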
4243c115
SC
6086uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
6087{
6088 struct qla27xx_image_status pri_image_status, sec_image_status;
6089 uint8_t valid_pri_image, valid_sec_image;
6090 uint32_t *wptr;
6091 uint32_t cnt, chksum, size;
6092 struct qla_hw_data *ha = vha->hw;
6093
6094 valid_pri_image = valid_sec_image = 1;
6095 ha->active_image = 0;
6096 size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
6097
6098 if (!ha->flt_region_img_status_pri) {
6099 valid_pri_image = 0;
6100 goto check_sec_image;
6101 }
6102
6103 qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
6104 ha->flt_region_img_status_pri, size);
6105
6106 if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
6107 ql_dbg(ql_dbg_init, vha, 0x018b,
6108 "Primary image signature (0x%x) not valid\n",
6109 pri_image_status.signature);
6110 valid_pri_image = 0;
6111 goto check_sec_image;
6112 }
6113
6114 wptr = (uint32_t *)(&pri_image_status);
6115 cnt = size;
6116
da08ef5c
JC
6117 for (chksum = 0; cnt--; wptr++)
6118 chksum += le32_to_cpu(*wptr);
4243c115
SC
6119 if (chksum) {
6120 ql_dbg(ql_dbg_init, vha, 0x018c,
6121 "Checksum validation failed for primary image (0x%x)\n",
6122 chksum);
6123 valid_pri_image = 0;
6124 }
6125
6126check_sec_image:
6127 if (!ha->flt_region_img_status_sec) {
6128 valid_sec_image = 0;
6129 goto check_valid_image;
6130 }
6131
6132 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
6133 ha->flt_region_img_status_sec, size);
6134
6135 if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
6136 ql_dbg(ql_dbg_init, vha, 0x018d,
6137 "Secondary image signature(0x%x) not valid\n",
6138 sec_image_status.signature);
6139 valid_sec_image = 0;
6140 goto check_valid_image;
6141 }
6142
6143 wptr = (uint32_t *)(&sec_image_status);
6144 cnt = size;
da08ef5c
JC
6145 for (chksum = 0; cnt--; wptr++)
6146 chksum += le32_to_cpu(*wptr);
4243c115
SC
6147 if (chksum) {
6148 ql_dbg(ql_dbg_init, vha, 0x018e,
6149 "Checksum validation failed for secondary image (0x%x)\n",
6150 chksum);
6151 valid_sec_image = 0;
6152 }
6153
6154check_valid_image:
6155 if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
6156 ha->active_image = QLA27XX_PRIMARY_IMAGE;
6157 if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
6158 if (!ha->active_image ||
6159 pri_image_status.generation_number <
6160 sec_image_status.generation_number)
6161 ha->active_image = QLA27XX_SECONDARY_IMAGE;
6162 }
6163
6164 ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
6165 ha->active_image == 0 ? "default bootld and fw" :
6166 ha->active_image == 1 ? "primary" :
6167 ha->active_image == 2 ? "secondary" :
6168 "Invalid");
6169
6170 return ha->active_image;
6171}
6172
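/*
 * Editor's note: a standalone sketch of the image-selection rule applied in
 * qla27xx_find_valid_image() above -- the secondary flash image is chosen
 * only when it is valid and enabled and either no primary was accepted or
 * the secondary carries a newer generation number.  The 0/1/2 constants
 * mirror the meanings in the log message; this is not driver code.
 */
#include <stdint.h>
#include <stdio.h>

enum { SKETCH_IMG_DEFAULT = 0, SKETCH_IMG_PRIMARY = 1, SKETCH_IMG_SECONDARY = 2 };

struct sketch_img {
	int valid;		/* signature + checksum passed */
	uint32_t status_mask;	/* bit 0: image enabled */
	uint32_t generation;
};

static int sketch_pick_image(const struct sketch_img *pri,
    const struct sketch_img *sec)
{
	int active = SKETCH_IMG_DEFAULT;

	if (pri->valid && (pri->status_mask & 0x1))
		active = SKETCH_IMG_PRIMARY;
	if (sec->valid && (sec->status_mask & 0x1)) {
		if (active == SKETCH_IMG_DEFAULT ||
		    pri->generation < sec->generation)
			active = SKETCH_IMG_SECONDARY;
	}
	return active;
}

int main(void)
{
	struct sketch_img pri = { 1, 0x1, 10 };
	struct sketch_img sec = { 1, 0x1, 12 };

	printf("active image: %d\n", sketch_pick_image(&pri, &sec)); /* 2 */
	return 0;
}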
413975a0 6173static int
cbc8eb67
AV
6174qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
6175 uint32_t faddr)
d1c61909 6176{
73208dfd 6177 int rval = QLA_SUCCESS;
d1c61909 6178 int segments, fragment;
d1c61909
AV
6179 uint32_t *dcode, dlen;
6180 uint32_t risc_addr;
6181 uint32_t risc_size;
6182 uint32_t i;
e315cd28 6183 struct qla_hw_data *ha = vha->hw;
73208dfd 6184 struct req_que *req = ha->req_q_map[0];
eaac30be 6185
7c3df132 6186 ql_dbg(ql_dbg_init, vha, 0x008b,
cfb0919c 6187 "FW: Loading firmware from flash (%x).\n", faddr);
eaac30be 6188
d1c61909
AV
6189 rval = QLA_SUCCESS;
6190
6191 segments = FA_RISC_CODE_SEGMENTS;
73208dfd 6192 dcode = (uint32_t *)req->ring;
d1c61909
AV
6193 *srisc_addr = 0;
6194
4243c115
SC
6195 if (IS_QLA27XX(ha) &&
6196 qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
6197 faddr = ha->flt_region_fw_sec;
6198
d1c61909 6199 /* Validate firmware image by checking version. */
e315cd28 6200 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
d1c61909
AV
6201 for (i = 0; i < 4; i++)
6202 dcode[i] = be32_to_cpu(dcode[i]);
6203 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
6204 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
6205 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
6206 dcode[3] == 0)) {
7c3df132
SK
6207 ql_log(ql_log_fatal, vha, 0x008c,
6208 "Unable to verify the integrity of flash firmware "
6209 "image.\n");
6210 ql_log(ql_log_fatal, vha, 0x008d,
6211 "Firmware data: %08x %08x %08x %08x.\n",
6212 dcode[0], dcode[1], dcode[2], dcode[3]);
d1c61909
AV
6213
6214 return QLA_FUNCTION_FAILED;
6215 }
6216
6217 while (segments && rval == QLA_SUCCESS) {
6218 /* Read segment's load information. */
e315cd28 6219 qla24xx_read_flash_data(vha, dcode, faddr, 4);
d1c61909
AV
6220
6221 risc_addr = be32_to_cpu(dcode[2]);
6222 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
6223 risc_size = be32_to_cpu(dcode[3]);
6224
6225 fragment = 0;
6226 while (risc_size > 0 && rval == QLA_SUCCESS) {
6227 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
6228 if (dlen > risc_size)
6229 dlen = risc_size;
6230
7c3df132
SK
6231 ql_dbg(ql_dbg_init, vha, 0x008e,
6232 "Loading risc segment@ risc addr %x "
6233 "number of dwords 0x%x offset 0x%x.\n",
6234 risc_addr, dlen, faddr);
d1c61909 6235
e315cd28 6236 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
d1c61909
AV
6237 for (i = 0; i < dlen; i++)
6238 dcode[i] = swab32(dcode[i]);
6239
73208dfd 6240 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
d1c61909
AV
6241 dlen);
6242 if (rval) {
7c3df132
SK
6243 ql_log(ql_log_fatal, vha, 0x008f,
6244 "Failed to load segment %d of firmware.\n",
6245 fragment);
f261f7af 6246 return QLA_FUNCTION_FAILED;
d1c61909
AV
6247 }
6248
6249 faddr += dlen;
6250 risc_addr += dlen;
6251 risc_size -= dlen;
6252 fragment++;
6253 }
6254
6255 /* Next segment. */
6256 segments--;
6257 }
6258
f73cb695
CD
6259 if (!IS_QLA27XX(ha))
6260 return rval;
6261
6262 if (ha->fw_dump_template)
6263 vfree(ha->fw_dump_template);
6264 ha->fw_dump_template = NULL;
6265 ha->fw_dump_template_len = 0;
6266
6267 ql_dbg(ql_dbg_init, vha, 0x0161,
6268 "Loading fwdump template from %x\n", faddr);
6269 qla24xx_read_flash_data(vha, dcode, faddr, 7);
6270 risc_size = be32_to_cpu(dcode[2]);
6271 ql_dbg(ql_dbg_init, vha, 0x0162,
6272 "-> array size %x dwords\n", risc_size);
6273 if (risc_size == 0 || risc_size == ~0)
6274 goto default_template;
6275
6276 dlen = (risc_size - 8) * sizeof(*dcode);
6277 ql_dbg(ql_dbg_init, vha, 0x0163,
6278 "-> template allocating %x bytes...\n", dlen);
6279 ha->fw_dump_template = vmalloc(dlen);
6280 if (!ha->fw_dump_template) {
6281 ql_log(ql_log_warn, vha, 0x0164,
6282 "Failed fwdump template allocate %x bytes.\n", risc_size);
6283 goto default_template;
6284 }
6285
6286 faddr += 7;
6287 risc_size -= 8;
6288 dcode = ha->fw_dump_template;
6289 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
6290 for (i = 0; i < risc_size; i++)
6291 dcode[i] = le32_to_cpu(dcode[i]);
6292
6293 if (!qla27xx_fwdt_template_valid(dcode)) {
6294 ql_log(ql_log_warn, vha, 0x0165,
6295 "Failed fwdump template validate\n");
6296 goto default_template;
6297 }
6298
6299 dlen = qla27xx_fwdt_template_size(dcode);
6300 ql_dbg(ql_dbg_init, vha, 0x0166,
6301 "-> template size %x bytes\n", dlen);
6302 if (dlen > risc_size * sizeof(*dcode)) {
6303 ql_log(ql_log_warn, vha, 0x0167,
97ea702b
CD
6304 "Failed fwdump template exceeds array by %x bytes\n",
6305 (uint32_t)(dlen - risc_size * sizeof(*dcode)));
f73cb695
CD
6306 goto default_template;
6307 }
6308 ha->fw_dump_template_len = dlen;
6309 return rval;
6310
6311default_template:
6312 ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
6313 if (ha->fw_dump_template)
6314 vfree(ha->fw_dump_template);
6315 ha->fw_dump_template = NULL;
6316 ha->fw_dump_template_len = 0;
6317
6318 dlen = qla27xx_fwdt_template_default_size();
6319 ql_dbg(ql_dbg_init, vha, 0x0169,
6320 "-> template allocating %x bytes...\n", dlen);
6321 ha->fw_dump_template = vmalloc(dlen);
6322 if (!ha->fw_dump_template) {
6323 ql_log(ql_log_warn, vha, 0x016a,
6324 "Failed fwdump template allocate %x bytes.\n", risc_size);
6325 goto failed_template;
6326 }
6327
6328 dcode = ha->fw_dump_template;
6329 risc_size = dlen / sizeof(*dcode);
6330 memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
6331 for (i = 0; i < risc_size; i++)
6332 dcode[i] = be32_to_cpu(dcode[i]);
6333
6334 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
6335 ql_log(ql_log_warn, vha, 0x016b,
6336 "Failed fwdump template validate\n");
6337 goto failed_template;
6338 }
6339
6340 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
6341 ql_dbg(ql_dbg_init, vha, 0x016c,
6342 "-> template size %x bytes\n", dlen);
6343 ha->fw_dump_template_len = dlen;
6344 return rval;
6345
6346failed_template:
6347 ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
6348 if (ha->fw_dump_template)
6349 vfree(ha->fw_dump_template);
6350 ha->fw_dump_template = NULL;
6351 ha->fw_dump_template_len = 0;
d1c61909
AV
6352 return rval;
6353}
6354
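/*
 * Editor's note: a standalone sketch of the fragment loop used by the RISC
 * loaders above -- a segment of risc_size dwords is pushed in chunks of at
 * most (fw_transfer_size >> 2) dwords, advancing the flash offset and the
 * RISC load address together.  The sizes below are arbitrary; this is not
 * driver code.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fw_transfer_size = 0x1000;		/* bytes per transfer */
	uint32_t faddr = 0x80000;			/* flash offset (dwords) */
	uint32_t risc_addr = 0x100000;			/* RISC load address */
	uint32_t risc_size = 0x2345;			/* segment size in dwords */
	int fragment = 0;

	while (risc_size > 0) {
		uint32_t dlen = fw_transfer_size >> 2;	/* dwords per chunk */

		if (dlen > risc_size)
			dlen = risc_size;
		printf("fragment %d: %u dwords @ flash 0x%x -> risc 0x%x\n",
		    fragment, (unsigned)dlen, (unsigned)faddr,
		    (unsigned)risc_addr);
		faddr += dlen;
		risc_addr += dlen;
		risc_size -= dlen;
		fragment++;
	}
	return 0;
}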
e9454a88 6355#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
d1c61909 6356
0107109e 6357int
e315cd28 6358qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5433383e
AV
6359{
6360 int rval;
6361 int i, fragment;
6362 uint16_t *wcode, *fwcode;
6363 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
6364 struct fw_blob *blob;
e315cd28 6365 struct qla_hw_data *ha = vha->hw;
73208dfd 6366 struct req_que *req = ha->req_q_map[0];
5433383e
AV
6367
6368 /* Load firmware blob. */
e315cd28 6369 blob = qla2x00_request_firmware(vha);
5433383e 6370 if (!blob) {
7c3df132 6371 ql_log(ql_log_info, vha, 0x0083,
94bcf830 6372 "Firmware image unavailable.\n");
7c3df132
SK
6373 ql_log(ql_log_info, vha, 0x0084,
6374 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
5433383e
AV
6375 return QLA_FUNCTION_FAILED;
6376 }
6377
6378 rval = QLA_SUCCESS;
6379
73208dfd 6380 wcode = (uint16_t *)req->ring;
5433383e
AV
6381 *srisc_addr = 0;
6382 fwcode = (uint16_t *)blob->fw->data;
6383 fwclen = 0;
6384
6385 /* Validate firmware image by checking version. */
6386 if (blob->fw->size < 8 * sizeof(uint16_t)) {
7c3df132
SK
6387 ql_log(ql_log_fatal, vha, 0x0085,
6388 "Unable to verify integrity of firmware image (%Zd).\n",
5433383e
AV
6389 blob->fw->size);
6390 goto fail_fw_integrity;
6391 }
6392 for (i = 0; i < 4; i++)
6393 wcode[i] = be16_to_cpu(fwcode[i + 4]);
6394 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
6395 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
6396 wcode[2] == 0 && wcode[3] == 0)) {
7c3df132
SK
6397 ql_log(ql_log_fatal, vha, 0x0086,
6398 "Unable to verify integrity of firmware image.\n");
6399 ql_log(ql_log_fatal, vha, 0x0087,
6400 "Firmware data: %04x %04x %04x %04x.\n",
6401 wcode[0], wcode[1], wcode[2], wcode[3]);
5433383e
AV
6402 goto fail_fw_integrity;
6403 }
6404
6405 seg = blob->segs;
6406 while (*seg && rval == QLA_SUCCESS) {
6407 risc_addr = *seg;
6408 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
6409 risc_size = be16_to_cpu(fwcode[3]);
6410
6411 /* Validate firmware image size. */
6412 fwclen += risc_size * sizeof(uint16_t);
6413 if (blob->fw->size < fwclen) {
7c3df132 6414 ql_log(ql_log_fatal, vha, 0x0088,
5433383e 6415 "Unable to verify integrity of firmware image "
7c3df132 6416 "(%Zd).\n", blob->fw->size);
5433383e
AV
6417 goto fail_fw_integrity;
6418 }
6419
6420 fragment = 0;
6421 while (risc_size > 0 && rval == QLA_SUCCESS) {
6422 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
6423 if (wlen > risc_size)
6424 wlen = risc_size;
7c3df132
SK
6425 ql_dbg(ql_dbg_init, vha, 0x0089,
6426 "Loading risc segment@ risc addr %x number of "
6427 "words 0x%x.\n", risc_addr, wlen);
5433383e
AV
6428
6429 for (i = 0; i < wlen; i++)
6430 wcode[i] = swab16(fwcode[i]);
6431
73208dfd 6432 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5433383e
AV
6433 wlen);
6434 if (rval) {
7c3df132
SK
6435 ql_log(ql_log_fatal, vha, 0x008a,
6436 "Failed to load segment %d of firmware.\n",
6437 fragment);
5433383e
AV
6438 break;
6439 }
6440
6441 fwcode += wlen;
6442 risc_addr += wlen;
6443 risc_size -= wlen;
6444 fragment++;
6445 }
6446
6447 /* Next segment. */
6448 seg++;
6449 }
6450 return rval;
6451
6452fail_fw_integrity:
6453 return QLA_FUNCTION_FAILED;
6454}
6455
eaac30be
AV
6456static int
6457qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
0107109e
AV
6458{
6459 int rval;
6460 int segments, fragment;
6461 uint32_t *dcode, dlen;
6462 uint32_t risc_addr;
6463 uint32_t risc_size;
6464 uint32_t i;
5433383e 6465 struct fw_blob *blob;
f73cb695
CD
6466 const uint32_t *fwcode;
6467 uint32_t fwclen;
e315cd28 6468 struct qla_hw_data *ha = vha->hw;
73208dfd 6469 struct req_que *req = ha->req_q_map[0];
0107109e 6470
5433383e 6471 /* Load firmware blob. */
e315cd28 6472 blob = qla2x00_request_firmware(vha);
5433383e 6473 if (!blob) {
7c3df132 6474 ql_log(ql_log_warn, vha, 0x0090,
94bcf830 6475 "Firmware image unavailable.\n");
7c3df132
SK
6476 ql_log(ql_log_warn, vha, 0x0091,
6477 "Firmware images can be retrieved from: "
6478 QLA_FW_URL ".\n");
d1c61909 6479
eaac30be 6480 return QLA_FUNCTION_FAILED;
0107109e
AV
6481 }
6482
cfb0919c
CD
6483 ql_dbg(ql_dbg_init, vha, 0x0092,
6484 "FW: Loading via request-firmware.\n");
eaac30be 6485
0107109e
AV
6486 rval = QLA_SUCCESS;
6487
6488 segments = FA_RISC_CODE_SEGMENTS;
73208dfd 6489 dcode = (uint32_t *)req->ring;
0107109e 6490 *srisc_addr = 0;
5433383e 6491 fwcode = (uint32_t *)blob->fw->data;
0107109e
AV
6492 fwclen = 0;
6493
6494 /* Validate firmware image by checking version. */
5433383e 6495 if (blob->fw->size < 8 * sizeof(uint32_t)) {
7c3df132
SK
6496 ql_log(ql_log_fatal, vha, 0x0093,
6497 "Unable to verify integrity of firmware image (%Zd).\n",
5433383e 6498 blob->fw->size);
f73cb695 6499 return QLA_FUNCTION_FAILED;
0107109e
AV
6500 }
6501 for (i = 0; i < 4; i++)
6502 dcode[i] = be32_to_cpu(fwcode[i + 4]);
6503 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
6504 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
6505 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
6506 dcode[3] == 0)) {
7c3df132
SK
6507 ql_log(ql_log_fatal, vha, 0x0094,
6508 "Unable to verify integrity of firmware image (%Zd).\n",
6509 blob->fw->size);
6510 ql_log(ql_log_fatal, vha, 0x0095,
6511 "Firmware data: %08x %08x %08x %08x.\n",
6512 dcode[0], dcode[1], dcode[2], dcode[3]);
f73cb695 6513 return QLA_FUNCTION_FAILED;
0107109e
AV
6514 }
6515
6516 while (segments && rval == QLA_SUCCESS) {
6517 risc_addr = be32_to_cpu(fwcode[2]);
6518 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
6519 risc_size = be32_to_cpu(fwcode[3]);
6520
6521 /* Validate firmware image size. */
6522 fwclen += risc_size * sizeof(uint32_t);
5433383e 6523 if (blob->fw->size < fwclen) {
7c3df132 6524 ql_log(ql_log_fatal, vha, 0x0096,
5433383e 6525 "Unable to verify integrity of firmware image "
7c3df132 6526 "(%Zd).\n", blob->fw->size);
f73cb695 6527 return QLA_FUNCTION_FAILED;
0107109e
AV
6528 }
6529
6530 fragment = 0;
6531 while (risc_size > 0 && rval == QLA_SUCCESS) {
6532 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
6533 if (dlen > risc_size)
6534 dlen = risc_size;
6535
7c3df132
SK
6536 ql_dbg(ql_dbg_init, vha, 0x0097,
6537 "Loading risc segment@ risc addr %x "
6538 "number of dwords 0x%x.\n", risc_addr, dlen);
0107109e
AV
6539
6540 for (i = 0; i < dlen; i++)
6541 dcode[i] = swab32(fwcode[i]);
6542
73208dfd 6543 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
590f98e5 6544 dlen);
0107109e 6545 if (rval) {
7c3df132
SK
6546 ql_log(ql_log_fatal, vha, 0x0098,
6547 "Failed to load segment %d of firmware.\n",
6548 fragment);
f261f7af 6549 return QLA_FUNCTION_FAILED;
0107109e
AV
6550 }
6551
6552 fwcode += dlen;
6553 risc_addr += dlen;
6554 risc_size -= dlen;
6555 fragment++;
6556 }
6557
6558 /* Next segment. */
6559 segments--;
6560 }
f73cb695
CD
6561
6562 if (!IS_QLA27XX(ha))
6563 return rval;
6564
6565 if (ha->fw_dump_template)
6566 vfree(ha->fw_dump_template);
6567 ha->fw_dump_template = NULL;
6568 ha->fw_dump_template_len = 0;
6569
6570 ql_dbg(ql_dbg_init, vha, 0x171,
97ea702b
CD
6571 "Loading fwdump template from %x\n",
6572 (uint32_t)((void *)fwcode - (void *)blob->fw->data));
f73cb695
CD
6573 risc_size = be32_to_cpu(fwcode[2]);
6574 ql_dbg(ql_dbg_init, vha, 0x172,
6575 "-> array size %x dwords\n", risc_size);
6576 if (risc_size == 0 || risc_size == ~0)
6577 goto default_template;
6578
6579 dlen = (risc_size - 8) * sizeof(*fwcode);
6580 ql_dbg(ql_dbg_init, vha, 0x0173,
6581 "-> template allocating %x bytes...\n", dlen);
6582 ha->fw_dump_template = vmalloc(dlen);
6583 if (!ha->fw_dump_template) {
6584 ql_log(ql_log_warn, vha, 0x0174,
6585 "Failed fwdump template allocate %x bytes.\n", risc_size);
6586 goto default_template;
6587 }
6588
6589 fwcode += 7;
6590 risc_size -= 8;
6591 dcode = ha->fw_dump_template;
6592 for (i = 0; i < risc_size; i++)
6593 dcode[i] = le32_to_cpu(fwcode[i]);
6594
6595 if (!qla27xx_fwdt_template_valid(dcode)) {
6596 ql_log(ql_log_warn, vha, 0x0175,
6597 "Failed fwdump template validate\n");
6598 goto default_template;
6599 }
6600
6601 dlen = qla27xx_fwdt_template_size(dcode);
6602 ql_dbg(ql_dbg_init, vha, 0x0176,
6603 "-> template size %x bytes\n", dlen);
6604 if (dlen > risc_size * sizeof(*fwcode)) {
6605 ql_log(ql_log_warn, vha, 0x0177,
97ea702b
CD
6606 "Failed fwdump template exceeds array by %x bytes\n",
6607 (uint32_t)(dlen - risc_size * sizeof(*fwcode)));
f73cb695
CD
6608 goto default_template;
6609 }
6610 ha->fw_dump_template_len = dlen;
0107109e
AV
6611 return rval;
6612
f73cb695
CD
6613default_template:
6614 ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
6615 if (ha->fw_dump_template)
6616 vfree(ha->fw_dump_template);
6617 ha->fw_dump_template = NULL;
6618 ha->fw_dump_template_len = 0;
6619
6620 dlen = qla27xx_fwdt_template_default_size();
6621 ql_dbg(ql_dbg_init, vha, 0x0179,
6622 "-> template allocating %x bytes...\n", dlen);
6623 ha->fw_dump_template = vmalloc(dlen);
6624 if (!ha->fw_dump_template) {
6625 ql_log(ql_log_warn, vha, 0x017a,
6626 "Failed fwdump template allocate %x bytes.\n", risc_size);
6627 goto failed_template;
6628 }
6629
6630 dcode = ha->fw_dump_template;
6631 risc_size = dlen / sizeof(*fwcode);
6632 fwcode = qla27xx_fwdt_template_default();
6633 for (i = 0; i < risc_size; i++)
6634 dcode[i] = be32_to_cpu(fwcode[i]);
6635
6636 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
6637 ql_log(ql_log_warn, vha, 0x017b,
6638 "Failed fwdump template validate\n");
6639 goto failed_template;
6640 }
6641
6642 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
6643 ql_dbg(ql_dbg_init, vha, 0x017c,
6644 "-> template size %x bytes\n", dlen);
6645 ha->fw_dump_template_len = dlen;
6646 return rval;
6647
6648failed_template:
6649 ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
6650 if (ha->fw_dump_template)
6651 vfree(ha->fw_dump_template);
6652 ha->fw_dump_template = NULL;
6653 ha->fw_dump_template_len = 0;
6654 return rval;
0107109e 6655}
18c6c127 6656
eaac30be
AV
6657int
6658qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
6659{
6660 int rval;
6661
e337d907
AV
6662 if (ql2xfwloadbin == 1)
6663 return qla81xx_load_risc(vha, srisc_addr);
6664
eaac30be
AV
6665 /*
6666 * FW Load priority:
6667 * 1) Firmware via request-firmware interface (.bin file).
6668 * 2) Firmware residing in flash.
6669 */
6670 rval = qla24xx_load_risc_blob(vha, srisc_addr);
6671 if (rval == QLA_SUCCESS)
6672 return rval;
6673
cbc8eb67
AV
6674 return qla24xx_load_risc_flash(vha, srisc_addr,
6675 vha->hw->flt_region_fw);
eaac30be
AV
6676}
6677
6678int
6679qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
6680{
6681 int rval;
cbc8eb67 6682 struct qla_hw_data *ha = vha->hw;
eaac30be 6683
e337d907 6684 if (ql2xfwloadbin == 2)
cbc8eb67 6685 goto try_blob_fw;
e337d907 6686
eaac30be
AV
6687 /*
6688 * FW Load priority:
6689 * 1) Firmware residing in flash.
6690 * 2) Firmware via request-firmware interface (.bin file).
cbc8eb67 6691 * 3) Golden-Firmware residing in flash -- limited operation.
eaac30be 6692 */
cbc8eb67 6693 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
eaac30be
AV
6694 if (rval == QLA_SUCCESS)
6695 return rval;
6696
cbc8eb67
AV
6697try_blob_fw:
6698 rval = qla24xx_load_risc_blob(vha, srisc_addr);
6699 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
6700 return rval;
6701
7c3df132
SK
6702 ql_log(ql_log_info, vha, 0x0099,
6703 "Attempting to fallback to golden firmware.\n");
cbc8eb67
AV
6704 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
6705 if (rval != QLA_SUCCESS)
6706 return rval;
6707
7c3df132 6708 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
cbc8eb67 6709 ha->flags.running_gold_fw = 1;
cbc8eb67 6710 return rval;
eaac30be
AV
6711}
6712
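/*
 * Editor's note: a standalone sketch of the load-priority fallback used by
 * qla81xx_load_risc() above -- flash image first, then the request-firmware
 * blob, then the golden firmware as a last resort.  The loader functions
 * below are stubs standing in for the driver routines; this is not driver
 * code.
 */
#include <stdio.h>

#define SKETCH_OK     0
#define SKETCH_FAILED 1

static int sketch_load_from_flash(void)  { return SKETCH_FAILED; }
static int sketch_load_from_blob(void)   { return SKETCH_FAILED; }
static int sketch_load_golden_fw(void)   { return SKETCH_OK; }

int main(void)
{
	struct {
		const char *name;
		int (*load)(void);
	} loaders[] = {
		{ "flash",  sketch_load_from_flash },
		{ "blob",   sketch_load_from_blob },
		{ "golden", sketch_load_golden_fw },	/* limited operation */
	};
	unsigned int i;

	for (i = 0; i < sizeof(loaders) / sizeof(loaders[0]); i++) {
		if (loaders[i].load() == SKETCH_OK) {
			printf("loaded firmware from %s\n", loaders[i].name);
			return 0;
		}
		printf("loading from %s failed, falling back\n", loaders[i].name);
	}
	return 1;
}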
18c6c127 6713void
e315cd28 6714qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
18c6c127
AV
6715{
6716 int ret, retries;
e315cd28 6717 struct qla_hw_data *ha = vha->hw;
18c6c127 6718
85880801
AV
6719 if (ha->flags.pci_channel_io_perm_failure)
6720 return;
e428924c 6721 if (!IS_FWI2_CAPABLE(ha))
18c6c127 6722 return;
75edf81d
AV
6723 if (!ha->fw_major_version)
6724 return;
18c6c127 6725
e315cd28 6726 ret = qla2x00_stop_firmware(vha);
7c7f1f29 6727 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
b469a7cb 6728 ret != QLA_INVALID_COMMAND && retries ; retries--) {
e315cd28
AC
6729 ha->isp_ops->reset_chip(vha);
6730 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
18c6c127 6731 continue;
e315cd28 6732 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
18c6c127 6733 continue;
7c3df132
SK
6734 ql_log(ql_log_info, vha, 0x8015,
6735 "Attempting retry of stop-firmware command.\n");
e315cd28 6736 ret = qla2x00_stop_firmware(vha);
18c6c127
AV
6737 }
6738}
2c3dfe3f
SJ
6739
6740int
e315cd28 6741qla24xx_configure_vhba(scsi_qla_host_t *vha)
2c3dfe3f
SJ
6742{
6743 int rval = QLA_SUCCESS;
0b91d116 6744 int rval2;
2c3dfe3f 6745 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28
AC
6746 struct qla_hw_data *ha = vha->hw;
6747 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
67c2e93a
AC
6748 struct req_que *req;
6749 struct rsp_que *rsp;
2c3dfe3f 6750
e315cd28 6751 if (!vha->vp_idx)
2c3dfe3f
SJ
6752 return -EINVAL;
6753
e315cd28 6754 rval = qla2x00_fw_ready(base_vha);
d7459527
MH
6755 if (vha->qpair)
6756 req = vha->qpair->req;
67c2e93a 6757 else
d7459527 6758 req = ha->req_q_map[0];
67c2e93a
AC
6759 rsp = req->rsp;
6760
2c3dfe3f 6761 if (rval == QLA_SUCCESS) {
e315cd28 6762 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
73208dfd 6763 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
2c3dfe3f
SJ
6764 }
6765
e315cd28 6766 vha->flags.management_server_logged_in = 0;
2c3dfe3f
SJ
6767
6768 /* Login to SNS first */
0b91d116
CD
6769 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
6770 BIT_1);
6771 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
6772 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
6773 ql_dbg(ql_dbg_init, vha, 0x0120,
6774 "Failed SNS login: loop_id=%x, rval2=%d\n",
6775 NPH_SNS, rval2);
6776 else
6777 ql_dbg(ql_dbg_init, vha, 0x0103,
6778 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
6779 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
6780 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
2c3dfe3f
SJ
6781 return (QLA_FUNCTION_FAILED);
6782 }
6783
e315cd28
AC
6784 atomic_set(&vha->loop_down_timer, 0);
6785 atomic_set(&vha->loop_state, LOOP_UP);
6786 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6787 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
6788 rval = qla2x00_loop_resync(base_vha);
2c3dfe3f
SJ
6789
6790 return rval;
6791}
4d4df193
HK
6792
6793/* 84XX Support **************************************************************/
6794
6795static LIST_HEAD(qla_cs84xx_list);
6796static DEFINE_MUTEX(qla_cs84xx_mutex);
6797
6798static struct qla_chip_state_84xx *
e315cd28 6799qla84xx_get_chip(struct scsi_qla_host *vha)
4d4df193
HK
6800{
6801 struct qla_chip_state_84xx *cs84xx;
e315cd28 6802 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
6803
6804 mutex_lock(&qla_cs84xx_mutex);
6805
6806 /* Find any shared 84xx chip. */
6807 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
6808 if (cs84xx->bus == ha->pdev->bus) {
6809 kref_get(&cs84xx->kref);
6810 goto done;
6811 }
6812 }
6813
6814 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
6815 if (!cs84xx)
6816 goto done;
6817
6818 kref_init(&cs84xx->kref);
6819 spin_lock_init(&cs84xx->access_lock);
6820 mutex_init(&cs84xx->fw_update_mutex);
6821 cs84xx->bus = ha->pdev->bus;
6822
6823 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
6824done:
6825 mutex_unlock(&qla_cs84xx_mutex);
6826 return cs84xx;
6827}
6828
6829static void
6830__qla84xx_chip_release(struct kref *kref)
6831{
6832 struct qla_chip_state_84xx *cs84xx =
6833 container_of(kref, struct qla_chip_state_84xx, kref);
6834
6835 mutex_lock(&qla_cs84xx_mutex);
6836 list_del(&cs84xx->list);
6837 mutex_unlock(&qla_cs84xx_mutex);
6838 kfree(cs84xx);
6839}
6840
6841void
e315cd28 6842qla84xx_put_chip(struct scsi_qla_host *vha)
4d4df193 6843{
e315cd28 6844 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
6845 if (ha->cs84xx)
6846 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
6847}
6848
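/*
 * Editor's note: a standalone sketch of the find-or-create + refcount
 * pattern used by qla84xx_get_chip()/qla84xx_put_chip() above, where
 * functions on the same PCI bus share one chip-state object.  The kref and
 * kernel list are modelled here with an int count and a singly linked list
 * under a pthread mutex; this is illustrative only, not driver code.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_cs84xx {
	int bus;			/* key: shared per PCI bus */
	int refcount;
	struct sketch_cs84xx *next;
};

static pthread_mutex_t sketch_cs_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct sketch_cs84xx *sketch_cs_list;

static struct sketch_cs84xx *sketch_get_chip(int bus)
{
	struct sketch_cs84xx *cs;

	pthread_mutex_lock(&sketch_cs_mutex);
	for (cs = sketch_cs_list; cs; cs = cs->next)
		if (cs->bus == bus) {
			cs->refcount++;		/* reuse the shared state */
			goto done;
		}
	cs = calloc(1, sizeof(*cs));
	if (cs) {
		cs->bus = bus;
		cs->refcount = 1;
		cs->next = sketch_cs_list;	/* add to the global list */
		sketch_cs_list = cs;
	}
done:
	pthread_mutex_unlock(&sketch_cs_mutex);
	return cs;
}

static void sketch_put_chip(struct sketch_cs84xx *cs)
{
	pthread_mutex_lock(&sketch_cs_mutex);
	if (--cs->refcount == 0) {
		struct sketch_cs84xx **pp;

		for (pp = &sketch_cs_list; *pp; pp = &(*pp)->next)
			if (*pp == cs) {
				*pp = cs->next;	/* unlink on last put */
				break;
			}
		free(cs);
	}
	pthread_mutex_unlock(&sketch_cs_mutex);
}

int main(void)
{
	struct sketch_cs84xx *a = sketch_get_chip(3);
	struct sketch_cs84xx *b = sketch_get_chip(3);	/* same bus -> shared */

	if (!a || !b)
		return 1;
	printf("shared: %s, refcount=%d\n", a == b ? "yes" : "no", a->refcount);
	sketch_put_chip(b);
	sketch_put_chip(a);
	return 0;
}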
6849static int
e315cd28 6850qla84xx_init_chip(scsi_qla_host_t *vha)
4d4df193
HK
6851{
6852 int rval;
6853 uint16_t status[2];
e315cd28 6854 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
6855
6856 mutex_lock(&ha->cs84xx->fw_update_mutex);
6857
e315cd28 6858 rval = qla84xx_verify_chip(vha, status);
4d4df193
HK
6859
6860 mutex_unlock(&ha->cs84xx->fw_update_mutex);
6861
6862 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
6863 QLA_SUCCESS;
6864}
3a03eb79
AV
6865
6866/* 81XX Support **************************************************************/
6867
6868int
6869qla81xx_nvram_config(scsi_qla_host_t *vha)
6870{
6871 int rval;
6872 struct init_cb_81xx *icb;
6873 struct nvram_81xx *nv;
6874 uint32_t *dptr;
6875 uint8_t *dptr1, *dptr2;
6876 uint32_t chksum;
6877 uint16_t cnt;
6878 struct qla_hw_data *ha = vha->hw;
6879
6880 rval = QLA_SUCCESS;
6881 icb = (struct init_cb_81xx *)ha->init_cb;
6882 nv = ha->nvram;
6883
6884 /* Determine NVRAM starting address. */
6885 ha->nvram_size = sizeof(struct nvram_81xx);
3a03eb79 6886 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7ec0effd
AD
6887 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
6888 ha->vpd_size = FA_VPD_SIZE_82XX;
3a03eb79
AV
6889
6890 /* Get VPD data into cache */
6891 ha->vpd = ha->nvram + VPD_OFFSET;
3d79038f
AV
6892 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
6893 ha->vpd_size);
3a03eb79
AV
6894
6895 /* Get NVRAM data into cache and calculate checksum. */
3d79038f 6896 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
3a03eb79 6897 ha->nvram_size);
3d79038f 6898 dptr = (uint32_t *)nv;
da08ef5c
JC
6899 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
6900 chksum += le32_to_cpu(*dptr);
3a03eb79 6901
7c3df132
SK
6902 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
6903 "Contents of NVRAM:\n");
6904 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
6905 (uint8_t *)nv, ha->nvram_size);
3a03eb79
AV
6906
6907 /* Bad NVRAM data, set defaults parameters. */
6908 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
6909 || nv->id[3] != ' ' ||
ad950360 6910 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
3a03eb79 6911 /* Reset NVRAM data. */
7c3df132 6912 ql_log(ql_log_info, vha, 0x0073,
9e336520 6913 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7c3df132 6914 "version=0x%x.\n", chksum, nv->id[0],
3a03eb79 6915 le16_to_cpu(nv->nvram_version));
7c3df132
SK
6916 ql_log(ql_log_info, vha, 0x0074,
6917 "Falling back to functioning (yet invalid -- WWPN) "
6918 "defaults.\n");
3a03eb79
AV
6919
6920 /*
6921 * Set default initialization control block.
6922 */
6923 memset(nv, 0, ha->nvram_size);
ad950360
BVA
6924 nv->nvram_version = cpu_to_le16(ICB_VERSION);
6925 nv->version = cpu_to_le16(ICB_VERSION);
98aee70d 6926 nv->frame_payload_size = 2048;
ad950360
BVA
6927 nv->execution_throttle = cpu_to_le16(0xFFFF);
6928 nv->exchange_count = cpu_to_le16(0);
3a03eb79 6929 nv->port_name[0] = 0x21;
f73cb695 6930 nv->port_name[1] = 0x00 + ha->port_no + 1;
3a03eb79
AV
6931 nv->port_name[2] = 0x00;
6932 nv->port_name[3] = 0xe0;
6933 nv->port_name[4] = 0x8b;
6934 nv->port_name[5] = 0x1c;
6935 nv->port_name[6] = 0x55;
6936 nv->port_name[7] = 0x86;
6937 nv->node_name[0] = 0x20;
6938 nv->node_name[1] = 0x00;
6939 nv->node_name[2] = 0x00;
6940 nv->node_name[3] = 0xe0;
6941 nv->node_name[4] = 0x8b;
6942 nv->node_name[5] = 0x1c;
6943 nv->node_name[6] = 0x55;
6944 nv->node_name[7] = 0x86;
ad950360
BVA
6945 nv->login_retry_count = cpu_to_le16(8);
6946 nv->interrupt_delay_timer = cpu_to_le16(0);
6947 nv->login_timeout = cpu_to_le16(0);
3a03eb79 6948 nv->firmware_options_1 =
ad950360
BVA
6949 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
6950 nv->firmware_options_2 = cpu_to_le32(2 << 4);
6951 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6952 nv->firmware_options_3 = cpu_to_le32(2 << 13);
6953 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
6954 nv->efi_parameters = cpu_to_le32(0);
3a03eb79 6955 nv->reset_delay = 5;
ad950360
BVA
6956 nv->max_luns_per_target = cpu_to_le16(128);
6957 nv->port_down_retry_count = cpu_to_le16(30);
6958 nv->link_down_timeout = cpu_to_le16(180);
eeebcc92 6959 nv->enode_mac[0] = 0x00;
6246b8a1
GM
6960 nv->enode_mac[1] = 0xC0;
6961 nv->enode_mac[2] = 0xDD;
3a03eb79
AV
6962 nv->enode_mac[3] = 0x04;
6963 nv->enode_mac[4] = 0x05;
f73cb695 6964 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
3a03eb79
AV
6965
6966 rval = 1;
6967 }
6968
9e522cd8
AE
6969 if (IS_T10_PI_CAPABLE(ha))
6970 nv->frame_payload_size &= ~7;
6971
aa230bc5
AE
6972 qlt_81xx_config_nvram_stage1(vha, nv);
6973
3a03eb79 6974 /* Reset Initialization control block */
773120e4 6975 memset(icb, 0, ha->init_cb_size);
3a03eb79
AV
6976
6977 /* Copy 1st segment. */
6978 dptr1 = (uint8_t *)icb;
6979 dptr2 = (uint8_t *)&nv->version;
6980 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
6981 while (cnt--)
6982 *dptr1++ = *dptr2++;
6983
6984 icb->login_retry_count = nv->login_retry_count;
6985
6986 /* Copy 2nd segment. */
6987 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
6988 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
6989 cnt = (uint8_t *)&icb->reserved_5 -
6990 (uint8_t *)&icb->interrupt_delay_timer;
6991 while (cnt--)
6992 *dptr1++ = *dptr2++;
6993
6994 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
6995 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
6996 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
69e5f1ea
AV
6997 icb->enode_mac[0] = 0x00;
6998 icb->enode_mac[1] = 0xC0;
6999 icb->enode_mac[2] = 0xDD;
3a03eb79
AV
7000 icb->enode_mac[3] = 0x04;
7001 icb->enode_mac[4] = 0x05;
f73cb695 7002 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
3a03eb79
AV
7003 }
7004
b64b0e8f
AV
7005 /* Use extended-initialization control block. */
7006 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
7007
3a03eb79
AV
7008 /*
7009 * Setup driver NVRAM options.
7010 */
7011 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
a9083016 7012 "QLE8XXX");
3a03eb79 7013
aa230bc5
AE
7014 qlt_81xx_config_nvram_stage2(vha, icb);
7015
3a03eb79 7016 /* Use alternate WWN? */
ad950360 7017 if (nv->host_p & cpu_to_le32(BIT_15)) {
3a03eb79
AV
7018 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7019 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7020 }
7021
7022 /* Prepare nodename */
ad950360 7023 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
3a03eb79
AV
7024 /*
7025 * Firmware will apply the following mask if the nodename was
7026 * not provided.
7027 */
7028 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7029 icb->node_name[0] &= 0xF0;
7030 }
7031
7032 /* Set host adapter parameters. */
7033 ha->flags.disable_risc_code_load = 0;
7034 ha->flags.enable_lip_reset = 0;
7035 ha->flags.enable_lip_full_login =
7036 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
7037 ha->flags.enable_target_reset =
7038 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
7039 ha->flags.enable_led_scheme = 0;
7040 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
7041
7042 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7043 (BIT_6 | BIT_5 | BIT_4)) >> 4;
7044
7045 /* save HBA serial number */
7046 ha->serial0 = icb->port_name[5];
7047 ha->serial1 = icb->port_name[6];
7048 ha->serial2 = icb->port_name[7];
7049 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7050 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7051
ad950360 7052 icb->execution_throttle = cpu_to_le16(0xFFFF);
3a03eb79
AV
7053
7054 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7055
7056 /* Set minimum login_timeout to 4 seconds. */
7057 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7058 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7059 if (le16_to_cpu(nv->login_timeout) < 4)
ad950360 7060 nv->login_timeout = cpu_to_le16(4);
3a03eb79 7061 ha->login_timeout = le16_to_cpu(nv->login_timeout);
3a03eb79
AV
7062
7063 /* Set minimum RATOV to 100 tenths of a second. */
7064 ha->r_a_tov = 100;
7065
7066 ha->loop_reset_delay = nv->reset_delay;
7067
7068 /* Link Down Timeout = 0:
7069 *
7ec0effd 7070 * When Port Down timer expires we will start returning
3a03eb79
AV
7071 * I/Os to OS with "DID_NO_CONNECT".
7072 *
7073 * Link Down Timeout != 0:
7074 *
7075 * The driver waits for the link to come up after link down
7076 * before returning I/Os to OS with "DID_NO_CONNECT".
7077 */
7078 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7079 ha->loop_down_abort_time =
7080 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7081 } else {
7082 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7083 ha->loop_down_abort_time =
7084 (LOOP_DOWN_TIME - ha->link_down_timeout);
7085 }
7086
7087 /* Need enough time to try and get the port back. */
7088 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7089 if (qlport_down_retry)
7090 ha->port_down_retry_count = qlport_down_retry;
7091
7092 /* Set login_retry_count */
7093 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
7094 if (ha->port_down_retry_count ==
7095 le16_to_cpu(nv->port_down_retry_count) &&
7096 ha->port_down_retry_count > 3)
7097 ha->login_retry_count = ha->port_down_retry_count;
7098 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7099 ha->login_retry_count = ha->port_down_retry_count;
7100 if (ql2xloginretrycount)
7101 ha->login_retry_count = ql2xloginretrycount;
7102
6246b8a1 7103 /* if not running MSI-X we need handshaking on interrupts */
f73cb695 7104 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
ad950360 7105 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
6246b8a1 7106
3a03eb79
AV
7107 /* Enable ZIO. */
7108 if (!vha->flags.init_done) {
7109 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7110 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7111 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
7112 le16_to_cpu(icb->interrupt_delay_timer): 2;
7113 }
ad950360 7114 icb->firmware_options_2 &= cpu_to_le32(
3a03eb79
AV
7115 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
7116 vha->flags.process_response_queue = 0;
7117 if (ha->zio_mode != QLA_ZIO_DISABLED) {
7118 ha->zio_mode = QLA_ZIO_MODE_6;
7119
7c3df132 7120 ql_log(ql_log_info, vha, 0x0075,
3a03eb79 7121 "ZIO mode %d enabled; timer delay (%d us).\n",
7c3df132
SK
7122 ha->zio_mode,
7123 ha->zio_timer * 100);
3a03eb79
AV
7124
7125 icb->firmware_options_2 |= cpu_to_le32(
7126 (uint32_t)ha->zio_mode);
7127 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
7128 vha->flags.process_response_queue = 1;
7129 }
7130
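	/*
	 * Editorial note: rval is set above only when the NVRAM image failed
	 * validation and the hard-coded defaults were substituted, so the
	 * warning below reports that fallback rather than a new failure.
	 */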
7131 if (rval) {
7c3df132
SK
7132 ql_log(ql_log_warn, vha, 0x0076,
7133 "NVRAM configuration failed.\n");
3a03eb79
AV
7134 }
7135 return (rval);
7136}
7137
a9083016
GM
7138int
7139qla82xx_restart_isp(scsi_qla_host_t *vha)
7140{
7141 int status, rval;
a9083016
GM
7142 struct qla_hw_data *ha = vha->hw;
7143 struct req_que *req = ha->req_q_map[0];
7144 struct rsp_que *rsp = ha->rsp_q_map[0];
7145 struct scsi_qla_host *vp;
feafb7b1 7146 unsigned long flags;
a9083016
GM
7147
7148 status = qla2x00_init_rings(vha);
7149 if (!status) {
7150 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7151 ha->flags.chip_reset_done = 1;
7152
7153 status = qla2x00_fw_ready(vha);
7154 if (!status) {
a9083016
GM
7155 /* Issue a marker after FW becomes ready. */
7156 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
a9083016 7157 vha->flags.online = 1;
7108b76e 7158 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
a9083016
GM
7159 }
7160
7161 /* if no cable then assume it's good */
7162 if ((vha->device_flags & DFLG_NO_CABLE))
7163 status = 0;
a9083016
GM
7164 }
7165
7166 if (!status) {
7167 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7168
7169 if (!atomic_read(&vha->loop_down_timer)) {
7170 /*
7171 * Issue marker command only when we are going
7172 * to start the I/O.
7173 */
7174 vha->marker_needed = 1;
7175 }
7176
a9083016
GM
7177 ha->isp_ops->enable_intrs(ha);
7178
7179 ha->isp_abort_cnt = 0;
7180 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7181
53296788 7182 /* Update the firmware version */
3173167f 7183 status = qla82xx_check_md_needed(vha);
53296788 7184
a9083016
GM
7185 if (ha->fce) {
7186 ha->flags.fce_enabled = 1;
7187 memset(ha->fce, 0,
7188 fce_calc_size(ha->fce_bufs));
7189 rval = qla2x00_enable_fce_trace(vha,
7190 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
7191 &ha->fce_bufs);
7192 if (rval) {
cfb0919c 7193 ql_log(ql_log_warn, vha, 0x8001,
7c3df132
SK
7194 "Unable to reinitialize FCE (%d).\n",
7195 rval);
a9083016
GM
7196 ha->flags.fce_enabled = 0;
7197 }
7198 }
7199
7200 if (ha->eft) {
7201 memset(ha->eft, 0, EFT_SIZE);
7202 rval = qla2x00_enable_eft_trace(vha,
7203 ha->eft_dma, EFT_NUM_BUFFERS);
7204 if (rval) {
cfb0919c 7205 ql_log(ql_log_warn, vha, 0x8010,
7c3df132
SK
7206 "Unable to reinitialize EFT (%d).\n",
7207 rval);
a9083016
GM
7208 }
7209 }
a9083016
GM
7210 }
7211
7212 if (!status) {
cfb0919c 7213 ql_dbg(ql_dbg_taskm, vha, 0x8011,
7c3df132 7214 "qla82xx_restart_isp succeeded.\n");
feafb7b1
AE
7215
7216 spin_lock_irqsave(&ha->vport_slock, flags);
7217 list_for_each_entry(vp, &ha->vp_list, list) {
7218 if (vp->vp_idx) {
7219 atomic_inc(&vp->vref_count);
7220 spin_unlock_irqrestore(&ha->vport_slock, flags);
7221
a9083016 7222 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
7223
7224 spin_lock_irqsave(&ha->vport_slock, flags);
7225 atomic_dec(&vp->vref_count);
7226 }
a9083016 7227 }
feafb7b1
AE
7228 spin_unlock_irqrestore(&ha->vport_slock, flags);
7229
a9083016 7230 } else {
cfb0919c 7231 ql_log(ql_log_warn, vha, 0x8016,
7c3df132 7232 "qla82xx_restart_isp **** FAILED ****.\n");
a9083016
GM
7233 }
7234
7235 return status;
7236}
7237
3a03eb79 7238void
ae97c91e 7239qla81xx_update_fw_options(scsi_qla_host_t *vha)
3a03eb79 7240{
ae97c91e
AV
7241 struct qla_hw_data *ha = vha->hw;
7242
f198cafa
HM
7243 /* Hold status IOCBs until ABTS response received. */
7244 if (ql2xfwholdabts)
7245 ha->fw_options[3] |= BIT_12;
7246
088d09d4
GM
7247 /* Set Retry FLOGI in case of P2P connection */
7248 if (ha->operating_mode == P2P) {
7249 ha->fw_options[2] |= BIT_3;
7250 ql_dbg(ql_dbg_disc, vha, 0x2103,
7251 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
7252 __func__, ha->fw_options[2]);
7253 }
7254
ae97c91e 7255 if (!ql2xetsenable)
f198cafa 7256 goto out;
ae97c91e
AV
7257
7258 /* Enable ETS Burst. */
7259 memset(ha->fw_options, 0, sizeof(ha->fw_options));
7260 ha->fw_options[2] |= BIT_9;
f198cafa 7261out:
ae97c91e 7262 qla2x00_set_fw_options(vha, ha->fw_options);
3a03eb79 7263}
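/*
 * Editorial note on qla81xx_update_fw_options(): when ql2xetsenable is set,
 * the memset() above zeroes ha->fw_options[] before enabling ETS bursts, so
 * the hold-ABTS (fw_options[3] BIT_12) and P2P FLOGI-retry (fw_options[2]
 * BIT_3) settings made earlier in the function are discarded in that case.
 */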
09ff701a
SR
7264
7265/*
7266 * qla24xx_get_fcp_prio
7267 * Gets the fcp cmd priority value for the logged in port.
7268 * Looks for a match of the port descriptors within
7269 * each of the fcp prio config entries. If a match is found,
7270 * the tag (priority) value is returned.
7271 *
7272 * Input:
21090cbe 7273 * vha = scsi host structure pointer.
09ff701a
SR
7274 * fcport = port structure pointer.
7275 *
7276 * Return:
6c452a45 7277 * non-zero (if found)
f28a0a96 7278 * -1 (if not found)
09ff701a
SR
7279 *
7280 * Context:
7281 * Kernel context
7282 */
f28a0a96 7283static int
09ff701a
SR
7284qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
7285{
7286 int i, entries;
7287 uint8_t pid_match, wwn_match;
f28a0a96 7288 int priority;
09ff701a
SR
7289 uint32_t pid1, pid2;
7290 uint64_t wwn1, wwn2;
7291 struct qla_fcp_prio_entry *pri_entry;
7292 struct qla_hw_data *ha = vha->hw;
7293
7294 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
f28a0a96 7295 return -1;
09ff701a 7296
f28a0a96 7297 priority = -1;
09ff701a
SR
7298 entries = ha->fcp_prio_cfg->num_entries;
7299 pri_entry = &ha->fcp_prio_cfg->entry[0];
7300
7301 for (i = 0; i < entries; i++) {
7302 pid_match = wwn_match = 0;
7303
7304 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
7305 pri_entry++;
7306 continue;
7307 }
7308
7309 /* check source pid for a match */
7310 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
7311 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
7312 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
7313 if (pid1 == INVALID_PORT_ID)
7314 pid_match++;
7315 else if (pid1 == pid2)
7316 pid_match++;
7317 }
7318
7319 /* check destination pid for a match */
7320 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
7321 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
7322 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
7323 if (pid1 == INVALID_PORT_ID)
7324 pid_match++;
7325 else if (pid1 == pid2)
7326 pid_match++;
7327 }
7328
7329 /* check source WWN for a match */
7330 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
7331 wwn1 = wwn_to_u64(vha->port_name);
7332 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
7333 if (wwn2 == (uint64_t)-1)
7334 wwn_match++;
7335 else if (wwn1 == wwn2)
7336 wwn_match++;
7337 }
7338
7339 /* check destination WWN for a match */
7340 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
7341 wwn1 = wwn_to_u64(fcport->port_name);
7342 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
7343 if (wwn2 == (uint64_t)-1)
7344 wwn_match++;
7345 else if (wwn1 == wwn2)
7346 wwn_match++;
7347 }
7348
7349 if (pid_match == 2 || wwn_match == 2) {
7350 /* Found a matching entry */
7351 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
7352 priority = pri_entry->tag;
7353 break;
7354 }
7355
7356 pri_entry++;
7357 }
7358
7359 return priority;
7360}
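/*
 * Editorial note on qla24xx_get_fcp_prio(): an entry applies only when both
 * of its port-ID descriptors match (pid_match == 2) or both of its WWN
 * descriptors match (wwn_match == 2); a wildcard descriptor (all ones)
 * counts as a match for its half of the comparison.
 */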
7361
7362/*
7363 * qla24xx_update_fcport_fcp_prio
7364 * Activates fcp priority for the logged in fc port
7365 *
7366 * Input:
21090cbe 7367 * vha = scsi host structure pointer.
09ff701a
SR
7368 * fcp = port structure pointer.
7369 *
7370 * Return:
7371 * QLA_SUCCESS or QLA_FUNCTION_FAILED
7372 *
7373 * Context:
7374 * Kernel context.
7375 */
7376int
21090cbe 7377qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
09ff701a
SR
7378{
7379 int ret;
f28a0a96 7380 int priority;
09ff701a
SR
7381 uint16_t mb[5];
7382
21090cbe
MI
7383 if (fcport->port_type != FCT_TARGET ||
7384 fcport->loop_id == FC_NO_LOOP_ID)
09ff701a
SR
7385 return QLA_FUNCTION_FAILED;
7386
21090cbe 7387 priority = qla24xx_get_fcp_prio(vha, fcport);
f28a0a96
AV
7388 if (priority < 0)
7389 return QLA_FUNCTION_FAILED;
7390
7ec0effd 7391 if (IS_P3P_TYPE(vha->hw)) {
a00f6296
SK
7392 fcport->fcp_prio = priority & 0xf;
7393 return QLA_SUCCESS;
7394 }
7395
21090cbe 7396 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
cfb0919c
CD
7397 if (ret == QLA_SUCCESS) {
7398 if (fcport->fcp_prio != priority)
7399 ql_dbg(ql_dbg_user, vha, 0x709e,
7400 "Updated FCP_CMND priority - value=%d loop_id=%d "
7401 "port_id=%02x%02x%02x.\n", priority,
7402 fcport->loop_id, fcport->d_id.b.domain,
7403 fcport->d_id.b.area, fcport->d_id.b.al_pa);
a00f6296 7404 fcport->fcp_prio = priority & 0xf;
cfb0919c 7405 } else
7c3df132 7406 ql_dbg(ql_dbg_user, vha, 0x704f,
cfb0919c
CD
7407 "Unable to update FCP_CMND priority - ret=0x%x for "
7408 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
7409 fcport->d_id.b.domain, fcport->d_id.b.area,
7410 fcport->d_id.b.al_pa);
09ff701a
SR
7411 return ret;
7412}
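/*
 * Editorial note: only the low four bits of the tag are retained
 * (priority & 0xf), and P3P-type adapters cache the value locally without
 * calling qla24xx_set_fcp_prio().
 */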
7413
7414/*
7415 * qla24xx_update_all_fcp_prio
7416 * Activates fcp priority for all the logged in ports
7417 *
7418 * Input:
7419 * ha = adapter block pointer.
7420 *
7421 * Return:
7422 * QLA_SUCCESS or QLA_FUNCTION_FAILED
7423 *
7424 * Context:
7425 * Kernel context.
7426 */
7427int
7428qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
7429{
7430 int ret;
7431 fc_port_t *fcport;
7432
7433 ret = QLA_FUNCTION_FAILED;
7434 /* We need to set priority for all logged in ports */
7435 list_for_each_entry(fcport, &vha->vp_fcports, list)
7436 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
7437
7438 return ret;
7439}
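/*
 * Editorial note: the loop above overwrites ret on each iteration, so the
 * status returned reflects only the last fcport on the vp_fcports list.
 */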
d7459527
MH
7440
7441struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int vp_idx)
7442{
7443 int rsp_id = 0;
7444 int req_id = 0;
7445 int i;
7446 struct qla_hw_data *ha = vha->hw;
7447 uint16_t qpair_id = 0;
7448 struct qla_qpair *qpair = NULL;
7449 struct qla_msix_entry *msix;
7450
7451 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
7452 ql_log(ql_log_warn, vha, 0x0181,
7453 "FW/Driver is not multi-queue capable.\n");
7454 return NULL;
7455 }
7456
7457 if (ql2xmqsupport) {
7458 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
7459 if (qpair == NULL) {
7460 ql_log(ql_log_warn, vha, 0x0182,
7461 "Failed to allocate memory for queue pair.\n");
7462 return NULL;
7463 }
7464 memset(qpair, 0, sizeof(struct qla_qpair));
7465
7466 qpair->hw = vha->hw;
7467
7468 /* Assign available que pair id */
7469 mutex_lock(&ha->mq_lock);
7470 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
7471 if (qpair_id >= ha->max_qpairs) {
7472 mutex_unlock(&ha->mq_lock);
7473 ql_log(ql_log_warn, vha, 0x0183,
7474 "No resources to create additional q pair.\n");
7475 goto fail_qid_map;
7476 }
7477 set_bit(qpair_id, ha->qpair_qid_map);
7478 ha->queue_pair_map[qpair_id] = qpair;
7479 qpair->id = qpair_id;
7480 qpair->vp_idx = vp_idx;
7481
7482 for (i = 0; i < ha->msix_count; i++) {
093df737 7483 msix = &ha->msix_entries[i];
d7459527
MH
7484 if (msix->in_use)
7485 continue;
7486 qpair->msix = msix;
7487 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
7488 "Vector %x selected for qpair\n", msix->vector);
7489 break;
7490 }
7491 if (!qpair->msix) {
7492 ql_log(ql_log_warn, vha, 0x0184,
7493 "Out of MSI-X vectors!.\n");
7494 goto fail_msix;
7495 }
7496
7497 qpair->msix->in_use = 1;
7498 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
7499
7500 mutex_unlock(&ha->mq_lock);
7501
7502 /* Create response queue first */
7503 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair);
7504 if (!rsp_id) {
7505 ql_log(ql_log_warn, vha, 0x0185,
7506 "Failed to create response queue.\n");
7507 goto fail_rsp;
7508 }
7509
7510 qpair->rsp = ha->rsp_q_map[rsp_id];
7511
7512 /* Create request queue */
7513 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos);
7514 if (!req_id) {
7515 ql_log(ql_log_warn, vha, 0x0186,
7516 "Failed to create request queue.\n");
7517 goto fail_req;
7518 }
7519
7520 qpair->req = ha->req_q_map[req_id];
7521 qpair->rsp->req = qpair->req;
7522
7523 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
7524 if (ha->fw_attributes & BIT_4)
7525 qpair->difdix_supported = 1;
7526 }
7527
7528 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
7529 if (!qpair->srb_mempool) {
7530 ql_log(ql_log_warn, vha, 0x0191,
7531 "Failed to create srb mempool for qpair %d\n",
7532 qpair->id);
7533 goto fail_mempool;
7534 }
7535
7536 /* Mark as online */
7537 qpair->online = 1;
7538
7539 if (!vha->flags.qpairs_available)
7540 vha->flags.qpairs_available = 1;
7541
7542 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
7543 "Request/Response queue pair created, id %d\n",
7544 qpair->id);
7545 ql_dbg(ql_dbg_init, vha, 0x0187,
7546 "Request/Response queue pair created, id %d\n",
7547 qpair->id);
7548 }
7549 return qpair;
7550
7551fail_mempool:
7552fail_req:
7553 qla25xx_delete_rsp_que(vha, qpair->rsp);
7554fail_rsp:
7555 mutex_lock(&ha->mq_lock);
7556 qpair->msix->in_use = 0;
7557 list_del(&qpair->qp_list_elem);
7558 if (list_empty(&vha->qp_list))
7559 vha->flags.qpairs_available = 0;
7560fail_msix:
7561 ha->queue_pair_map[qpair_id] = NULL;
7562 clear_bit(qpair_id, ha->qpair_qid_map);
7563 mutex_unlock(&ha->mq_lock);
7564fail_qid_map:
7565 kfree(qpair);
7566 return NULL;
7567}
7568
7569int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
7570{
7571 int ret;
7572 struct qla_hw_data *ha = qpair->hw;
7573
7574 qpair->delete_in_progress = 1;
7575 while (atomic_read(&qpair->ref_count))
7576 msleep(500);
7577
7578 ret = qla25xx_delete_req_que(vha, qpair->req);
7579 if (ret != QLA_SUCCESS)
7580 goto fail;
7581 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
7582 if (ret != QLA_SUCCESS)
7583 goto fail;
7584
7585 mutex_lock(&ha->mq_lock);
7586 ha->queue_pair_map[qpair->id] = NULL;
7587 clear_bit(qpair->id, ha->qpair_qid_map);
7588 list_del(&qpair->qp_list_elem);
7589 if (list_empty(&vha->qp_list))
7590 vha->flags.qpairs_available = 0;
7591 mempool_destroy(qpair->srb_mempool);
7592 kfree(qpair);
7593 mutex_unlock(&ha->mq_lock);
7594
7595 return QLA_SUCCESS;
7596fail:
7597 return ret;
7598}
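/*
 * Illustrative sketch only -- not part of the original file. It shows one
 * way a caller might pair qla2xxx_create_qpair() with qla2xxx_delete_qpair();
 * the helper name and the qos/vp_idx values are made up for the example.
 */
#if 0
static int example_qpair_roundtrip(struct scsi_qla_host *vha)
{
	struct qla_qpair *qpair;

	/* Example arguments: qos 0, vp_idx 0 (the base port). */
	qpair = qla2xxx_create_qpair(vha, 0, 0);
	if (!qpair)
		return QLA_FUNCTION_FAILED;

	/* ... submit I/O through qpair->req / qpair->rsp here ... */

	/* Deletes the request/response queues and frees the pair. */
	return qla2xxx_delete_qpair(vha, qpair);
}
#endif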