]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/scsi/qla2xxx/qla_init.c
[SCSI] mpt3sas: Remove phys on topology change
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / qla2xxx / qla_init.c
CommitLineData
1da177e4 1/*
fa90c54f 2 * QLogic Fibre Channel HBA Driver
1e63395c 3 * Copyright (c) 2003-2013 QLogic Corporation
1da177e4 4 *
fa90c54f 5 * See LICENSE.qla2xxx for copyright and licensing details.
1da177e4
LT
6 */
7#include "qla_def.h"
73208dfd 8#include "qla_gbl.h"
1da177e4
LT
9
10#include <linux/delay.h>
5a0e3ad6 11#include <linux/slab.h>
0107109e 12#include <linux/vmalloc.h>
1da177e4
LT
13
14#include "qla_devtbl.h"
15
4e08df3f
DM
16#ifdef CONFIG_SPARC
17#include <asm/prom.h>
4e08df3f
DM
18#endif
19
2d70c103
NB
20#include <target/target_core_base.h>
21#include "qla_target.h"
22
1da177e4
LT
23/*
24* QLogic ISP2x00 Hardware Support Function Prototypes.
25*/
1da177e4 26static int qla2x00_isp_firmware(scsi_qla_host_t *);
1da177e4 27static int qla2x00_setup_chip(scsi_qla_host_t *);
1da177e4
LT
28static int qla2x00_fw_ready(scsi_qla_host_t *);
29static int qla2x00_configure_hba(scsi_qla_host_t *);
1da177e4
LT
30static int qla2x00_configure_loop(scsi_qla_host_t *);
31static int qla2x00_configure_local_loop(scsi_qla_host_t *);
1da177e4
LT
32static int qla2x00_configure_fabric(scsi_qla_host_t *);
33static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
1da177e4
LT
34static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
35 uint16_t *);
1da177e4
LT
36
37static int qla2x00_restart_isp(scsi_qla_host_t *);
1da177e4 38
4d4df193
HK
39static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
40static int qla84xx_init_chip(scsi_qla_host_t *);
73208dfd 41static int qla25xx_init_queues(struct qla_hw_data *);
4d4df193 42
ac280b67
AV
43/* SRB Extensions ---------------------------------------------------------- */
44
9ba56b95
GM
45void
46qla2x00_sp_timeout(unsigned long __data)
ac280b67
AV
47{
48 srb_t *sp = (srb_t *)__data;
4916392b 49 struct srb_iocb *iocb;
ac280b67
AV
50 fc_port_t *fcport = sp->fcport;
51 struct qla_hw_data *ha = fcport->vha->hw;
52 struct req_que *req;
53 unsigned long flags;
54
55 spin_lock_irqsave(&ha->hardware_lock, flags);
56 req = ha->req_q_map[0];
57 req->outstanding_cmds[sp->handle] = NULL;
9ba56b95 58 iocb = &sp->u.iocb_cmd;
4916392b 59 iocb->timeout(sp);
9ba56b95 60 sp->free(fcport->vha, sp);
6ac52608 61 spin_unlock_irqrestore(&ha->hardware_lock, flags);
ac280b67
AV
62}
63
9ba56b95
GM
64void
65qla2x00_sp_free(void *data, void *ptr)
ac280b67 66{
9ba56b95
GM
67 srb_t *sp = (srb_t *)ptr;
68 struct srb_iocb *iocb = &sp->u.iocb_cmd;
69 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
ac280b67 70
4d97cc53 71 del_timer(&iocb->timer);
b00ee7d7 72 qla2x00_rel_sp(vha, sp);
ac280b67
AV
73}
74
ac280b67
AV
75/* Asynchronous Login/Logout Routines -------------------------------------- */
76
a9b6f722 77unsigned long
5b91490e
AV
78qla2x00_get_async_timeout(struct scsi_qla_host *vha)
79{
80 unsigned long tmo;
81 struct qla_hw_data *ha = vha->hw;
82
83 /* Firmware should use switch negotiated r_a_tov for timeout. */
84 tmo = ha->r_a_tov / 10 * 2;
8ae6d9c7
GM
85 if (IS_QLAFX00(ha)) {
86 tmo = FX00_DEF_RATOV * 2;
87 } else if (!IS_FWI2_CAPABLE(ha)) {
5b91490e
AV
88 /*
89 * Except for earlier ISPs where the timeout is seeded from the
90 * initialization control block.
91 */
92 tmo = ha->login_timeout;
93 }
94 return tmo;
95}
ac280b67
AV
96
97static void
9ba56b95 98qla2x00_async_iocb_timeout(void *data)
ac280b67 99{
9ba56b95 100 srb_t *sp = (srb_t *)data;
ac280b67 101 fc_port_t *fcport = sp->fcport;
ac280b67 102
7c3df132 103 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
cfb0919c 104 "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
9ba56b95 105 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
7c3df132 106 fcport->d_id.b.al_pa);
ac280b67 107
5ff1d584 108 fcport->flags &= ~FCF_ASYNC_SENT;
9ba56b95
GM
109 if (sp->type == SRB_LOGIN_CMD) {
110 struct srb_iocb *lio = &sp->u.iocb_cmd;
ac280b67 111 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
6ac52608
AV
112 /* Retry as needed. */
113 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
114 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
115 QLA_LOGIO_LOGIN_RETRIED : 0;
116 qla2x00_post_async_login_done_work(fcport->vha, fcport,
117 lio->u.logio.data);
118 }
ac280b67
AV
119}
120
99b0bec7 121static void
9ba56b95 122qla2x00_async_login_sp_done(void *data, void *ptr, int res)
99b0bec7 123{
9ba56b95
GM
124 srb_t *sp = (srb_t *)ptr;
125 struct srb_iocb *lio = &sp->u.iocb_cmd;
126 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
127
128 if (!test_bit(UNLOADING, &vha->dpc_flags))
129 qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
130 lio->u.logio.data);
131 sp->free(sp->fcport->vha, sp);
99b0bec7
AV
132}
133
ac280b67
AV
134int
135qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
136 uint16_t *data)
137{
ac280b67 138 srb_t *sp;
4916392b 139 struct srb_iocb *lio;
ac280b67
AV
140 int rval;
141
142 rval = QLA_FUNCTION_FAILED;
9ba56b95 143 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
ac280b67
AV
144 if (!sp)
145 goto done;
146
9ba56b95
GM
147 sp->type = SRB_LOGIN_CMD;
148 sp->name = "login";
149 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
150
151 lio = &sp->u.iocb_cmd;
3822263e 152 lio->timeout = qla2x00_async_iocb_timeout;
9ba56b95 153 sp->done = qla2x00_async_login_sp_done;
4916392b 154 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
ac280b67 155 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
4916392b 156 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
ac280b67
AV
157 rval = qla2x00_start_sp(sp);
158 if (rval != QLA_SUCCESS)
159 goto done_free_sp;
160
7c3df132 161 ql_dbg(ql_dbg_disc, vha, 0x2072,
cfb0919c
CD
162 "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
163 "retries=%d.\n", sp->handle, fcport->loop_id,
164 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
165 fcport->login_retry);
ac280b67
AV
166 return rval;
167
168done_free_sp:
9ba56b95 169 sp->free(fcport->vha, sp);
ac280b67
AV
170done:
171 return rval;
172}
173
99b0bec7 174static void
9ba56b95 175qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
99b0bec7 176{
9ba56b95
GM
177 srb_t *sp = (srb_t *)ptr;
178 struct srb_iocb *lio = &sp->u.iocb_cmd;
179 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
180
181 if (!test_bit(UNLOADING, &vha->dpc_flags))
182 qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
183 lio->u.logio.data);
184 sp->free(sp->fcport->vha, sp);
99b0bec7
AV
185}
186
ac280b67
AV
187int
188qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
189{
ac280b67 190 srb_t *sp;
4916392b 191 struct srb_iocb *lio;
ac280b67
AV
192 int rval;
193
194 rval = QLA_FUNCTION_FAILED;
9ba56b95 195 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
ac280b67
AV
196 if (!sp)
197 goto done;
198
9ba56b95
GM
199 sp->type = SRB_LOGOUT_CMD;
200 sp->name = "logout";
201 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
202
203 lio = &sp->u.iocb_cmd;
3822263e 204 lio->timeout = qla2x00_async_iocb_timeout;
9ba56b95 205 sp->done = qla2x00_async_logout_sp_done;
ac280b67
AV
206 rval = qla2x00_start_sp(sp);
207 if (rval != QLA_SUCCESS)
208 goto done_free_sp;
209
7c3df132 210 ql_dbg(ql_dbg_disc, vha, 0x2070,
cfb0919c
CD
211 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
212 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
213 fcport->d_id.b.area, fcport->d_id.b.al_pa);
ac280b67
AV
214 return rval;
215
216done_free_sp:
9ba56b95 217 sp->free(fcport->vha, sp);
ac280b67
AV
218done:
219 return rval;
220}
221
5ff1d584 222static void
9ba56b95 223qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
5ff1d584 224{
9ba56b95
GM
225 srb_t *sp = (srb_t *)ptr;
226 struct srb_iocb *lio = &sp->u.iocb_cmd;
227 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
228
229 if (!test_bit(UNLOADING, &vha->dpc_flags))
230 qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
231 lio->u.logio.data);
232 sp->free(sp->fcport->vha, sp);
5ff1d584
AV
233}
234
235int
236qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
237 uint16_t *data)
238{
5ff1d584 239 srb_t *sp;
4916392b 240 struct srb_iocb *lio;
5ff1d584
AV
241 int rval;
242
243 rval = QLA_FUNCTION_FAILED;
9ba56b95 244 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
5ff1d584
AV
245 if (!sp)
246 goto done;
247
9ba56b95
GM
248 sp->type = SRB_ADISC_CMD;
249 sp->name = "adisc";
250 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
251
252 lio = &sp->u.iocb_cmd;
3822263e 253 lio->timeout = qla2x00_async_iocb_timeout;
9ba56b95 254 sp->done = qla2x00_async_adisc_sp_done;
5ff1d584 255 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
4916392b 256 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
5ff1d584
AV
257 rval = qla2x00_start_sp(sp);
258 if (rval != QLA_SUCCESS)
259 goto done_free_sp;
260
7c3df132 261 ql_dbg(ql_dbg_disc, vha, 0x206f,
cfb0919c
CD
262 "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
263 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
264 fcport->d_id.b.area, fcport->d_id.b.al_pa);
5ff1d584
AV
265 return rval;
266
267done_free_sp:
9ba56b95 268 sp->free(fcport->vha, sp);
5ff1d584
AV
269done:
270 return rval;
271}
272
3822263e 273static void
9ba56b95 274qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
3822263e 275{
9ba56b95
GM
276 srb_t *sp = (srb_t *)ptr;
277 struct srb_iocb *iocb = &sp->u.iocb_cmd;
278 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
279 uint32_t flags;
280 uint16_t lun;
281 int rval;
3822263e 282
9ba56b95
GM
283 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
284 flags = iocb->u.tmf.flags;
285 lun = (uint16_t)iocb->u.tmf.lun;
286
287 /* Issue Marker IOCB */
288 rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
289 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
290 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
291
292 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
293 ql_dbg(ql_dbg_taskm, vha, 0x8030,
294 "TM IOCB failed (%x).\n", rval);
295 }
296 }
297 sp->free(sp->fcport->vha, sp);
3822263e
MI
298}
299
300int
9ba56b95 301qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
3822263e
MI
302 uint32_t tag)
303{
304 struct scsi_qla_host *vha = fcport->vha;
3822263e 305 srb_t *sp;
3822263e
MI
306 struct srb_iocb *tcf;
307 int rval;
308
309 rval = QLA_FUNCTION_FAILED;
9ba56b95 310 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3822263e
MI
311 if (!sp)
312 goto done;
313
9ba56b95
GM
314 sp->type = SRB_TM_CMD;
315 sp->name = "tmf";
316 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
317
318 tcf = &sp->u.iocb_cmd;
319 tcf->u.tmf.flags = tm_flags;
3822263e
MI
320 tcf->u.tmf.lun = lun;
321 tcf->u.tmf.data = tag;
322 tcf->timeout = qla2x00_async_iocb_timeout;
9ba56b95 323 sp->done = qla2x00_async_tm_cmd_done;
3822263e
MI
324
325 rval = qla2x00_start_sp(sp);
326 if (rval != QLA_SUCCESS)
327 goto done_free_sp;
328
7c3df132 329 ql_dbg(ql_dbg_taskm, vha, 0x802f,
cfb0919c
CD
330 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
331 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
332 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3822263e
MI
333 return rval;
334
335done_free_sp:
9ba56b95 336 sp->free(fcport->vha, sp);
3822263e
MI
337done:
338 return rval;
339}
340
4916392b 341void
ac280b67
AV
342qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
343 uint16_t *data)
344{
345 int rval;
ac280b67
AV
346
347 switch (data[0]) {
348 case MBS_COMMAND_COMPLETE:
a4f92a32
AV
349 /*
350 * Driver must validate login state - If PRLI not complete,
351 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
352 * requests.
353 */
354 rval = qla2x00_get_port_database(vha, fcport, 0);
0eba25df
AE
355 if (rval == QLA_NOT_LOGGED_IN) {
356 fcport->flags &= ~FCF_ASYNC_SENT;
357 fcport->flags |= FCF_LOGIN_NEEDED;
358 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
359 break;
360 }
361
a4f92a32
AV
362 if (rval != QLA_SUCCESS) {
363 qla2x00_post_async_logout_work(vha, fcport, NULL);
364 qla2x00_post_async_login_work(vha, fcport, NULL);
365 break;
366 }
99b0bec7 367 if (fcport->flags & FCF_FCP2_DEVICE) {
5ff1d584
AV
368 qla2x00_post_async_adisc_work(vha, fcport, data);
369 break;
99b0bec7
AV
370 }
371 qla2x00_update_fcport(vha, fcport);
ac280b67
AV
372 break;
373 case MBS_COMMAND_ERROR:
5ff1d584 374 fcport->flags &= ~FCF_ASYNC_SENT;
ac280b67
AV
375 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
376 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
377 else
80d79440 378 qla2x00_mark_device_lost(vha, fcport, 1, 0);
ac280b67
AV
379 break;
380 case MBS_PORT_ID_USED:
381 fcport->loop_id = data[1];
6ac52608 382 qla2x00_post_async_logout_work(vha, fcport, NULL);
ac280b67
AV
383 qla2x00_post_async_login_work(vha, fcport, NULL);
384 break;
385 case MBS_LOOP_ID_USED:
386 fcport->loop_id++;
387 rval = qla2x00_find_new_loop_id(vha, fcport);
388 if (rval != QLA_SUCCESS) {
5ff1d584 389 fcport->flags &= ~FCF_ASYNC_SENT;
80d79440 390 qla2x00_mark_device_lost(vha, fcport, 1, 0);
ac280b67
AV
391 break;
392 }
393 qla2x00_post_async_login_work(vha, fcport, NULL);
394 break;
395 }
4916392b 396 return;
ac280b67
AV
397}
398
4916392b 399void
ac280b67
AV
400qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
401 uint16_t *data)
402{
403 qla2x00_mark_device_lost(vha, fcport, 1, 0);
4916392b 404 return;
ac280b67
AV
405}
406
4916392b 407void
5ff1d584
AV
408qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
409 uint16_t *data)
410{
411 if (data[0] == MBS_COMMAND_COMPLETE) {
412 qla2x00_update_fcport(vha, fcport);
413
4916392b 414 return;
5ff1d584
AV
415 }
416
417 /* Retry login. */
418 fcport->flags &= ~FCF_ASYNC_SENT;
419 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
420 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
421 else
80d79440 422 qla2x00_mark_device_lost(vha, fcport, 1, 0);
5ff1d584 423
4916392b 424 return;
5ff1d584
AV
425}
426
1da177e4
LT
427/****************************************************************************/
428/* QLogic ISP2x00 Hardware Support Functions. */
429/****************************************************************************/
430
fa492630 431static int
7d613ac6
SV
432qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
433{
434 int rval = QLA_SUCCESS;
435 struct qla_hw_data *ha = vha->hw;
436 uint32_t idc_major_ver, idc_minor_ver;
711aa7f7 437 uint16_t config[4];
7d613ac6
SV
438
439 qla83xx_idc_lock(vha, 0);
440
441 /* SV: TODO: Assign initialization timeout from
442 * flash-info / other param
443 */
444 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
445 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
446
447 /* Set our fcoe function presence */
448 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
449 ql_dbg(ql_dbg_p3p, vha, 0xb077,
450 "Error while setting DRV-Presence.\n");
451 rval = QLA_FUNCTION_FAILED;
452 goto exit;
453 }
454
455 /* Decide the reset ownership */
456 qla83xx_reset_ownership(vha);
457
458 /*
459 * On first protocol driver load:
460 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
461 * register.
462 * Others: Check compatibility with current IDC Major version.
463 */
464 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
465 if (ha->flags.nic_core_reset_owner) {
466 /* Set IDC Major version */
467 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
468 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
469
470 /* Clearing IDC-Lock-Recovery register */
471 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
472 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
473 /*
474 * Clear further IDC participation if we are not compatible with
475 * the current IDC Major Version.
476 */
477 ql_log(ql_log_warn, vha, 0xb07d,
478 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
479 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
480 __qla83xx_clear_drv_presence(vha);
481 rval = QLA_FUNCTION_FAILED;
482 goto exit;
483 }
484 /* Each function sets its supported Minor version. */
485 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
486 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
487 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
488
711aa7f7
SK
489 if (ha->flags.nic_core_reset_owner) {
490 memset(config, 0, sizeof(config));
491 if (!qla81xx_get_port_config(vha, config))
492 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
493 QLA8XXX_DEV_READY);
494 }
495
7d613ac6
SV
496 rval = qla83xx_idc_state_handler(vha);
497
498exit:
499 qla83xx_idc_unlock(vha, 0);
500
501 return rval;
502}
503
1da177e4
LT
504/*
505* qla2x00_initialize_adapter
506* Initialize board.
507*
508* Input:
509* ha = adapter block pointer.
510*
511* Returns:
512* 0 = success
513*/
514int
e315cd28 515qla2x00_initialize_adapter(scsi_qla_host_t *vha)
1da177e4
LT
516{
517 int rval;
e315cd28 518 struct qla_hw_data *ha = vha->hw;
73208dfd 519 struct req_que *req = ha->req_q_map[0];
2533cf67 520
1da177e4 521 /* Clear adapter flags. */
e315cd28 522 vha->flags.online = 0;
2533cf67 523 ha->flags.chip_reset_done = 0;
e315cd28 524 vha->flags.reset_active = 0;
85880801
AV
525 ha->flags.pci_channel_io_perm_failure = 0;
526 ha->flags.eeh_busy = 0;
fe52f6e1 527 ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP;
e315cd28
AC
528 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
529 atomic_set(&vha->loop_state, LOOP_DOWN);
530 vha->device_flags = DFLG_NO_CABLE;
531 vha->dpc_flags = 0;
532 vha->flags.management_server_logged_in = 0;
533 vha->marker_needed = 0;
1da177e4
LT
534 ha->isp_abort_cnt = 0;
535 ha->beacon_blink_led = 0;
536
73208dfd
AC
537 set_bit(0, ha->req_qid_map);
538 set_bit(0, ha->rsp_qid_map);
539
cfb0919c 540 ql_dbg(ql_dbg_init, vha, 0x0040,
7c3df132 541 "Configuring PCI space...\n");
e315cd28 542 rval = ha->isp_ops->pci_config(vha);
1da177e4 543 if (rval) {
7c3df132
SK
544 ql_log(ql_log_warn, vha, 0x0044,
545 "Unable to configure PCI space.\n");
1da177e4
LT
546 return (rval);
547 }
548
e315cd28 549 ha->isp_ops->reset_chip(vha);
1da177e4 550
e315cd28 551 rval = qla2xxx_get_flash_info(vha);
c00d8994 552 if (rval) {
7c3df132
SK
553 ql_log(ql_log_fatal, vha, 0x004f,
554 "Unable to validate FLASH data.\n");
c00d8994
AV
555 return (rval);
556 }
557
73208dfd 558 ha->isp_ops->get_flash_version(vha, req->ring);
cfb0919c 559 ql_dbg(ql_dbg_init, vha, 0x0061,
7c3df132 560 "Configure NVRAM parameters...\n");
0107109e 561
e315cd28 562 ha->isp_ops->nvram_config(vha);
1da177e4 563
d4c760c2
AV
564 if (ha->flags.disable_serdes) {
565 /* Mask HBA via NVRAM settings? */
7c3df132
SK
566 ql_log(ql_log_info, vha, 0x0077,
567 "Masking HBA WWPN "
d4c760c2 568 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
e315cd28
AC
569 vha->port_name[0], vha->port_name[1],
570 vha->port_name[2], vha->port_name[3],
571 vha->port_name[4], vha->port_name[5],
572 vha->port_name[6], vha->port_name[7]);
d4c760c2
AV
573 return QLA_FUNCTION_FAILED;
574 }
575
cfb0919c 576 ql_dbg(ql_dbg_init, vha, 0x0078,
7c3df132 577 "Verifying loaded RISC code...\n");
1da177e4 578
e315cd28
AC
579 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
580 rval = ha->isp_ops->chip_diag(vha);
d19044c3
AV
581 if (rval)
582 return (rval);
e315cd28 583 rval = qla2x00_setup_chip(vha);
d19044c3
AV
584 if (rval)
585 return (rval);
1da177e4 586 }
a9083016 587
4d4df193 588 if (IS_QLA84XX(ha)) {
e315cd28 589 ha->cs84xx = qla84xx_get_chip(vha);
4d4df193 590 if (!ha->cs84xx) {
7c3df132 591 ql_log(ql_log_warn, vha, 0x00d0,
4d4df193
HK
592 "Unable to configure ISP84XX.\n");
593 return QLA_FUNCTION_FAILED;
594 }
595 }
2d70c103
NB
596
597 if (qla_ini_mode_enabled(vha))
598 rval = qla2x00_init_rings(vha);
599
2533cf67 600 ha->flags.chip_reset_done = 1;
1da177e4 601
9a069e19 602 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
6c452a45 603 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
9a069e19
GM
604 rval = qla84xx_init_chip(vha);
605 if (rval != QLA_SUCCESS) {
7c3df132
SK
606 ql_log(ql_log_warn, vha, 0x00d4,
607 "Unable to initialize ISP84XX.\n");
9a069e19
GM
608 qla84xx_put_chip(vha);
609 }
610 }
611
7d613ac6
SV
612 /* Load the NIC Core f/w if we are the first protocol driver. */
613 if (IS_QLA8031(ha)) {
614 rval = qla83xx_nic_core_fw_load(vha);
615 if (rval)
616 ql_log(ql_log_warn, vha, 0x0124,
617 "Error in initializing NIC Core f/w.\n");
618 }
619
2f0f3f4f
MI
620 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
621 qla24xx_read_fcp_prio_cfg(vha);
09ff701a 622
1da177e4
LT
623 return (rval);
624}
625
626/**
abbd8870 627 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
1da177e4
LT
628 * @ha: HA context
629 *
630 * Returns 0 on success.
631 */
abbd8870 632int
e315cd28 633qla2100_pci_config(scsi_qla_host_t *vha)
1da177e4 634{
a157b101 635 uint16_t w;
abbd8870 636 unsigned long flags;
e315cd28 637 struct qla_hw_data *ha = vha->hw;
3d71644c 638 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 639
1da177e4 640 pci_set_master(ha->pdev);
af6177d8 641 pci_try_set_mwi(ha->pdev);
1da177e4 642
1da177e4 643 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 644 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
abbd8870
AV
645 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
646
737faece 647 pci_disable_rom(ha->pdev);
1da177e4
LT
648
649 /* Get PCI bus information. */
650 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 651 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
1da177e4
LT
652 spin_unlock_irqrestore(&ha->hardware_lock, flags);
653
abbd8870
AV
654 return QLA_SUCCESS;
655}
1da177e4 656
abbd8870
AV
657/**
658 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
659 * @ha: HA context
660 *
661 * Returns 0 on success.
662 */
663int
e315cd28 664qla2300_pci_config(scsi_qla_host_t *vha)
abbd8870 665{
a157b101 666 uint16_t w;
abbd8870
AV
667 unsigned long flags = 0;
668 uint32_t cnt;
e315cd28 669 struct qla_hw_data *ha = vha->hw;
3d71644c 670 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 671
abbd8870 672 pci_set_master(ha->pdev);
af6177d8 673 pci_try_set_mwi(ha->pdev);
1da177e4 674
abbd8870 675 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 676 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1da177e4 677
abbd8870
AV
678 if (IS_QLA2322(ha) || IS_QLA6322(ha))
679 w &= ~PCI_COMMAND_INTX_DISABLE;
a157b101 680 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1da177e4 681
abbd8870
AV
682 /*
683 * If this is a 2300 card and not 2312, reset the
684 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
685 * the 2310 also reports itself as a 2300 so we need to get the
686 * fb revision level -- a 6 indicates it really is a 2300 and
687 * not a 2310.
688 */
689 if (IS_QLA2300(ha)) {
690 spin_lock_irqsave(&ha->hardware_lock, flags);
1da177e4 691
abbd8870 692 /* Pause RISC. */
3d71644c 693 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
abbd8870 694 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 695 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
abbd8870 696 break;
1da177e4 697
abbd8870
AV
698 udelay(10);
699 }
1da177e4 700
abbd8870 701 /* Select FPM registers. */
3d71644c
AV
702 WRT_REG_WORD(&reg->ctrl_status, 0x20);
703 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
704
705 /* Get the fb rev level */
3d71644c 706 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
abbd8870
AV
707
708 if (ha->fb_rev == FPM_2300)
a157b101 709 pci_clear_mwi(ha->pdev);
abbd8870
AV
710
711 /* Deselect FPM registers. */
3d71644c
AV
712 WRT_REG_WORD(&reg->ctrl_status, 0x0);
713 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
714
715 /* Release RISC module. */
3d71644c 716 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
abbd8870 717 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 718 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
abbd8870
AV
719 break;
720
721 udelay(10);
1da177e4 722 }
1da177e4 723
abbd8870
AV
724 spin_unlock_irqrestore(&ha->hardware_lock, flags);
725 }
1da177e4 726
abbd8870
AV
727 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
728
737faece 729 pci_disable_rom(ha->pdev);
1da177e4 730
abbd8870
AV
731 /* Get PCI bus information. */
732 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 733 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
734 spin_unlock_irqrestore(&ha->hardware_lock, flags);
735
736 return QLA_SUCCESS;
1da177e4
LT
737}
738
0107109e
AV
739/**
740 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
741 * @ha: HA context
742 *
743 * Returns 0 on success.
744 */
745int
e315cd28 746qla24xx_pci_config(scsi_qla_host_t *vha)
0107109e 747{
a157b101 748 uint16_t w;
0107109e 749 unsigned long flags = 0;
e315cd28 750 struct qla_hw_data *ha = vha->hw;
0107109e 751 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
0107109e
AV
752
753 pci_set_master(ha->pdev);
af6177d8 754 pci_try_set_mwi(ha->pdev);
0107109e
AV
755
756 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 757 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
0107109e
AV
758 w &= ~PCI_COMMAND_INTX_DISABLE;
759 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
760
761 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
762
763 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
f85ec187
AV
764 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
765 pcix_set_mmrbc(ha->pdev, 2048);
0107109e
AV
766
767 /* PCIe -- adjust Maximum Read Request Size (2048). */
e67f1321 768 if (pci_is_pcie(ha->pdev))
5ffd3a52 769 pcie_set_readrq(ha->pdev, 4096);
0107109e 770
737faece 771 pci_disable_rom(ha->pdev);
0107109e 772
44c10138 773 ha->chip_revision = ha->pdev->revision;
a8488abe 774
0107109e
AV
775 /* Get PCI bus information. */
776 spin_lock_irqsave(&ha->hardware_lock, flags);
777 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
778 spin_unlock_irqrestore(&ha->hardware_lock, flags);
779
780 return QLA_SUCCESS;
781}
782
c3a2f0df
AV
783/**
784 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
785 * @ha: HA context
786 *
787 * Returns 0 on success.
788 */
789int
e315cd28 790qla25xx_pci_config(scsi_qla_host_t *vha)
c3a2f0df
AV
791{
792 uint16_t w;
e315cd28 793 struct qla_hw_data *ha = vha->hw;
c3a2f0df
AV
794
795 pci_set_master(ha->pdev);
796 pci_try_set_mwi(ha->pdev);
797
798 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
799 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
800 w &= ~PCI_COMMAND_INTX_DISABLE;
801 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
802
803 /* PCIe -- adjust Maximum Read Request Size (2048). */
e67f1321 804 if (pci_is_pcie(ha->pdev))
5ffd3a52 805 pcie_set_readrq(ha->pdev, 4096);
c3a2f0df 806
737faece 807 pci_disable_rom(ha->pdev);
c3a2f0df
AV
808
809 ha->chip_revision = ha->pdev->revision;
810
811 return QLA_SUCCESS;
812}
813
1da177e4
LT
814/**
815 * qla2x00_isp_firmware() - Choose firmware image.
816 * @ha: HA context
817 *
818 * Returns 0 on success.
819 */
820static int
e315cd28 821qla2x00_isp_firmware(scsi_qla_host_t *vha)
1da177e4
LT
822{
823 int rval;
42e421b1
AV
824 uint16_t loop_id, topo, sw_cap;
825 uint8_t domain, area, al_pa;
e315cd28 826 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
827
828 /* Assume loading risc code */
fa2a1ce5 829 rval = QLA_FUNCTION_FAILED;
1da177e4
LT
830
831 if (ha->flags.disable_risc_code_load) {
7c3df132 832 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
1da177e4
LT
833
834 /* Verify checksum of loaded RISC code. */
e315cd28 835 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
42e421b1
AV
836 if (rval == QLA_SUCCESS) {
837 /* And, verify we are not in ROM code. */
e315cd28 838 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
42e421b1
AV
839 &area, &domain, &topo, &sw_cap);
840 }
1da177e4
LT
841 }
842
7c3df132
SK
843 if (rval)
844 ql_dbg(ql_dbg_init, vha, 0x007a,
845 "**** Load RISC code ****.\n");
1da177e4
LT
846
847 return (rval);
848}
849
850/**
851 * qla2x00_reset_chip() - Reset ISP chip.
852 * @ha: HA context
853 *
854 * Returns 0 on success.
855 */
abbd8870 856void
e315cd28 857qla2x00_reset_chip(scsi_qla_host_t *vha)
1da177e4
LT
858{
859 unsigned long flags = 0;
e315cd28 860 struct qla_hw_data *ha = vha->hw;
3d71644c 861 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 862 uint32_t cnt;
1da177e4
LT
863 uint16_t cmd;
864
85880801
AV
865 if (unlikely(pci_channel_offline(ha->pdev)))
866 return;
867
fd34f556 868 ha->isp_ops->disable_intrs(ha);
1da177e4
LT
869
870 spin_lock_irqsave(&ha->hardware_lock, flags);
871
872 /* Turn off master enable */
873 cmd = 0;
874 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
875 cmd &= ~PCI_COMMAND_MASTER;
876 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
877
878 if (!IS_QLA2100(ha)) {
879 /* Pause RISC. */
880 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
881 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
882 for (cnt = 0; cnt < 30000; cnt++) {
883 if ((RD_REG_WORD(&reg->hccr) &
884 HCCR_RISC_PAUSE) != 0)
885 break;
886 udelay(100);
887 }
888 } else {
889 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
890 udelay(10);
891 }
892
893 /* Select FPM registers. */
894 WRT_REG_WORD(&reg->ctrl_status, 0x20);
895 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
896
897 /* FPM Soft Reset. */
898 WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
899 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
900
901 /* Toggle Fpm Reset. */
902 if (!IS_QLA2200(ha)) {
903 WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
904 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
905 }
906
907 /* Select frame buffer registers. */
908 WRT_REG_WORD(&reg->ctrl_status, 0x10);
909 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
910
911 /* Reset frame buffer FIFOs. */
912 if (IS_QLA2200(ha)) {
913 WRT_FB_CMD_REG(ha, reg, 0xa000);
914 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
915 } else {
916 WRT_FB_CMD_REG(ha, reg, 0x00fc);
917
918 /* Read back fb_cmd until zero or 3 seconds max */
919 for (cnt = 0; cnt < 3000; cnt++) {
920 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
921 break;
922 udelay(100);
923 }
924 }
925
926 /* Select RISC module registers. */
927 WRT_REG_WORD(&reg->ctrl_status, 0);
928 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
929
930 /* Reset RISC processor. */
931 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
932 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
933
934 /* Release RISC processor. */
935 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
936 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
937 }
938
939 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
940 WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
941
942 /* Reset ISP chip. */
943 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
944
945 /* Wait for RISC to recover from reset. */
946 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
947 /*
948 * It is necessary to for a delay here since the card doesn't
949 * respond to PCI reads during a reset. On some architectures
950 * this will result in an MCA.
951 */
952 udelay(20);
953 for (cnt = 30000; cnt; cnt--) {
954 if ((RD_REG_WORD(&reg->ctrl_status) &
955 CSR_ISP_SOFT_RESET) == 0)
956 break;
957 udelay(100);
958 }
959 } else
960 udelay(10);
961
962 /* Reset RISC processor. */
963 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
964
965 WRT_REG_WORD(&reg->semaphore, 0);
966
967 /* Release RISC processor. */
968 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
969 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
970
971 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
972 for (cnt = 0; cnt < 30000; cnt++) {
ffb39f03 973 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
1da177e4 974 break;
1da177e4
LT
975
976 udelay(100);
977 }
978 } else
979 udelay(100);
980
981 /* Turn on master enable */
982 cmd |= PCI_COMMAND_MASTER;
983 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
984
985 /* Disable RISC pause on FPM parity error. */
986 if (!IS_QLA2100(ha)) {
987 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
988 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
989 }
990
991 spin_unlock_irqrestore(&ha->hardware_lock, flags);
992}
993
b1d46989
MI
994/**
995 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
996 *
997 * Returns 0 on success.
998 */
fa492630 999static int
b1d46989
MI
1000qla81xx_reset_mpi(scsi_qla_host_t *vha)
1001{
1002 uint16_t mb[4] = {0x1010, 0, 1, 0};
1003
6246b8a1
GM
1004 if (!IS_QLA81XX(vha->hw))
1005 return QLA_SUCCESS;
1006
b1d46989
MI
1007 return qla81xx_write_mpi_register(vha, mb);
1008}
1009
0107109e 1010/**
88c26663 1011 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
0107109e
AV
1012 * @ha: HA context
1013 *
1014 * Returns 0 on success.
1015 */
88c26663 1016static inline void
e315cd28 1017qla24xx_reset_risc(scsi_qla_host_t *vha)
0107109e
AV
1018{
1019 unsigned long flags = 0;
e315cd28 1020 struct qla_hw_data *ha = vha->hw;
0107109e
AV
1021 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1022 uint32_t cnt, d2;
335a1cc9 1023 uint16_t wd;
b1d46989 1024 static int abts_cnt; /* ISP abort retry counts */
0107109e 1025
0107109e
AV
1026 spin_lock_irqsave(&ha->hardware_lock, flags);
1027
1028 /* Reset RISC. */
1029 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
1030 for (cnt = 0; cnt < 30000; cnt++) {
1031 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
1032 break;
1033
1034 udelay(10);
1035 }
1036
1037 WRT_REG_DWORD(&reg->ctrl_status,
1038 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
335a1cc9 1039 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
88c26663 1040
335a1cc9 1041 udelay(100);
88c26663 1042 /* Wait for firmware to complete NVRAM accesses. */
88c26663
AV
1043 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1044 for (cnt = 10000 ; cnt && d2; cnt--) {
1045 udelay(5);
1046 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1047 barrier();
1048 }
1049
335a1cc9 1050 /* Wait for soft-reset to complete. */
0107109e
AV
1051 d2 = RD_REG_DWORD(&reg->ctrl_status);
1052 for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
1053 udelay(5);
1054 d2 = RD_REG_DWORD(&reg->ctrl_status);
1055 barrier();
1056 }
1057
b1d46989
MI
1058 /* If required, do an MPI FW reset now */
1059 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
1060 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
1061 if (++abts_cnt < 5) {
1062 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1063 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
1064 } else {
1065 /*
1066 * We exhausted the ISP abort retries. We have to
1067 * set the board offline.
1068 */
1069 abts_cnt = 0;
1070 vha->flags.online = 0;
1071 }
1072 }
1073 }
1074
0107109e
AV
1075 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
1076 RD_REG_DWORD(&reg->hccr);
1077
1078 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
1079 RD_REG_DWORD(&reg->hccr);
1080
1081 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
1082 RD_REG_DWORD(&reg->hccr);
1083
1084 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1085 for (cnt = 6000000 ; cnt && d2; cnt--) {
1086 udelay(5);
1087 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1088 barrier();
1089 }
1090
1091 spin_unlock_irqrestore(&ha->hardware_lock, flags);
124f85e6
AV
1092
1093 if (IS_NOPOLLING_TYPE(ha))
1094 ha->isp_ops->enable_intrs(ha);
0107109e
AV
1095}
1096
4ea2c9c7
JC
1097static void
1098qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
1099{
1100 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
1101
1102 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
1103 *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
1104
1105}
1106
1107static void
1108qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
1109{
1110 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
1111
1112 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
1113 WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
1114}
1115
1116static void
1117qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
1118{
1119 struct qla_hw_data *ha = vha->hw;
1120 uint32_t wd32 = 0;
1121 uint delta_msec = 100;
1122 uint elapsed_msec = 0;
1123 uint timeout_msec;
1124 ulong n;
1125
1126 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha))
1127 return;
1128
1129attempt:
1130 timeout_msec = TIMEOUT_SEMAPHORE;
1131 n = timeout_msec / delta_msec;
1132 while (n--) {
1133 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
1134 qla25xx_read_risc_sema_reg(vha, &wd32);
1135 if (wd32 & RISC_SEMAPHORE)
1136 break;
1137 msleep(delta_msec);
1138 elapsed_msec += delta_msec;
1139 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
1140 goto force;
1141 }
1142
1143 if (!(wd32 & RISC_SEMAPHORE))
1144 goto force;
1145
1146 if (!(wd32 & RISC_SEMAPHORE_FORCE))
1147 goto acquired;
1148
1149 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
1150 timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
1151 n = timeout_msec / delta_msec;
1152 while (n--) {
1153 qla25xx_read_risc_sema_reg(vha, &wd32);
1154 if (!(wd32 & RISC_SEMAPHORE_FORCE))
1155 break;
1156 msleep(delta_msec);
1157 elapsed_msec += delta_msec;
1158 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
1159 goto force;
1160 }
1161
1162 if (wd32 & RISC_SEMAPHORE_FORCE)
1163 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
1164
1165 goto attempt;
1166
1167force:
1168 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
1169
1170acquired:
1171 return;
1172}
1173
88c26663
AV
1174/**
1175 * qla24xx_reset_chip() - Reset ISP24xx chip.
1176 * @ha: HA context
1177 *
1178 * Returns 0 on success.
1179 */
1180void
e315cd28 1181qla24xx_reset_chip(scsi_qla_host_t *vha)
88c26663 1182{
e315cd28 1183 struct qla_hw_data *ha = vha->hw;
85880801
AV
1184
1185 if (pci_channel_offline(ha->pdev) &&
1186 ha->flags.pci_channel_io_perm_failure) {
1187 return;
1188 }
1189
fd34f556 1190 ha->isp_ops->disable_intrs(ha);
88c26663 1191
4ea2c9c7
JC
1192 qla25xx_manipulate_risc_semaphore(vha);
1193
88c26663 1194 /* Perform RISC reset. */
e315cd28 1195 qla24xx_reset_risc(vha);
88c26663
AV
1196}
1197
1da177e4
LT
1198/**
1199 * qla2x00_chip_diag() - Test chip for proper operation.
1200 * @ha: HA context
1201 *
1202 * Returns 0 on success.
1203 */
abbd8870 1204int
e315cd28 1205qla2x00_chip_diag(scsi_qla_host_t *vha)
1da177e4
LT
1206{
1207 int rval;
e315cd28 1208 struct qla_hw_data *ha = vha->hw;
3d71644c 1209 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
1210 unsigned long flags = 0;
1211 uint16_t data;
1212 uint32_t cnt;
1213 uint16_t mb[5];
73208dfd 1214 struct req_que *req = ha->req_q_map[0];
1da177e4
LT
1215
1216 /* Assume a failed state */
1217 rval = QLA_FUNCTION_FAILED;
1218
7c3df132
SK
1219 ql_dbg(ql_dbg_init, vha, 0x007b,
1220 "Testing device at %lx.\n", (u_long)&reg->flash_address);
1da177e4
LT
1221
1222 spin_lock_irqsave(&ha->hardware_lock, flags);
1223
1224 /* Reset ISP chip. */
1225 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1226
1227 /*
1228 * We need to have a delay here since the card will not respond while
1229 * in reset causing an MCA on some architectures.
1230 */
1231 udelay(20);
1232 data = qla2x00_debounce_register(&reg->ctrl_status);
1233 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
1234 udelay(5);
1235 data = RD_REG_WORD(&reg->ctrl_status);
1236 barrier();
1237 }
1238
1239 if (!cnt)
1240 goto chip_diag_failed;
1241
7c3df132
SK
1242 ql_dbg(ql_dbg_init, vha, 0x007c,
1243 "Reset register cleared by chip reset.\n");
1da177e4
LT
1244
1245 /* Reset RISC processor. */
1246 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1247 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1248
1249 /* Workaround for QLA2312 PCI parity error */
1250 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1251 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
1252 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
1253 udelay(5);
1254 data = RD_MAILBOX_REG(ha, reg, 0);
fa2a1ce5 1255 barrier();
1da177e4
LT
1256 }
1257 } else
1258 udelay(10);
1259
1260 if (!cnt)
1261 goto chip_diag_failed;
1262
1263 /* Check product ID of chip */
7c3df132 1264 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n");
1da177e4
LT
1265
1266 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
1267 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
1268 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
1269 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
1270 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
1271 mb[3] != PROD_ID_3) {
7c3df132
SK
1272 ql_log(ql_log_warn, vha, 0x0062,
1273 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
1274 mb[1], mb[2], mb[3]);
1da177e4
LT
1275
1276 goto chip_diag_failed;
1277 }
1278 ha->product_id[0] = mb[1];
1279 ha->product_id[1] = mb[2];
1280 ha->product_id[2] = mb[3];
1281 ha->product_id[3] = mb[4];
1282
1283 /* Adjust fw RISC transfer size */
73208dfd 1284 if (req->length > 1024)
1da177e4
LT
1285 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
1286 else
1287 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
73208dfd 1288 req->length;
1da177e4
LT
1289
1290 if (IS_QLA2200(ha) &&
1291 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
1292 /* Limit firmware transfer size with a 2200A */
7c3df132 1293 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1da177e4 1294
ea5b6382 1295 ha->device_type |= DT_ISP2200A;
1da177e4
LT
1296 ha->fw_transfer_size = 128;
1297 }
1298
1299 /* Wrap Incoming Mailboxes Test. */
1300 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1301
7c3df132 1302 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
e315cd28 1303 rval = qla2x00_mbx_reg_test(vha);
7c3df132
SK
1304 if (rval)
1305 ql_log(ql_log_warn, vha, 0x0080,
1306 "Failed mailbox send register test.\n");
1307 else
1da177e4
LT
1308 /* Flag a successful rval */
1309 rval = QLA_SUCCESS;
1da177e4
LT
1310 spin_lock_irqsave(&ha->hardware_lock, flags);
1311
1312chip_diag_failed:
1313 if (rval)
7c3df132
SK
1314 ql_log(ql_log_info, vha, 0x0081,
1315 "Chip diagnostics **** FAILED ****.\n");
1da177e4
LT
1316
1317 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1318
1319 return (rval);
1320}
1321
0107109e
AV
1322/**
1323 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
1324 * @ha: HA context
1325 *
1326 * Returns 0 on success.
1327 */
1328int
e315cd28 1329qla24xx_chip_diag(scsi_qla_host_t *vha)
0107109e
AV
1330{
1331 int rval;
e315cd28 1332 struct qla_hw_data *ha = vha->hw;
73208dfd 1333 struct req_que *req = ha->req_q_map[0];
0107109e 1334
a9083016
GM
1335 if (IS_QLA82XX(ha))
1336 return QLA_SUCCESS;
1337
73208dfd 1338 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
0107109e 1339
e315cd28 1340 rval = qla2x00_mbx_reg_test(vha);
0107109e 1341 if (rval) {
7c3df132
SK
1342 ql_log(ql_log_warn, vha, 0x0082,
1343 "Failed mailbox send register test.\n");
0107109e
AV
1344 } else {
1345 /* Flag a successful rval */
1346 rval = QLA_SUCCESS;
1347 }
1348
1349 return rval;
1350}
1351
a7a167bf 1352void
e315cd28 1353qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
0107109e 1354{
a7a167bf
AV
1355 int rval;
1356 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
73208dfd 1357 eft_size, fce_size, mq_size;
df613b96
AV
1358 dma_addr_t tc_dma;
1359 void *tc;
e315cd28 1360 struct qla_hw_data *ha = vha->hw;
73208dfd
AC
1361 struct req_que *req = ha->req_q_map[0];
1362 struct rsp_que *rsp = ha->rsp_q_map[0];
a7a167bf
AV
1363
1364 if (ha->fw_dump) {
7c3df132
SK
1365 ql_dbg(ql_dbg_init, vha, 0x00bd,
1366 "Firmware dump already allocated.\n");
a7a167bf
AV
1367 return;
1368 }
d4e3e04d 1369
0107109e 1370 ha->fw_dumped = 0;
73208dfd 1371 fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
d4e3e04d 1372 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
a7a167bf 1373 fixed_size = sizeof(struct qla2100_fw_dump);
d4e3e04d 1374 } else if (IS_QLA23XX(ha)) {
a7a167bf
AV
1375 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
1376 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1377 sizeof(uint16_t);
e428924c 1378 } else if (IS_FWI2_CAPABLE(ha)) {
6246b8a1
GM
1379 if (IS_QLA83XX(ha))
1380 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
1381 else if (IS_QLA81XX(ha))
3a03eb79
AV
1382 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
1383 else if (IS_QLA25XX(ha))
1384 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
1385 else
1386 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
a7a167bf
AV
1387 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1388 sizeof(uint32_t);
050c9bb1 1389 if (ha->mqenable) {
6246b8a1
GM
1390 if (!IS_QLA83XX(ha))
1391 mq_size = sizeof(struct qla2xxx_mq_chain);
050c9bb1
GM
1392 /*
1393 * Allocate maximum buffer size for all queues.
1394 * Resizing must be done at end-of-dump processing.
1395 */
1396 mq_size += ha->max_req_queues *
1397 (req->length * sizeof(request_t));
1398 mq_size += ha->max_rsp_queues *
1399 (rsp->length * sizeof(response_t));
1400 }
00876ae8 1401 if (ha->tgt.atio_ring)
2d70c103 1402 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
df613b96 1403 /* Allocate memory for Fibre Channel Event Buffer. */
6246b8a1 1404 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
436a7b11 1405 goto try_eft;
df613b96
AV
1406
1407 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
1408 GFP_KERNEL);
1409 if (!tc) {
7c3df132
SK
1410 ql_log(ql_log_warn, vha, 0x00be,
1411 "Unable to allocate (%d KB) for FCE.\n",
1412 FCE_SIZE / 1024);
17d98630 1413 goto try_eft;
df613b96
AV
1414 }
1415
1416 memset(tc, 0, FCE_SIZE);
e315cd28 1417 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
df613b96
AV
1418 ha->fce_mb, &ha->fce_bufs);
1419 if (rval) {
7c3df132
SK
1420 ql_log(ql_log_warn, vha, 0x00bf,
1421 "Unable to initialize FCE (%d).\n", rval);
df613b96
AV
1422 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
1423 tc_dma);
1424 ha->flags.fce_enabled = 0;
17d98630 1425 goto try_eft;
df613b96 1426 }
cfb0919c 1427 ql_dbg(ql_dbg_init, vha, 0x00c0,
7c3df132 1428 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
df613b96 1429
7d9dade3 1430 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
df613b96
AV
1431 ha->flags.fce_enabled = 1;
1432 ha->fce_dma = tc_dma;
1433 ha->fce = tc;
436a7b11
AV
1434try_eft:
1435 /* Allocate memory for Extended Trace Buffer. */
1436 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
1437 GFP_KERNEL);
1438 if (!tc) {
7c3df132
SK
1439 ql_log(ql_log_warn, vha, 0x00c1,
1440 "Unable to allocate (%d KB) for EFT.\n",
1441 EFT_SIZE / 1024);
436a7b11
AV
1442 goto cont_alloc;
1443 }
1444
1445 memset(tc, 0, EFT_SIZE);
e315cd28 1446 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
436a7b11 1447 if (rval) {
7c3df132
SK
1448 ql_log(ql_log_warn, vha, 0x00c2,
1449 "Unable to initialize EFT (%d).\n", rval);
436a7b11
AV
1450 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
1451 tc_dma);
1452 goto cont_alloc;
1453 }
cfb0919c 1454 ql_dbg(ql_dbg_init, vha, 0x00c3,
7c3df132 1455 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
436a7b11
AV
1456
1457 eft_size = EFT_SIZE;
1458 ha->eft_dma = tc_dma;
1459 ha->eft = tc;
d4e3e04d 1460 }
a7a167bf 1461cont_alloc:
73208dfd
AC
1462 req_q_size = req->length * sizeof(request_t);
1463 rsp_q_size = rsp->length * sizeof(response_t);
a7a167bf
AV
1464
1465 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
2afa19a9 1466 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
bb99de67
AV
1467 ha->chain_offset = dump_size;
1468 dump_size += mq_size + fce_size;
d4e3e04d
AV
1469
1470 ha->fw_dump = vmalloc(dump_size);
a7a167bf 1471 if (!ha->fw_dump) {
7c3df132
SK
1472 ql_log(ql_log_warn, vha, 0x00c4,
1473 "Unable to allocate (%d KB) for firmware dump.\n",
1474 dump_size / 1024);
a7a167bf 1475
e30d1756
MI
1476 if (ha->fce) {
1477 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
1478 ha->fce_dma);
1479 ha->fce = NULL;
1480 ha->fce_dma = 0;
1481 }
1482
a7a167bf
AV
1483 if (ha->eft) {
1484 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
1485 ha->eft_dma);
1486 ha->eft = NULL;
1487 ha->eft_dma = 0;
1488 }
1489 return;
1490 }
cfb0919c 1491 ql_dbg(ql_dbg_init, vha, 0x00c5,
7c3df132 1492 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
a7a167bf
AV
1493
1494 ha->fw_dump_len = dump_size;
1495 ha->fw_dump->signature[0] = 'Q';
1496 ha->fw_dump->signature[1] = 'L';
1497 ha->fw_dump->signature[2] = 'G';
1498 ha->fw_dump->signature[3] = 'C';
1499 ha->fw_dump->version = __constant_htonl(1);
1500
1501 ha->fw_dump->fixed_size = htonl(fixed_size);
1502 ha->fw_dump->mem_size = htonl(mem_size);
1503 ha->fw_dump->req_q_size = htonl(req_q_size);
1504 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
1505
1506 ha->fw_dump->eft_size = htonl(eft_size);
1507 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
1508 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
1509
1510 ha->fw_dump->header_size =
1511 htonl(offsetof(struct qla2xxx_fw_dump, isp));
0107109e
AV
1512}
1513
18e7555a
AV
1514static int
1515qla81xx_mpi_sync(scsi_qla_host_t *vha)
1516{
1517#define MPS_MASK 0xe0
1518 int rval;
1519 uint16_t dc;
1520 uint32_t dw;
18e7555a
AV
1521
1522 if (!IS_QLA81XX(vha->hw))
1523 return QLA_SUCCESS;
1524
1525 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
1526 if (rval != QLA_SUCCESS) {
7c3df132
SK
1527 ql_log(ql_log_warn, vha, 0x0105,
1528 "Unable to acquire semaphore.\n");
18e7555a
AV
1529 goto done;
1530 }
1531
1532 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
1533 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
1534 if (rval != QLA_SUCCESS) {
7c3df132 1535 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
18e7555a
AV
1536 goto done_release;
1537 }
1538
1539 dc &= MPS_MASK;
1540 if (dc == (dw & MPS_MASK))
1541 goto done_release;
1542
1543 dw &= ~MPS_MASK;
1544 dw |= dc;
1545 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
1546 if (rval != QLA_SUCCESS) {
7c3df132 1547 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
18e7555a
AV
1548 }
1549
1550done_release:
1551 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
1552 if (rval != QLA_SUCCESS) {
7c3df132
SK
1553 ql_log(ql_log_warn, vha, 0x006d,
1554 "Unable to release semaphore.\n");
18e7555a
AV
1555 }
1556
1557done:
1558 return rval;
1559}
1560
8d93f550
CD
1561int
1562qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
1563{
1564 /* Don't try to reallocate the array */
1565 if (req->outstanding_cmds)
1566 return QLA_SUCCESS;
1567
1568 if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
1569 (ql2xmultique_tag || ql2xmaxqueues > 1)))
1570 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
1571 else {
1572 if (ha->fw_xcb_count <= ha->fw_iocb_count)
1573 req->num_outstanding_cmds = ha->fw_xcb_count;
1574 else
1575 req->num_outstanding_cmds = ha->fw_iocb_count;
1576 }
1577
1578 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
1579 req->num_outstanding_cmds, GFP_KERNEL);
1580
1581 if (!req->outstanding_cmds) {
1582 /*
1583 * Try to allocate a minimal size just so we can get through
1584 * initialization.
1585 */
1586 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
1587 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
1588 req->num_outstanding_cmds, GFP_KERNEL);
1589
1590 if (!req->outstanding_cmds) {
1591 ql_log(ql_log_fatal, NULL, 0x0126,
1592 "Failed to allocate memory for "
1593 "outstanding_cmds for req_que %p.\n", req);
1594 req->num_outstanding_cmds = 0;
1595 return QLA_FUNCTION_FAILED;
1596 }
1597 }
1598
1599 return QLA_SUCCESS;
1600}
1601
1da177e4
LT
1602/**
1603 * qla2x00_setup_chip() - Load and start RISC firmware.
1604 * @ha: HA context
1605 *
1606 * Returns 0 on success.
1607 */
1608static int
e315cd28 1609qla2x00_setup_chip(scsi_qla_host_t *vha)
1da177e4 1610{
0107109e
AV
1611 int rval;
1612 uint32_t srisc_address = 0;
e315cd28 1613 struct qla_hw_data *ha = vha->hw;
3db0652e
AV
1614 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1615 unsigned long flags;
dda772e8 1616 uint16_t fw_major_version;
3db0652e 1617
a9083016
GM
1618 if (IS_QLA82XX(ha)) {
1619 rval = ha->isp_ops->load_risc(vha, &srisc_address);
14e303d9
AV
1620 if (rval == QLA_SUCCESS) {
1621 qla2x00_stop_firmware(vha);
a9083016 1622 goto enable_82xx_npiv;
14e303d9 1623 } else
b963752f 1624 goto failed;
a9083016
GM
1625 }
1626
3db0652e
AV
1627 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1628 /* Disable SRAM, Instruction RAM and GP RAM parity. */
1629 spin_lock_irqsave(&ha->hardware_lock, flags);
1630 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
1631 RD_REG_WORD(&reg->hccr);
1632 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1633 }
1da177e4 1634
18e7555a
AV
1635 qla81xx_mpi_sync(vha);
1636
1da177e4 1637 /* Load firmware sequences */
e315cd28 1638 rval = ha->isp_ops->load_risc(vha, &srisc_address);
0107109e 1639 if (rval == QLA_SUCCESS) {
7c3df132
SK
1640 ql_dbg(ql_dbg_init, vha, 0x00c9,
1641 "Verifying Checksum of loaded RISC code.\n");
1da177e4 1642
e315cd28 1643 rval = qla2x00_verify_checksum(vha, srisc_address);
1da177e4
LT
1644 if (rval == QLA_SUCCESS) {
1645 /* Start firmware execution. */
7c3df132
SK
1646 ql_dbg(ql_dbg_init, vha, 0x00ca,
1647 "Starting firmware.\n");
1da177e4 1648
e315cd28 1649 rval = qla2x00_execute_fw(vha, srisc_address);
1da177e4 1650 /* Retrieve firmware information. */
dda772e8 1651 if (rval == QLA_SUCCESS) {
a9083016 1652enable_82xx_npiv:
dda772e8 1653 fw_major_version = ha->fw_major_version;
3173167f
GM
1654 if (IS_QLA82XX(ha))
1655 qla82xx_check_md_needed(vha);
6246b8a1
GM
1656 else
1657 rval = qla2x00_get_fw_version(vha);
ca9e9c3e
AV
1658 if (rval != QLA_SUCCESS)
1659 goto failed;
2c3dfe3f 1660 ha->flags.npiv_supported = 0;
e315cd28 1661 if (IS_QLA2XXX_MIDTYPE(ha) &&
946fb891 1662 (ha->fw_attributes & BIT_2)) {
2c3dfe3f 1663 ha->flags.npiv_supported = 1;
4d0ea247
SJ
1664 if ((!ha->max_npiv_vports) ||
1665 ((ha->max_npiv_vports + 1) %
eb66dc60 1666 MIN_MULTI_ID_FABRIC))
4d0ea247 1667 ha->max_npiv_vports =
eb66dc60 1668 MIN_MULTI_ID_FABRIC - 1;
4d0ea247 1669 }
24a08138 1670 qla2x00_get_resource_cnts(vha, NULL,
8d93f550 1671 &ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
f3a0a77e 1672 &ha->max_npiv_vports, NULL);
d743de66 1673
8d93f550
CD
1674 /*
1675 * Allocate the array of outstanding commands
1676 * now that we know the firmware resources.
1677 */
1678 rval = qla2x00_alloc_outstanding_cmds(ha,
1679 vha->req);
1680 if (rval != QLA_SUCCESS)
1681 goto failed;
1682
be5ea3cf
SK
1683 if (!fw_major_version && ql2xallocfwdump
1684 && !IS_QLA82XX(ha))
08de2844 1685 qla2x00_alloc_fw_dump(vha);
1da177e4
LT
1686 }
1687 } else {
7c3df132
SK
1688 ql_log(ql_log_fatal, vha, 0x00cd,
1689 "ISP Firmware failed checksum.\n");
1690 goto failed;
1da177e4 1691 }
c74d88a4
AV
1692 } else
1693 goto failed;
1da177e4 1694
3db0652e
AV
1695 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1696 /* Enable proper parity. */
1697 spin_lock_irqsave(&ha->hardware_lock, flags);
1698 if (IS_QLA2300(ha))
1699 /* SRAM parity */
1700 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
1701 else
1702 /* SRAM, Instruction RAM and GP RAM parity */
1703 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
1704 RD_REG_WORD(&reg->hccr);
1705 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1706 }
1707
6246b8a1
GM
1708 if (IS_QLA83XX(ha))
1709 goto skip_fac_check;
1710
1d2874de
JC
1711 if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1712 uint32_t size;
1713
1714 rval = qla81xx_fac_get_sector_size(vha, &size);
1715 if (rval == QLA_SUCCESS) {
1716 ha->flags.fac_supported = 1;
1717 ha->fdt_block_size = size << 2;
1718 } else {
7c3df132 1719 ql_log(ql_log_warn, vha, 0x00ce,
1d2874de
JC
1720 "Unsupported FAC firmware (%d.%02d.%02d).\n",
1721 ha->fw_major_version, ha->fw_minor_version,
1722 ha->fw_subminor_version);
6246b8a1
GM
1723skip_fac_check:
1724 if (IS_QLA83XX(ha)) {
1725 ha->flags.fac_supported = 0;
1726 rval = QLA_SUCCESS;
1727 }
1d2874de
JC
1728 }
1729 }
ca9e9c3e 1730failed:
1da177e4 1731 if (rval) {
7c3df132
SK
1732 ql_log(ql_log_fatal, vha, 0x00cf,
1733 "Setup chip ****FAILED****.\n");
1da177e4
LT
1734 }
1735
1736 return (rval);
1737}
1738
1739/**
1740 * qla2x00_init_response_q_entries() - Initializes response queue entries.
1741 * @ha: HA context
1742 *
1743 * Beginning of request ring has initialization control block already built
1744 * by nvram config routine.
1745 *
1746 * Returns 0 on success.
1747 */
73208dfd
AC
1748void
1749qla2x00_init_response_q_entries(struct rsp_que *rsp)
1da177e4
LT
1750{
1751 uint16_t cnt;
1752 response_t *pkt;
1753
2afa19a9
AC
1754 rsp->ring_ptr = rsp->ring;
1755 rsp->ring_index = 0;
1756 rsp->status_srb = NULL;
e315cd28
AC
1757 pkt = rsp->ring_ptr;
1758 for (cnt = 0; cnt < rsp->length; cnt++) {
1da177e4
LT
1759 pkt->signature = RESPONSE_PROCESSED;
1760 pkt++;
1761 }
1da177e4
LT
1762}
1763
1764/**
1765 * qla2x00_update_fw_options() - Read and process firmware options.
1766 * @ha: HA context
1767 *
1768 * Returns 0 on success.
1769 */
abbd8870 1770void
e315cd28 1771qla2x00_update_fw_options(scsi_qla_host_t *vha)
1da177e4
LT
1772{
1773 uint16_t swing, emphasis, tx_sens, rx_sens;
e315cd28 1774 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
1775
1776 memset(ha->fw_options, 0, sizeof(ha->fw_options));
e315cd28 1777 qla2x00_get_fw_options(vha, ha->fw_options);
1da177e4
LT
1778
1779 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1780 return;
1781
1782 /* Serial Link options. */
7c3df132
SK
1783 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
1784 "Serial link options.\n");
1785 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
1786 (uint8_t *)&ha->fw_seriallink_options,
1787 sizeof(ha->fw_seriallink_options));
1da177e4
LT
1788
1789 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1790 if (ha->fw_seriallink_options[3] & BIT_2) {
1791 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
1792
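		/*
		 * fw_options[10] carries the 1G serial-link tuning and
		 * fw_options[11] the 2G tuning: emphasis is packed into the top
		 * bits (<< 14) and swing into bits 10:8; where the rx/tx
		 * sensitivity values land depends on the ISP type checked below.
		 */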
1793 /* 1G settings */
1794 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
1795 emphasis = (ha->fw_seriallink_options[2] &
1796 (BIT_4 | BIT_3)) >> 3;
1797 tx_sens = ha->fw_seriallink_options[0] &
fa2a1ce5 1798 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
1799 rx_sens = (ha->fw_seriallink_options[0] &
1800 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1801 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
1802 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1803 if (rx_sens == 0x0)
1804 rx_sens = 0x3;
1805 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
1806 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1807 ha->fw_options[10] |= BIT_5 |
1808 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1809 (tx_sens & (BIT_1 | BIT_0));
1810
1811 /* 2G settings */
1812 swing = (ha->fw_seriallink_options[2] &
1813 (BIT_7 | BIT_6 | BIT_5)) >> 5;
1814 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
1815 tx_sens = ha->fw_seriallink_options[1] &
fa2a1ce5 1816 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
1817 rx_sens = (ha->fw_seriallink_options[1] &
1818 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1819 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
1820 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1821 if (rx_sens == 0x0)
1822 rx_sens = 0x3;
1823 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
1824 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1825 ha->fw_options[11] |= BIT_5 |
1826 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1827 (tx_sens & (BIT_1 | BIT_0));
1828 }
1829
1830 /* FCP2 options. */
1831 /* Return command IOCBs without waiting for an ABTS to complete. */
1832 ha->fw_options[3] |= BIT_13;
1833
1834 /* LED scheme. */
1835 if (ha->flags.enable_led_scheme)
1836 ha->fw_options[2] |= BIT_12;
1837
48c02fde
AV
1838 /* Detect ISP6312. */
1839 if (IS_QLA6312(ha))
1840 ha->fw_options[2] |= BIT_13;
1841
1da177e4 1842 /* Update firmware options. */
e315cd28 1843 qla2x00_set_fw_options(vha, ha->fw_options);
1da177e4
LT
1844}
1845
0107109e 1846void
e315cd28 1847qla24xx_update_fw_options(scsi_qla_host_t *vha)
0107109e
AV
1848{
1849 int rval;
e315cd28 1850 struct qla_hw_data *ha = vha->hw;
0107109e 1851
a9083016
GM
1852 if (IS_QLA82XX(ha))
1853 return;
1854
0107109e 1855 /* Update Serial Link options. */
f94097ed 1856 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
0107109e
AV
1857 return;
1858
e315cd28 1859 rval = qla2x00_set_serdes_params(vha,
f94097ed
AV
1860 le16_to_cpu(ha->fw_seriallink_options24[1]),
1861 le16_to_cpu(ha->fw_seriallink_options24[2]),
1862 le16_to_cpu(ha->fw_seriallink_options24[3]));
0107109e 1863 if (rval != QLA_SUCCESS) {
7c3df132 1864 ql_log(ql_log_warn, vha, 0x0104,
0107109e
AV
1865 "Unable to update Serial Link options (%x).\n", rval);
1866 }
1867}
1868
abbd8870 1869void
e315cd28 1870qla2x00_config_rings(struct scsi_qla_host *vha)
abbd8870 1871{
e315cd28 1872 struct qla_hw_data *ha = vha->hw;
3d71644c 1873 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
73208dfd
AC
1874 struct req_que *req = ha->req_q_map[0];
1875 struct rsp_que *rsp = ha->rsp_q_map[0];
abbd8870
AV
1876
1877 /* Setup ring parameters in initialization control block. */
1878 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
1879 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
e315cd28
AC
1880 ha->init_cb->request_q_length = cpu_to_le16(req->length);
1881 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
1882 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1883 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1884 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1885 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
abbd8870
AV
1886
1887 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
1888 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
1889 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
1890 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
1891 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
1892}
1893
0107109e 1894void
e315cd28 1895qla24xx_config_rings(struct scsi_qla_host *vha)
0107109e 1896{
e315cd28 1897 struct qla_hw_data *ha = vha->hw;
73208dfd
AC
1898 device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
1899 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1900 struct qla_msix_entry *msix;
0107109e 1901 struct init_cb_24xx *icb;
73208dfd
AC
1902 uint16_t rid = 0;
1903 struct req_que *req = ha->req_q_map[0];
1904 struct rsp_que *rsp = ha->rsp_q_map[0];
0107109e 1905
6246b8a1 1906 /* Setup ring parameters in initialization control block. */
0107109e
AV
1907 icb = (struct init_cb_24xx *)ha->init_cb;
1908 icb->request_q_outpointer = __constant_cpu_to_le16(0);
1909 icb->response_q_inpointer = __constant_cpu_to_le16(0);
e315cd28
AC
1910 icb->request_q_length = cpu_to_le16(req->length);
1911 icb->response_q_length = cpu_to_le16(rsp->length);
1912 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1913 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1914 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1915 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
0107109e 1916
2d70c103
NB
1917 /* Setup ATIO queue dma pointers for target mode */
1918 icb->atio_q_inpointer = __constant_cpu_to_le16(0);
1919 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
1920 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
1921 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
1922
6246b8a1 1923 if (ha->mqenable || IS_QLA83XX(ha)) {
73208dfd
AC
1924 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1925 icb->rid = __constant_cpu_to_le16(rid);
1926 if (ha->flags.msix_enabled) {
1927 msix = &ha->msix_entries[1];
7c3df132
SK
1928 ql_dbg(ql_dbg_init, vha, 0x00fd,
1929 "Registering vector 0x%x for base que.\n",
1930 msix->entry);
73208dfd
AC
1931 icb->msix = cpu_to_le16(msix->entry);
1932 }
1933 /* Use alternate PCI bus number */
1934 if (MSB(rid))
1935 icb->firmware_options_2 |=
1936 __constant_cpu_to_le32(BIT_19);
1937 /* Use alternate PCI devfn */
1938 if (LSB(rid))
1939 icb->firmware_options_2 |=
1940 __constant_cpu_to_le32(BIT_18);
1941
3155754a 1942 /* Use Disable MSIX Handshake mode for capable adapters */
6246b8a1
GM
1943 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
1944 (ha->flags.msix_enabled)) {
3155754a
AC
1945 icb->firmware_options_2 &=
1946 __constant_cpu_to_le32(~BIT_22);
1947 ha->flags.disable_msix_handshake = 1;
7c3df132
SK
1948 ql_dbg(ql_dbg_init, vha, 0x00fe,
1949 "MSIX Handshake Disable Mode turned on.\n");
3155754a
AC
1950 } else {
1951 icb->firmware_options_2 |=
1952 __constant_cpu_to_le32(BIT_22);
1953 }
73208dfd 1954 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
73208dfd
AC
1955
1956 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
1957 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
1958 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
1959 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
1960 } else {
1961 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
1962 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
1963 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
1964 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
1965 }
aa230bc5 1966 qlt_24xx_config_rings(vha);
2d70c103 1967
73208dfd
AC
1968 /* PCI posting */
1969 RD_REG_DWORD(&ioreg->hccr);
0107109e
AV
1970}
1971
1da177e4
LT
1972/**
1973 * qla2x00_init_rings() - Initializes firmware.
 1974 * @vha: HA context
1975 *
1976 * Beginning of request ring has initialization control block already built
1977 * by nvram config routine.
1978 *
1979 * Returns 0 on success.
1980 */
8ae6d9c7 1981int
e315cd28 1982qla2x00_init_rings(scsi_qla_host_t *vha)
1da177e4
LT
1983{
1984 int rval;
1985 unsigned long flags = 0;
29bdccbe 1986 int cnt, que;
e315cd28 1987 struct qla_hw_data *ha = vha->hw;
29bdccbe
AC
1988 struct req_que *req;
1989 struct rsp_que *rsp;
2c3dfe3f
SJ
1990 struct mid_init_cb_24xx *mid_init_cb =
1991 (struct mid_init_cb_24xx *) ha->init_cb;
1da177e4
LT
1992
1993 spin_lock_irqsave(&ha->hardware_lock, flags);
1994
1995 /* Clear outstanding commands array. */
2afa19a9 1996 for (que = 0; que < ha->max_req_queues; que++) {
29bdccbe
AC
1997 req = ha->req_q_map[que];
1998 if (!req)
1999 continue;
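		/* Handle 0 is never used for I/O, so clearing starts at index 1. */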
8d93f550 2000 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
29bdccbe 2001 req->outstanding_cmds[cnt] = NULL;
1da177e4 2002
2afa19a9 2003 req->current_outstanding_cmd = 1;
1da177e4 2004
29bdccbe
AC
2005 /* Initialize firmware. */
2006 req->ring_ptr = req->ring;
2007 req->ring_index = 0;
2008 req->cnt = req->length;
2009 }
1da177e4 2010
2afa19a9 2011 for (que = 0; que < ha->max_rsp_queues; que++) {
29bdccbe
AC
2012 rsp = ha->rsp_q_map[que];
2013 if (!rsp)
2014 continue;
29bdccbe 2015 /* Initialize response queue entries */
8ae6d9c7
GM
2016 if (IS_QLAFX00(ha))
2017 qlafx00_init_response_q_entries(rsp);
2018 else
2019 qla2x00_init_response_q_entries(rsp);
29bdccbe 2020 }
1da177e4 2021
2d70c103
NB
2022 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
2023 ha->tgt.atio_ring_index = 0;
2024 /* Initialize ATIO queue entries */
2025 qlt_init_atio_q_entries(vha);
2026
e315cd28 2027 ha->isp_ops->config_rings(vha);
1da177e4
LT
2028
2029 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2030
8ae6d9c7
GM
2031 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
2032
2033 if (IS_QLAFX00(ha)) {
2034 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
2035 goto next_check;
2036 }
2037
1da177e4 2038 /* Update any ISP specific firmware options before initialization. */
e315cd28 2039 ha->isp_ops->update_fw_options(vha);
1da177e4 2040
605aa2bc 2041 if (ha->flags.npiv_supported) {
45980cc2 2042 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
605aa2bc 2043 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
c48339de 2044 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
605aa2bc
LC
2045 }
2046
24a08138
AV
2047 if (IS_FWI2_CAPABLE(ha)) {
2048 mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
2049 mid_init_cb->init_cb.execution_throttle =
2050 cpu_to_le16(ha->fw_xcb_count);
2051 }
2c3dfe3f 2052
e315cd28 2053 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
8ae6d9c7 2054next_check:
1da177e4 2055 if (rval) {
7c3df132
SK
2056 ql_log(ql_log_fatal, vha, 0x00d2,
2057 "Init Firmware **** FAILED ****.\n");
1da177e4 2058 } else {
7c3df132
SK
2059 ql_dbg(ql_dbg_init, vha, 0x00d3,
2060 "Init Firmware -- success.\n");
1da177e4
LT
2061 }
2062
2063 return (rval);
2064}
2065
2066/**
2067 * qla2x00_fw_ready() - Waits for firmware ready.
 2068 * @vha: HA context
2069 *
2070 * Returns 0 on success.
2071 */
2072static int
e315cd28 2073qla2x00_fw_ready(scsi_qla_host_t *vha)
1da177e4
LT
2074{
2075 int rval;
4d4df193 2076 unsigned long wtime, mtime, cs84xx_time;
1da177e4
LT
2077 uint16_t min_wait; /* Minimum wait time if loop is down */
2078 uint16_t wait_time; /* Wait time if loop is coming ready */
656e8912 2079 uint16_t state[5];
e315cd28 2080 struct qla_hw_data *ha = vha->hw;
1da177e4 2081
8ae6d9c7
GM
2082 if (IS_QLAFX00(vha->hw))
2083 return qlafx00_fw_ready(vha);
2084
1da177e4
LT
2085 rval = QLA_SUCCESS;
2086
2087 /* 20 seconds for loop down. */
fa2a1ce5 2088 min_wait = 20;
1da177e4
LT
2089
2090 /*
2091 * Firmware should take at most one RATOV to login, plus 5 seconds for
2092 * our own processing.
2093 */
2094 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
2095 wait_time = min_wait;
2096 }
2097
2098 /* Min wait time if loop down */
2099 mtime = jiffies + (min_wait * HZ);
2100
2101 /* wait time before firmware ready */
2102 wtime = jiffies + (wait_time * HZ);
2103
2104 /* Wait for ISP to finish LIP */
e315cd28 2105 if (!vha->flags.init_done)
7c3df132
SK
2106 ql_log(ql_log_info, vha, 0x801e,
2107 "Waiting for LIP to complete.\n");
1da177e4
LT
2108
2109 do {
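		/*
		 * Pre-fill the state words with all ones so entries the mailbox
		 * command does not report are never mistaken for valid states.
		 */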
5b939038 2110 memset(state, -1, sizeof(state));
e315cd28 2111 rval = qla2x00_get_firmware_state(vha, state);
1da177e4 2112 if (rval == QLA_SUCCESS) {
4d4df193 2113 if (state[0] < FSTATE_LOSS_OF_SYNC) {
e315cd28 2114 vha->device_flags &= ~DFLG_NO_CABLE;
1da177e4 2115 }
4d4df193 2116 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
7c3df132
SK
2117 ql_dbg(ql_dbg_taskm, vha, 0x801f,
2118 "fw_state=%x 84xx=%x.\n", state[0],
2119 state[2]);
4d4df193
HK
2120 if ((state[2] & FSTATE_LOGGED_IN) &&
2121 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
7c3df132
SK
2122 ql_dbg(ql_dbg_taskm, vha, 0x8028,
2123 "Sending verify iocb.\n");
4d4df193
HK
2124
2125 cs84xx_time = jiffies;
e315cd28 2126 rval = qla84xx_init_chip(vha);
7c3df132
SK
2127 if (rval != QLA_SUCCESS) {
2128 ql_log(ql_log_warn,
cfb0919c 2129 vha, 0x8007,
7c3df132 2130 "Init chip failed.\n");
4d4df193 2131 break;
7c3df132 2132 }
4d4df193
HK
2133
2134 /* Add time taken to initialize. */
2135 cs84xx_time = jiffies - cs84xx_time;
2136 wtime += cs84xx_time;
2137 mtime += cs84xx_time;
cfb0919c 2138 ql_dbg(ql_dbg_taskm, vha, 0x8008,
7c3df132
SK
2139 "Increasing wait time by %ld. "
2140 "New time %ld.\n", cs84xx_time,
2141 wtime);
4d4df193
HK
2142 }
2143 } else if (state[0] == FSTATE_READY) {
7c3df132
SK
2144 ql_dbg(ql_dbg_taskm, vha, 0x8037,
2145 "F/W Ready - OK.\n");
1da177e4 2146
e315cd28 2147 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1da177e4
LT
2148 &ha->login_timeout, &ha->r_a_tov);
2149
2150 rval = QLA_SUCCESS;
2151 break;
2152 }
2153
2154 rval = QLA_FUNCTION_FAILED;
2155
e315cd28 2156 if (atomic_read(&vha->loop_down_timer) &&
4d4df193 2157 state[0] != FSTATE_READY) {
1da177e4 2158 /* Loop down. Timeout on min_wait for states
fa2a1ce5
AV
2159 * other than Wait for Login.
2160 */
1da177e4 2161 if (time_after_eq(jiffies, mtime)) {
7c3df132 2162 ql_log(ql_log_info, vha, 0x8038,
1da177e4
LT
2163 "Cable is unplugged...\n");
2164
e315cd28 2165 vha->device_flags |= DFLG_NO_CABLE;
1da177e4
LT
2166 break;
2167 }
2168 }
2169 } else {
2170 /* Mailbox cmd failed. Timeout on min_wait. */
cdbb0a4f 2171 if (time_after_eq(jiffies, mtime) ||
7190575f 2172 ha->flags.isp82xx_fw_hung)
1da177e4
LT
2173 break;
2174 }
2175
2176 if (time_after_eq(jiffies, wtime))
2177 break;
2178
2179 /* Delay for a while */
2180 msleep(500);
1da177e4
LT
2181 } while (1);
2182
7c3df132
SK
2183 ql_dbg(ql_dbg_taskm, vha, 0x803a,
2184 "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
2185 state[1], state[2], state[3], state[4], jiffies);
1da177e4 2186
cfb0919c 2187 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
7c3df132
SK
2188 ql_log(ql_log_warn, vha, 0x803b,
2189 "Firmware ready **** FAILED ****.\n");
1da177e4
LT
2190 }
2191
2192 return (rval);
2193}
2194
2195/*
2196* qla2x00_configure_hba
2197* Setup adapter context.
2198*
2199* Input:
2200* ha = adapter state pointer.
2201*
2202* Returns:
2203* 0 = success
2204*
2205* Context:
2206* Kernel context.
2207*/
2208static int
e315cd28 2209qla2x00_configure_hba(scsi_qla_host_t *vha)
1da177e4
LT
2210{
2211 int rval;
2212 uint16_t loop_id;
2213 uint16_t topo;
2c3dfe3f 2214 uint16_t sw_cap;
1da177e4
LT
2215 uint8_t al_pa;
2216 uint8_t area;
2217 uint8_t domain;
2218 char connect_type[22];
e315cd28 2219 struct qla_hw_data *ha = vha->hw;
f24b5cb8 2220 unsigned long flags;
61e1b269 2221 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1da177e4
LT
2222
2223 /* Get host addresses. */
e315cd28 2224 rval = qla2x00_get_adapter_id(vha,
2c3dfe3f 2225 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1da177e4 2226 if (rval != QLA_SUCCESS) {
e315cd28 2227 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
6246b8a1 2228 IS_CNA_CAPABLE(ha) ||
33135aa2 2229 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
7c3df132
SK
2230 ql_dbg(ql_dbg_disc, vha, 0x2008,
2231 "Loop is in a transition state.\n");
33135aa2 2232 } else {
7c3df132
SK
2233 ql_log(ql_log_warn, vha, 0x2009,
2234 "Unable to get host loop ID.\n");
61e1b269
JC
2235 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
2236 (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
2237 ql_log(ql_log_warn, vha, 0x1151,
2238 "Doing link init.\n");
2239 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
2240 return rval;
2241 }
e315cd28 2242 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
33135aa2 2243 }
1da177e4
LT
2244 return (rval);
2245 }
2246
2247 if (topo == 4) {
7c3df132
SK
2248 ql_log(ql_log_info, vha, 0x200a,
2249 "Cannot get topology - retrying.\n");
1da177e4
LT
2250 return (QLA_FUNCTION_FAILED);
2251 }
2252
e315cd28 2253 vha->loop_id = loop_id;
1da177e4
LT
2254
2255 /* initialize */
2256 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
2257 ha->operating_mode = LOOP;
2c3dfe3f 2258 ha->switch_cap = 0;
1da177e4
LT
2259
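	/*
	 * Topology codes reported by GET_ADAPTER_ID: 0 = NL (private loop),
	 * 1 = FL (public loop), 2 = N (point-to-point), 3 = F (fabric);
	 * 4 (topology not yet established) was rejected above.
	 */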
2260 switch (topo) {
2261 case 0:
7c3df132 2262 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
1da177e4
LT
2263 ha->current_topology = ISP_CFG_NL;
2264 strcpy(connect_type, "(Loop)");
2265 break;
2266
2267 case 1:
7c3df132 2268 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2c3dfe3f 2269 ha->switch_cap = sw_cap;
1da177e4
LT
2270 ha->current_topology = ISP_CFG_FL;
2271 strcpy(connect_type, "(FL_Port)");
2272 break;
2273
2274 case 2:
7c3df132 2275 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
1da177e4
LT
2276 ha->operating_mode = P2P;
2277 ha->current_topology = ISP_CFG_N;
2278 strcpy(connect_type, "(N_Port-to-N_Port)");
2279 break;
2280
2281 case 3:
7c3df132 2282 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2c3dfe3f 2283 ha->switch_cap = sw_cap;
1da177e4
LT
2284 ha->operating_mode = P2P;
2285 ha->current_topology = ISP_CFG_F;
2286 strcpy(connect_type, "(F_Port)");
2287 break;
2288
2289 default:
7c3df132
SK
2290 ql_dbg(ql_dbg_disc, vha, 0x200f,
2291 "HBA in unknown topology %x, using NL.\n", topo);
1da177e4
LT
2292 ha->current_topology = ISP_CFG_NL;
2293 strcpy(connect_type, "(Loop)");
2294 break;
2295 }
2296
2297 /* Save Host port and loop ID. */
2298 /* byte order - Big Endian */
e315cd28
AC
2299 vha->d_id.b.domain = domain;
2300 vha->d_id.b.area = area;
2301 vha->d_id.b.al_pa = al_pa;
1da177e4 2302
f24b5cb8 2303 spin_lock_irqsave(&ha->vport_slock, flags);
2d70c103 2304 qlt_update_vp_map(vha, SET_AL_PA);
f24b5cb8 2305 spin_unlock_irqrestore(&ha->vport_slock, flags);
2d70c103 2306
e315cd28 2307 if (!vha->flags.init_done)
7c3df132
SK
2308 ql_log(ql_log_info, vha, 0x2010,
2309 "Topology - %s, Host Loop address 0x%x.\n",
e315cd28 2310 connect_type, vha->loop_id);
1da177e4 2311
1da177e4
LT
2312 return(rval);
2313}
2314
a9083016 2315inline void
e315cd28
AC
2316qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
2317 char *def)
9bb9fcf2
AV
2318{
2319 char *st, *en;
2320 uint16_t index;
e315cd28 2321 struct qla_hw_data *ha = vha->hw;
ab671149 2322 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
6246b8a1 2323 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
9bb9fcf2
AV
2324
2325 if (memcmp(model, BINZERO, len) != 0) {
2326 strncpy(ha->model_number, model, len);
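		/* Trim trailing spaces and NULs left over from the NVRAM field. */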
2327 st = en = ha->model_number;
2328 en += len - 1;
2329 while (en > st) {
2330 if (*en != 0x20 && *en != 0x00)
2331 break;
2332 *en-- = '\0';
2333 }
2334
2335 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
2336 if (use_tbl &&
2337 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2 2338 index < QLA_MODEL_NAMES)
1ee27146
JC
2339 strncpy(ha->model_desc,
2340 qla2x00_model_name[index * 2 + 1],
2341 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
2342 } else {
2343 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
2344 if (use_tbl &&
2345 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2
AV
2346 index < QLA_MODEL_NAMES) {
2347 strcpy(ha->model_number,
2348 qla2x00_model_name[index * 2]);
1ee27146
JC
2349 strncpy(ha->model_desc,
2350 qla2x00_model_name[index * 2 + 1],
2351 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
2352 } else {
2353 strcpy(ha->model_number, def);
2354 }
2355 }
1ee27146 2356 if (IS_FWI2_CAPABLE(ha))
e315cd28 2357 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1ee27146 2358 sizeof(ha->model_desc));
9bb9fcf2
AV
2359}
2360
4e08df3f
DM
2361/* On sparc systems, obtain port and node WWN from firmware
2362 * properties.
2363 */
e315cd28 2364static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
4e08df3f
DM
2365{
2366#ifdef CONFIG_SPARC
e315cd28 2367 struct qla_hw_data *ha = vha->hw;
4e08df3f 2368 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
2369 struct device_node *dp = pci_device_to_OF_node(pdev);
2370 const u8 *val;
4e08df3f
DM
2371 int len;
2372
2373 val = of_get_property(dp, "port-wwn", &len);
2374 if (val && len >= WWN_SIZE)
2375 memcpy(nv->port_name, val, WWN_SIZE);
2376
2377 val = of_get_property(dp, "node-wwn", &len);
2378 if (val && len >= WWN_SIZE)
2379 memcpy(nv->node_name, val, WWN_SIZE);
2380#endif
2381}
2382
1da177e4
LT
2383/*
2384* NVRAM configuration for ISP 2xxx
2385*
2386* Input:
2387* ha = adapter block pointer.
2388*
2389* Output:
2390* initialization control block in response_ring
2391* host adapters parameters in host adapter block
2392*
2393* Returns:
2394* 0 = success.
2395*/
abbd8870 2396int
e315cd28 2397qla2x00_nvram_config(scsi_qla_host_t *vha)
1da177e4 2398{
4e08df3f 2399 int rval;
0107109e
AV
2400 uint8_t chksum = 0;
2401 uint16_t cnt;
2402 uint8_t *dptr1, *dptr2;
e315cd28 2403 struct qla_hw_data *ha = vha->hw;
0107109e 2404 init_cb_t *icb = ha->init_cb;
281afe19
SJ
2405 nvram_t *nv = ha->nvram;
2406 uint8_t *ptr = ha->nvram;
3d71644c 2407 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 2408
4e08df3f
DM
2409 rval = QLA_SUCCESS;
2410
1da177e4 2411 /* Determine NVRAM starting address. */
0107109e 2412 ha->nvram_size = sizeof(nvram_t);
1da177e4
LT
2413 ha->nvram_base = 0;
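	/*
	 * On dual-channel ISPs the second function's NVRAM image starts at
	 * word offset 0x80; the ctrl_status check below selects it.
	 */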
2414 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
2415 if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
2416 ha->nvram_base = 0x80;
2417
2418 /* Get NVRAM data and calculate checksum. */
e315cd28 2419 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
0107109e
AV
2420 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
2421 chksum += *ptr++;
1da177e4 2422
7c3df132
SK
2423 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
2424 "Contents of NVRAM.\n");
2425 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
2426 (uint8_t *)nv, ha->nvram_size);
1da177e4
LT
2427
2428 /* Bad NVRAM data, set defaults parameters. */
2429 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2430 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2431 /* Reset NVRAM data. */
7c3df132 2432 ql_log(ql_log_warn, vha, 0x0064,
9e336520 2433 "Inconsistent NVRAM "
7c3df132
SK
2434 "detected: checksum=0x%x id=%c version=0x%x.\n",
2435 chksum, nv->id[0], nv->nvram_version);
2436 ql_log(ql_log_warn, vha, 0x0065,
2437 "Falling back to "
2438 "functioning (yet invalid -- WWPN) defaults.\n");
4e08df3f
DM
2439
2440 /*
2441 * Set default initialization control block.
2442 */
2443 memset(nv, 0, ha->nvram_size);
2444 nv->parameter_block_version = ICB_VERSION;
2445
2446 if (IS_QLA23XX(ha)) {
2447 nv->firmware_options[0] = BIT_2 | BIT_1;
2448 nv->firmware_options[1] = BIT_7 | BIT_5;
2449 nv->add_firmware_options[0] = BIT_5;
2450 nv->add_firmware_options[1] = BIT_5 | BIT_4;
2451 nv->frame_payload_size = __constant_cpu_to_le16(2048);
2452 nv->special_options[1] = BIT_7;
2453 } else if (IS_QLA2200(ha)) {
2454 nv->firmware_options[0] = BIT_2 | BIT_1;
2455 nv->firmware_options[1] = BIT_7 | BIT_5;
2456 nv->add_firmware_options[0] = BIT_5;
2457 nv->add_firmware_options[1] = BIT_5 | BIT_4;
2458 nv->frame_payload_size = __constant_cpu_to_le16(1024);
2459 } else if (IS_QLA2100(ha)) {
2460 nv->firmware_options[0] = BIT_3 | BIT_1;
2461 nv->firmware_options[1] = BIT_5;
2462 nv->frame_payload_size = __constant_cpu_to_le16(1024);
2463 }
2464
2465 nv->max_iocb_allocation = __constant_cpu_to_le16(256);
2466 nv->execution_throttle = __constant_cpu_to_le16(16);
2467 nv->retry_count = 8;
2468 nv->retry_delay = 1;
2469
2470 nv->port_name[0] = 33;
2471 nv->port_name[3] = 224;
2472 nv->port_name[4] = 139;
2473
e315cd28 2474 qla2xxx_nvram_wwn_from_ofw(vha, nv);
4e08df3f
DM
2475
2476 nv->login_timeout = 4;
2477
2478 /*
2479 * Set default host adapter parameters
2480 */
2481 nv->host_p[1] = BIT_2;
2482 nv->reset_delay = 5;
2483 nv->port_down_retry_count = 8;
2484 nv->max_luns_per_target = __constant_cpu_to_le16(8);
2485 nv->link_down_timeout = 60;
2486
2487 rval = 1;
1da177e4
LT
2488 }
2489
2490#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2491 /*
2492 * The SN2 does not provide BIOS emulation which means you can't change
2493 * potentially bogus BIOS settings. Force the use of default settings
2494 * for link rate and frame size. Hope that the rest of the settings
2495 * are valid.
2496 */
2497 if (ia64_platform_is("sn2")) {
2498 nv->frame_payload_size = __constant_cpu_to_le16(2048);
2499 if (IS_QLA23XX(ha))
2500 nv->special_options[1] = BIT_7;
2501 }
2502#endif
2503
2504 /* Reset Initialization control block */
0107109e 2505 memset(icb, 0, ha->init_cb_size);
1da177e4
LT
2506
2507 /*
2508 * Setup driver NVRAM options.
2509 */
2510 nv->firmware_options[0] |= (BIT_6 | BIT_1);
2511 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
2512 nv->firmware_options[1] |= (BIT_5 | BIT_0);
2513 nv->firmware_options[1] &= ~BIT_4;
2514
2515 if (IS_QLA23XX(ha)) {
2516 nv->firmware_options[0] |= BIT_2;
2517 nv->firmware_options[0] &= ~BIT_3;
2d70c103 2518 nv->special_options[0] &= ~BIT_6;
0107109e 2519 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
1da177e4
LT
2520
2521 if (IS_QLA2300(ha)) {
2522 if (ha->fb_rev == FPM_2310) {
2523 strcpy(ha->model_number, "QLA2310");
2524 } else {
2525 strcpy(ha->model_number, "QLA2300");
2526 }
2527 } else {
e315cd28 2528 qla2x00_set_model_info(vha, nv->model_number,
9bb9fcf2 2529 sizeof(nv->model_number), "QLA23xx");
1da177e4
LT
2530 }
2531 } else if (IS_QLA2200(ha)) {
2532 nv->firmware_options[0] |= BIT_2;
2533 /*
2534 * 'Point-to-point preferred, else loop' is not a safe
2535 * connection mode setting.
2536 */
2537 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
2538 (BIT_5 | BIT_4)) {
2539 /* Force 'loop preferred, else point-to-point'. */
2540 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
2541 nv->add_firmware_options[0] |= BIT_5;
2542 }
2543 strcpy(ha->model_number, "QLA22xx");
2544 } else /*if (IS_QLA2100(ha))*/ {
2545 strcpy(ha->model_number, "QLA2100");
2546 }
2547
2548 /*
2549 * Copy over NVRAM RISC parameter block to initialization control block.
2550 */
2551 dptr1 = (uint8_t *)icb;
2552 dptr2 = (uint8_t *)&nv->parameter_block_version;
2553 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
2554 while (cnt--)
2555 *dptr1++ = *dptr2++;
2556
2557 /* Copy 2nd half. */
2558 dptr1 = (uint8_t *)icb->add_firmware_options;
2559 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
2560 while (cnt--)
2561 *dptr1++ = *dptr2++;
2562
5341e868
AV
2563 /* Use alternate WWN? */
2564 if (nv->host_p[1] & BIT_7) {
2565 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
2566 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
2567 }
2568
1da177e4
LT
2569 /* Prepare nodename */
2570 if ((icb->firmware_options[1] & BIT_6) == 0) {
2571 /*
2572 * Firmware will apply the following mask if the nodename was
2573 * not provided.
2574 */
2575 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
2576 icb->node_name[0] &= 0xF0;
2577 }
2578
2579 /*
2580 * Set host adapter parameters.
2581 */
3ce8866c
SK
2582
2583 /*
2584 * BIT_7 in the host-parameters section allows for modification to
2585 * internal driver logging.
2586 */
0181944f 2587 if (nv->host_p[0] & BIT_7)
cfb0919c 2588 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
1da177e4
LT
2589 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
2590 /* Always load RISC code on non ISP2[12]00 chips. */
2591 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
2592 ha->flags.disable_risc_code_load = 0;
2593 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
2594 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
2595 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
06c22bd1 2596 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
d4c760c2 2597 ha->flags.disable_serdes = 0;
1da177e4
LT
2598
2599 ha->operating_mode =
2600 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
2601
2602 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
2603 sizeof(ha->fw_seriallink_options));
2604
2605 /* save HBA serial number */
2606 ha->serial0 = icb->port_name[5];
2607 ha->serial1 = icb->port_name[6];
2608 ha->serial2 = icb->port_name[7];
e315cd28
AC
2609 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
2610 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1da177e4
LT
2611
2612 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
2613
2614 ha->retry_count = nv->retry_count;
2615
2616 /* Set minimum login_timeout to 4 seconds. */
5b91490e 2617 if (nv->login_timeout != ql2xlogintimeout)
1da177e4
LT
2618 nv->login_timeout = ql2xlogintimeout;
2619 if (nv->login_timeout < 4)
2620 nv->login_timeout = 4;
2621 ha->login_timeout = nv->login_timeout;
2622 icb->login_timeout = nv->login_timeout;
2623
00a537b8
AV
2624 /* Set minimum RATOV to 100 tenths of a second. */
2625 ha->r_a_tov = 100;
1da177e4 2626
1da177e4
LT
2627 ha->loop_reset_delay = nv->reset_delay;
2628
1da177e4
LT
2629 /* Link Down Timeout = 0:
2630 *
2631 * When Port Down timer expires we will start returning
2632 * I/O's to OS with "DID_NO_CONNECT".
2633 *
2634 * Link Down Timeout != 0:
2635 *
2636 * The driver waits for the link to come up after link down
2637 * before returning I/Os to OS with "DID_NO_CONNECT".
fa2a1ce5 2638 */
1da177e4
LT
2639 if (nv->link_down_timeout == 0) {
2640 ha->loop_down_abort_time =
354d6b21 2641 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
1da177e4
LT
2642 } else {
2643 ha->link_down_timeout = nv->link_down_timeout;
2644 ha->loop_down_abort_time =
2645 (LOOP_DOWN_TIME - ha->link_down_timeout);
fa2a1ce5 2646 }
1da177e4 2647
1da177e4
LT
2648 /*
2649 * Need enough time to try and get the port back.
2650 */
2651 ha->port_down_retry_count = nv->port_down_retry_count;
2652 if (qlport_down_retry)
2653 ha->port_down_retry_count = qlport_down_retry;
2654 /* Set login_retry_count */
2655 ha->login_retry_count = nv->retry_count;
2656 if (ha->port_down_retry_count == nv->port_down_retry_count &&
2657 ha->port_down_retry_count > 3)
2658 ha->login_retry_count = ha->port_down_retry_count;
2659 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
2660 ha->login_retry_count = ha->port_down_retry_count;
2661 if (ql2xloginretrycount)
2662 ha->login_retry_count = ql2xloginretrycount;
2663
1da177e4
LT
2664 icb->lun_enables = __constant_cpu_to_le16(0);
2665 icb->command_resource_count = 0;
2666 icb->immediate_notify_resource_count = 0;
2667 icb->timeout = __constant_cpu_to_le16(0);
2668
2669 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2670 /* Enable RIO */
2671 icb->firmware_options[0] &= ~BIT_3;
2672 icb->add_firmware_options[0] &=
2673 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2674 icb->add_firmware_options[0] |= BIT_2;
2675 icb->response_accumulation_timer = 3;
2676 icb->interrupt_delay_timer = 5;
2677
e315cd28 2678 vha->flags.process_response_queue = 1;
1da177e4 2679 } else {
4fdfefe5 2680 /* Enable ZIO. */
e315cd28 2681 if (!vha->flags.init_done) {
4fdfefe5
AV
2682 ha->zio_mode = icb->add_firmware_options[0] &
2683 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
2684 ha->zio_timer = icb->interrupt_delay_timer ?
2685 icb->interrupt_delay_timer: 2;
2686 }
1da177e4
LT
2687 icb->add_firmware_options[0] &=
2688 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
e315cd28 2689 vha->flags.process_response_queue = 0;
4fdfefe5 2690 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d
AV
2691 ha->zio_mode = QLA_ZIO_MODE_6;
2692
7c3df132 2693 ql_log(ql_log_info, vha, 0x0068,
4fdfefe5
AV
2694 "ZIO mode %d enabled; timer delay (%d us).\n",
2695 ha->zio_mode, ha->zio_timer * 100);
1da177e4 2696
4fdfefe5
AV
2697 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
2698 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
e315cd28 2699 vha->flags.process_response_queue = 1;
1da177e4
LT
2700 }
2701 }
2702
4e08df3f 2703 if (rval) {
7c3df132
SK
2704 ql_log(ql_log_warn, vha, 0x0069,
2705 "NVRAM configuration failed.\n");
4e08df3f
DM
2706 }
2707 return (rval);
1da177e4
LT
2708}
2709
19a7b4ae
JSEC
2710static void
2711qla2x00_rport_del(void *data)
2712{
2713 fc_port_t *fcport = data;
d97994dc 2714 struct fc_rport *rport;
2d70c103 2715 scsi_qla_host_t *vha = fcport->vha;
044d78e1 2716 unsigned long flags;
d97994dc 2717
044d78e1 2718 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
ac280b67 2719 rport = fcport->drport ? fcport->drport: fcport->rport;
d97994dc 2720 fcport->drport = NULL;
044d78e1 2721 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2d70c103 2722 if (rport) {
d97994dc 2723 fc_remote_port_delete(rport);
2d70c103
NB
2724 /*
2725 * Release the target mode FC NEXUS in qla_target.c code
 2726 * if target mode is enabled.
2727 */
2728 qlt_fc_port_deleted(vha, fcport);
2729 }
19a7b4ae
JSEC
2730}
2731
1da177e4
LT
2732/**
2733 * qla2x00_alloc_fcport() - Allocate a generic fcport.
 2734 * @vha: HA context
2735 * @flags: allocation flags
2736 *
2737 * Returns a pointer to the allocated fcport, or NULL, if none available.
2738 */
9a069e19 2739fc_port_t *
e315cd28 2740qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1da177e4
LT
2741{
2742 fc_port_t *fcport;
2743
bbfbbbc1
MK
2744 fcport = kzalloc(sizeof(fc_port_t), flags);
2745 if (!fcport)
2746 return NULL;
1da177e4
LT
2747
2748 /* Setup fcport template structure. */
e315cd28 2749 fcport->vha = vha;
1da177e4
LT
2750 fcport->port_type = FCT_UNKNOWN;
2751 fcport->loop_id = FC_NO_LOOP_ID;
ec426e10 2752 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
ad3e0eda 2753 fcport->supported_classes = FC_COS_UNSPECIFIED;
1da177e4 2754
bbfbbbc1 2755 return fcport;
1da177e4
LT
2756}
2757
2758/*
2759 * qla2x00_configure_loop
2760 * Updates Fibre Channel Device Database with what is actually on loop.
2761 *
2762 * Input:
2763 * ha = adapter block pointer.
2764 *
2765 * Returns:
2766 * 0 = success.
2767 * 1 = error.
2768 * 2 = database was full and device was not configured.
2769 */
2770static int
e315cd28 2771qla2x00_configure_loop(scsi_qla_host_t *vha)
1da177e4
LT
2772{
2773 int rval;
2774 unsigned long flags, save_flags;
e315cd28 2775 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
2776 rval = QLA_SUCCESS;
2777
2778 /* Get Initiator ID */
e315cd28
AC
2779 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2780 rval = qla2x00_configure_hba(vha);
1da177e4 2781 if (rval != QLA_SUCCESS) {
7c3df132
SK
2782 ql_dbg(ql_dbg_disc, vha, 0x2013,
2783 "Unable to configure HBA.\n");
1da177e4
LT
2784 return (rval);
2785 }
2786 }
2787
e315cd28 2788 save_flags = flags = vha->dpc_flags;
7c3df132
SK
2789 ql_dbg(ql_dbg_disc, vha, 0x2014,
2790 "Configure loop -- dpc flags = 0x%lx.\n", flags);
1da177e4
LT
2791
2792 /*
2793 * If we have both an RSCN and PORT UPDATE pending then handle them
2794 * both at the same time.
2795 */
e315cd28
AC
2796 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2797 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
1da177e4 2798
3064ff39
MH
2799 qla2x00_get_data_rate(vha);
2800
1da177e4
LT
2801 /* Determine what we need to do */
2802 if (ha->current_topology == ISP_CFG_FL &&
2803 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2804
1da177e4
LT
2805 set_bit(RSCN_UPDATE, &flags);
2806
2807 } else if (ha->current_topology == ISP_CFG_F &&
2808 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2809
1da177e4
LT
2810 set_bit(RSCN_UPDATE, &flags);
2811 clear_bit(LOCAL_LOOP_UPDATE, &flags);
21333b48
AV
2812
2813 } else if (ha->current_topology == ISP_CFG_N) {
2814 clear_bit(RSCN_UPDATE, &flags);
1da177e4 2815
e315cd28 2816 } else if (!vha->flags.online ||
1da177e4
LT
2817 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
2818
1da177e4
LT
2819 set_bit(RSCN_UPDATE, &flags);
2820 set_bit(LOCAL_LOOP_UPDATE, &flags);
2821 }
2822
2823 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
7c3df132
SK
2824 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2825 ql_dbg(ql_dbg_disc, vha, 0x2015,
2826 "Loop resync needed, failing.\n");
1da177e4 2827 rval = QLA_FUNCTION_FAILED;
642ef983 2828 } else
e315cd28 2829 rval = qla2x00_configure_local_loop(vha);
1da177e4
LT
2830 }
2831
2832 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
7c3df132
SK
2833 if (LOOP_TRANSITION(vha)) {
2834 ql_dbg(ql_dbg_disc, vha, 0x201e,
2835 "Needs RSCN update and loop transition.\n");
1da177e4 2836 rval = QLA_FUNCTION_FAILED;
7c3df132 2837 }
e315cd28
AC
2838 else
2839 rval = qla2x00_configure_fabric(vha);
1da177e4
LT
2840 }
2841
2842 if (rval == QLA_SUCCESS) {
e315cd28
AC
2843 if (atomic_read(&vha->loop_down_timer) ||
2844 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4
LT
2845 rval = QLA_FUNCTION_FAILED;
2846 } else {
e315cd28 2847 atomic_set(&vha->loop_state, LOOP_READY);
7c3df132
SK
2848 ql_dbg(ql_dbg_disc, vha, 0x2069,
2849 "LOOP READY.\n");
1da177e4
LT
2850 }
2851 }
2852
2853 if (rval) {
7c3df132
SK
2854 ql_dbg(ql_dbg_disc, vha, 0x206a,
2855 "%s *** FAILED ***.\n", __func__);
1da177e4 2856 } else {
7c3df132
SK
2857 ql_dbg(ql_dbg_disc, vha, 0x206b,
2858 "%s: exiting normally.\n", __func__);
1da177e4
LT
2859 }
2860
cc3ef7bc 2861 /* Restore state if a resync event occurred during processing */
e315cd28 2862 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4 2863 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
e315cd28 2864 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
f4658b6c 2865 if (test_bit(RSCN_UPDATE, &save_flags)) {
e315cd28 2866 set_bit(RSCN_UPDATE, &vha->dpc_flags);
f4658b6c 2867 }
1da177e4
LT
2868 }
2869
2870 return (rval);
2871}
2872
2873
2874
2875/*
2876 * qla2x00_configure_local_loop
2877 * Updates Fibre Channel Device Database with local loop devices.
2878 *
2879 * Input:
2880 * ha = adapter block pointer.
2881 *
2882 * Returns:
2883 * 0 = success.
2884 */
2885static int
e315cd28 2886qla2x00_configure_local_loop(scsi_qla_host_t *vha)
1da177e4
LT
2887{
2888 int rval, rval2;
2889 int found_devs;
2890 int found;
2891 fc_port_t *fcport, *new_fcport;
2892
2893 uint16_t index;
2894 uint16_t entries;
2895 char *id_iter;
2896 uint16_t loop_id;
2897 uint8_t domain, area, al_pa;
e315cd28 2898 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
2899
2900 found_devs = 0;
2901 new_fcport = NULL;
642ef983 2902 entries = MAX_FIBRE_DEVICES_LOOP;
1da177e4 2903
1da177e4 2904 /* Get list of logged in devices. */
642ef983 2905 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
e315cd28 2906 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
1da177e4
LT
2907 &entries);
2908 if (rval != QLA_SUCCESS)
2909 goto cleanup_allocation;
2910
7c3df132
SK
2911 ql_dbg(ql_dbg_disc, vha, 0x2017,
2912 "Entries in ID list (%d).\n", entries);
2913 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
2914 (uint8_t *)ha->gid_list,
2915 entries * sizeof(struct gid_list_info));
1da177e4
LT
2916
2917 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 2918 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 2919 if (new_fcport == NULL) {
7c3df132
SK
2920 ql_log(ql_log_warn, vha, 0x2018,
2921 "Memory allocation failed for fcport.\n");
1da177e4
LT
2922 rval = QLA_MEMORY_ALLOC_FAILED;
2923 goto cleanup_allocation;
2924 }
2925 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2926
2927 /*
2928 * Mark local devices that were present with FCF_DEVICE_LOST for now.
2929 */
e315cd28 2930 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
2931 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2932 fcport->port_type != FCT_BROADCAST &&
2933 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2934
7c3df132
SK
2935 ql_dbg(ql_dbg_disc, vha, 0x2019,
2936 "Marking port lost loop_id=0x%04x.\n",
2937 fcport->loop_id);
1da177e4 2938
ec426e10 2939 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
1da177e4
LT
2940 }
2941 }
2942
2943 /* Add devices to port list. */
2944 id_iter = (char *)ha->gid_list;
2945 for (index = 0; index < entries; index++) {
2946 domain = ((struct gid_list_info *)id_iter)->domain;
2947 area = ((struct gid_list_info *)id_iter)->area;
2948 al_pa = ((struct gid_list_info *)id_iter)->al_pa;
abbd8870 2949 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1da177e4
LT
2950 loop_id = (uint16_t)
2951 ((struct gid_list_info *)id_iter)->loop_id_2100;
abbd8870 2952 else
1da177e4
LT
2953 loop_id = le16_to_cpu(
2954 ((struct gid_list_info *)id_iter)->loop_id);
abbd8870 2955 id_iter += ha->gid_list_info_size;
1da177e4
LT
2956
2957 /* Bypass reserved domain fields. */
2958 if ((domain & 0xf0) == 0xf0)
2959 continue;
2960
2961 /* Bypass if not same domain and area of adapter. */
f7d289f6 2962 if (area && domain &&
e315cd28 2963 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
1da177e4
LT
2964 continue;
2965
2966 /* Bypass invalid local loop ID. */
2967 if (loop_id > LAST_LOCAL_LOOP_ID)
2968 continue;
2969
370d550e
AE
2970 memset(new_fcport, 0, sizeof(fc_port_t));
2971
1da177e4
LT
2972 /* Fill in member data. */
2973 new_fcport->d_id.b.domain = domain;
2974 new_fcport->d_id.b.area = area;
2975 new_fcport->d_id.b.al_pa = al_pa;
2976 new_fcport->loop_id = loop_id;
e315cd28 2977 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
1da177e4 2978 if (rval2 != QLA_SUCCESS) {
7c3df132
SK
2979 ql_dbg(ql_dbg_disc, vha, 0x201a,
2980 "Failed to retrieve fcport information "
2981 "-- get_port_database=%x, loop_id=0x%04x.\n",
2982 rval2, new_fcport->loop_id);
2983 ql_dbg(ql_dbg_disc, vha, 0x201b,
2984 "Scheduling resync.\n");
e315cd28 2985 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4
LT
2986 continue;
2987 }
2988
2989 /* Check for matching device in port list. */
2990 found = 0;
2991 fcport = NULL;
e315cd28 2992 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
2993 if (memcmp(new_fcport->port_name, fcport->port_name,
2994 WWN_SIZE))
2995 continue;
2996
ddb9b126 2997 fcport->flags &= ~FCF_FABRIC_DEVICE;
1da177e4
LT
2998 fcport->loop_id = new_fcport->loop_id;
2999 fcport->port_type = new_fcport->port_type;
3000 fcport->d_id.b24 = new_fcport->d_id.b24;
3001 memcpy(fcport->node_name, new_fcport->node_name,
3002 WWN_SIZE);
3003
3004 found++;
3005 break;
3006 }
3007
3008 if (!found) {
3009 /* New device, add to fcports list. */
e315cd28 3010 list_add_tail(&new_fcport->list, &vha->vp_fcports);
1da177e4
LT
3011
3012 /* Allocate a new replacement fcport. */
3013 fcport = new_fcport;
e315cd28 3014 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 3015 if (new_fcport == NULL) {
7c3df132
SK
3016 ql_log(ql_log_warn, vha, 0x201c,
3017 "Failed to allocate memory for fcport.\n");
1da177e4
LT
3018 rval = QLA_MEMORY_ALLOC_FAILED;
3019 goto cleanup_allocation;
3020 }
3021 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
3022 }
3023
d8b45213 3024 /* Base iIDMA settings on HBA port speed. */
a3cbdfad 3025 fcport->fp_speed = ha->link_data_rate;
d8b45213 3026
e315cd28 3027 qla2x00_update_fcport(vha, fcport);
1da177e4
LT
3028
3029 found_devs++;
3030 }
3031
3032cleanup_allocation:
c9475cb0 3033 kfree(new_fcport);
1da177e4
LT
3034
3035 if (rval != QLA_SUCCESS) {
7c3df132
SK
3036 ql_dbg(ql_dbg_disc, vha, 0x201d,
3037 "Configure local loop error exit: rval=%x.\n", rval);
1da177e4
LT
3038 }
3039
1da177e4
LT
3040 return (rval);
3041}
3042
d8b45213 3043static void
e315cd28 3044qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
d8b45213 3045{
d8b45213 3046 int rval;
1bb39548 3047 uint16_t mb[4];
e315cd28 3048 struct qla_hw_data *ha = vha->hw;
d8b45213 3049
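	/*
	 * Only tune iIDMA when the HBA supports it, the port is online, and
	 * the reported port speed is known and not above the current link rate.
	 */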
c76f2c01 3050 if (!IS_IIDMA_CAPABLE(ha))
d8b45213
AV
3051 return;
3052
c9afb9a2
GM
3053 if (atomic_read(&fcport->state) != FCS_ONLINE)
3054 return;
3055
39bd9622
AV
3056 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
3057 fcport->fp_speed > ha->link_data_rate)
d8b45213
AV
3058 return;
3059
e315cd28 3060 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
a3cbdfad 3061 mb);
d8b45213 3062 if (rval != QLA_SUCCESS) {
7c3df132
SK
3063 ql_dbg(ql_dbg_disc, vha, 0x2004,
3064 "Unable to adjust iIDMA "
3065 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x "
3066 "%04x.\n", fcport->port_name[0], fcport->port_name[1],
d8b45213
AV
3067 fcport->port_name[2], fcport->port_name[3],
3068 fcport->port_name[4], fcport->port_name[5],
3069 fcport->port_name[6], fcport->port_name[7], rval,
7c3df132 3070 fcport->fp_speed, mb[0], mb[1]);
d8b45213 3071 } else {
7c3df132
SK
3072 ql_dbg(ql_dbg_disc, vha, 0x2005,
3073 "iIDMA adjusted to %s GB/s "
d0297c9a
JC
3074 "on %02x%02x%02x%02x%02x%02x%02x%02x.\n",
3075 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
7c3df132
SK
3076 fcport->port_name[0], fcport->port_name[1],
3077 fcport->port_name[2], fcport->port_name[3],
3078 fcport->port_name[4], fcport->port_name[5],
3079 fcport->port_name[6], fcport->port_name[7]);
d8b45213
AV
3080 }
3081}
3082
23be331d 3083static void
e315cd28 3084qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
8482e118
AV
3085{
3086 struct fc_rport_identifiers rport_ids;
bdf79621 3087 struct fc_rport *rport;
044d78e1 3088 unsigned long flags;
8482e118 3089
ac280b67 3090 qla2x00_rport_del(fcport);
8482e118 3091
f8b02a85
AV
3092 rport_ids.node_name = wwn_to_u64(fcport->node_name);
3093 rport_ids.port_name = wwn_to_u64(fcport->port_name);
8482e118
AV
3094 rport_ids.port_id = fcport->d_id.b.domain << 16 |
3095 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
77d74143 3096 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
e315cd28 3097 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
77d74143 3098 if (!rport) {
7c3df132
SK
3099 ql_log(ql_log_warn, vha, 0x2006,
3100 "Unable to allocate fc remote port.\n");
77d74143
AV
3101 return;
3102 }
2d70c103
NB
3103 /*
3104 * Create target mode FC NEXUS in qla_target.c if target mode is
 3105 * enabled.
3106 */
3107 qlt_fc_port_added(vha, fcport);
3108
044d78e1 3109 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
19a7b4ae 3110 *((fc_port_t **)rport->dd_data) = fcport;
044d78e1 3111 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
d97994dc 3112
ad3e0eda 3113 rport->supported_classes = fcport->supported_classes;
77d74143 3114
8482e118
AV
3115 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
3116 if (fcport->port_type == FCT_INITIATOR)
3117 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
3118 if (fcport->port_type == FCT_TARGET)
3119 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
77d74143 3120 fc_remote_port_rolechg(rport, rport_ids.roles);
1da177e4
LT
3121}
3122
23be331d
AB
3123/*
3124 * qla2x00_update_fcport
3125 * Updates device on list.
3126 *
3127 * Input:
3128 * ha = adapter block pointer.
3129 * fcport = port structure pointer.
3130 *
3135 * Context:
3136 * Kernel context.
3137 */
3138void
e315cd28 3139qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
23be331d 3140{
e315cd28 3141 fcport->vha = vha;
8ae6d9c7
GM
3142
3143 if (IS_QLAFX00(vha->hw)) {
3144 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3145 qla2x00_reg_remote_port(vha, fcport);
3146 return;
3147 }
23be331d 3148 fcport->login_retry = 0;
5ff1d584 3149 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
23be331d 3150
1f93da52 3151 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
e315cd28 3152 qla2x00_iidma_fcport(vha, fcport);
21090cbe 3153 qla24xx_update_fcport_fcp_prio(vha, fcport);
e315cd28 3154 qla2x00_reg_remote_port(vha, fcport);
23be331d
AB
3155}
3156
1da177e4
LT
3157/*
3158 * qla2x00_configure_fabric
3159 * Setup SNS devices with loop ID's.
3160 *
3161 * Input:
3162 * ha = adapter block pointer.
3163 *
3164 * Returns:
3165 * 0 = success.
3166 * BIT_0 = error
3167 */
3168static int
e315cd28 3169qla2x00_configure_fabric(scsi_qla_host_t *vha)
1da177e4 3170{
b3b02e6e 3171 int rval;
e452ceb6 3172 fc_port_t *fcport, *fcptemp;
1da177e4
LT
3173 uint16_t next_loopid;
3174 uint16_t mb[MAILBOX_REGISTER_COUNT];
0107109e 3175 uint16_t loop_id;
1da177e4 3176 LIST_HEAD(new_fcports);
e315cd28
AC
3177 struct qla_hw_data *ha = vha->hw;
3178 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1da177e4
LT
3179
3180 /* If FL port exists, then SNS is present */
e428924c 3181 if (IS_FWI2_CAPABLE(ha))
0107109e
AV
3182 loop_id = NPH_F_PORT;
3183 else
3184 loop_id = SNS_FL_PORT;
e315cd28 3185 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
1da177e4 3186 if (rval != QLA_SUCCESS) {
7c3df132
SK
3187 ql_dbg(ql_dbg_disc, vha, 0x201f,
3188 "MBX_GET_PORT_NAME failed, No FL Port.\n");
1da177e4 3189
e315cd28 3190 vha->device_flags &= ~SWITCH_FOUND;
1da177e4
LT
3191 return (QLA_SUCCESS);
3192 }
e315cd28 3193 vha->device_flags |= SWITCH_FOUND;
1da177e4 3194
1da177e4 3195 do {
cca5335c
AV
3196 /* FDMI support. */
3197 if (ql2xfdmienable &&
e315cd28
AC
3198 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
3199 qla2x00_fdmi_register(vha);
cca5335c 3200
1da177e4 3201 /* Ensure we are logged into the SNS. */
e428924c 3202 if (IS_FWI2_CAPABLE(ha))
0107109e
AV
3203 loop_id = NPH_SNS;
3204 else
3205 loop_id = SIMPLE_NAME_SERVER;
0b91d116
CD
3206 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
3207 0xfc, mb, BIT_1|BIT_0);
3208 if (rval != QLA_SUCCESS) {
3209 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
e452ceb6 3210 return rval;
0b91d116 3211 }
1da177e4 3212 if (mb[0] != MBS_COMMAND_COMPLETE) {
7c3df132
SK
3213 ql_dbg(ql_dbg_disc, vha, 0x2042,
3214 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
3215 "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
3216 mb[2], mb[6], mb[7]);
1da177e4
LT
3217 return (QLA_SUCCESS);
3218 }
3219
e315cd28
AC
3220 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
3221 if (qla2x00_rft_id(vha)) {
1da177e4 3222 /* EMPTY */
7c3df132
SK
3223 ql_dbg(ql_dbg_disc, vha, 0x2045,
3224 "Register FC-4 TYPE failed.\n");
1da177e4 3225 }
e315cd28 3226 if (qla2x00_rff_id(vha)) {
1da177e4 3227 /* EMPTY */
7c3df132
SK
3228 ql_dbg(ql_dbg_disc, vha, 0x2049,
3229 "Register FC-4 Features failed.\n");
1da177e4 3230 }
e315cd28 3231 if (qla2x00_rnn_id(vha)) {
1da177e4 3232 /* EMPTY */
7c3df132
SK
3233 ql_dbg(ql_dbg_disc, vha, 0x204f,
3234 "Register Node Name failed.\n");
e315cd28 3235 } else if (qla2x00_rsnn_nn(vha)) {
1da177e4 3236 /* EMPTY */
7c3df132
SK
3237 ql_dbg(ql_dbg_disc, vha, 0x2053,
3238 "Register Symobilic Node Name failed.\n");
1da177e4
LT
3239 }
3240 }
3241
827210ba
JC
3242#define QLA_FCPORT_SCAN 1
3243#define QLA_FCPORT_FOUND 2
3244
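		/*
		 * Mark every known fabric port for scanning; the fabric walk
		 * below flips matches to QLA_FCPORT_FOUND, and anything still
		 * marked QLA_FCPORT_SCAN afterwards is treated as lost and
		 * logged out.
		 */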
3245 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3246 fcport->scan_state = QLA_FCPORT_SCAN;
3247 }
3248
e315cd28 3249 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
1da177e4
LT
3250 if (rval != QLA_SUCCESS)
3251 break;
3252
e452ceb6
JC
3253 /*
3254 * Logout all previous fabric devices marked lost, except
3255 * FCP2 devices.
3256 */
e315cd28
AC
3257 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3258 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
1da177e4
LT
3259 break;
3260
3261 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3262 continue;
3263
827210ba 3264 if (fcport->scan_state == QLA_FCPORT_SCAN &&
b3b02e6e 3265 atomic_read(&fcport->state) == FCS_ONLINE) {
e315cd28 3266 qla2x00_mark_device_lost(vha, fcport,
d97994dc 3267 ql2xplogiabsentdevice, 0);
1da177e4 3268 if (fcport->loop_id != FC_NO_LOOP_ID &&
f08b7251 3269 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
1da177e4
LT
3270 fcport->port_type != FCT_INITIATOR &&
3271 fcport->port_type != FCT_BROADCAST) {
e315cd28 3272 ha->isp_ops->fabric_logout(vha,
1c7c6357
AV
3273 fcport->loop_id,
3274 fcport->d_id.b.domain,
3275 fcport->d_id.b.area,
3276 fcport->d_id.b.al_pa);
e452ceb6 3277 fcport->loop_id = FC_NO_LOOP_ID;
1da177e4
LT
3278 }
3279 }
e452ceb6 3280 }
1da177e4 3281
e452ceb6
JC
3282 /* Starting free loop ID. */
3283 next_loopid = ha->min_external_loopid;
3284
3285 /*
3286 * Scan through our port list and login entries that need to be
3287 * logged in.
3288 */
3289 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3290 if (atomic_read(&vha->loop_down_timer) ||
3291 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3292 break;
3293
3294 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3295 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3296 continue;
3297
3298 if (fcport->loop_id == FC_NO_LOOP_ID) {
3299 fcport->loop_id = next_loopid;
3300 rval = qla2x00_find_new_loop_id(
3301 base_vha, fcport);
3302 if (rval != QLA_SUCCESS) {
3303 /* Ran out of IDs to use */
3304 break;
1da177e4
LT
3305 }
3306 }
e452ceb6
JC
3307 /* Login and update database */
3308 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3309 }
3310
3311 /* Exit if out of loop IDs. */
3312 if (rval != QLA_SUCCESS) {
3313 break;
3314 }
3315
3316 /*
3317 * Login and add the new devices to our port list.
3318 */
3319 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3320 if (atomic_read(&vha->loop_down_timer) ||
3321 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3322 break;
3323
3324 /* Find a new loop ID to use. */
3325 fcport->loop_id = next_loopid;
3326 rval = qla2x00_find_new_loop_id(base_vha, fcport);
3327 if (rval != QLA_SUCCESS) {
3328 /* Ran out of IDs to use */
3329 break;
3330 }
1da177e4 3331
bdf79621 3332 /* Login and update database */
e315cd28 3333 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
e452ceb6
JC
3334
3335 list_move_tail(&fcport->list, &vha->vp_fcports);
1da177e4
LT
3336 }
3337 } while (0);
3338
e452ceb6
JC
3339 /* Free all new device structures not processed. */
3340 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3341 list_del(&fcport->list);
3342 kfree(fcport);
3343 }
3344
1da177e4 3345 if (rval) {
7c3df132
SK
3346 ql_dbg(ql_dbg_disc, vha, 0x2068,
3347 "Configure fabric error exit rval=%d.\n", rval);
1da177e4
LT
3348 }
3349
3350 return (rval);
3351}
3352
1da177e4
LT
3353/*
3354 * qla2x00_find_all_fabric_devs
3355 *
3356 * Input:
3357 * ha = adapter block pointer.
 3358 * new_fcports = list to receive newly discovered fabric devices.
3359 *
3360 * Returns:
3361 * 0 = success.
3362 *
3363 * Context:
3364 * Kernel context.
3365 */
3366static int
e315cd28
AC
3367qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3368 struct list_head *new_fcports)
1da177e4
LT
3369{
3370 int rval;
3371 uint16_t loop_id;
3372 fc_port_t *fcport, *new_fcport, *fcptemp;
3373 int found;
3374
3375 sw_info_t *swl;
3376 int swl_idx;
3377 int first_dev, last_dev;
1516ef44 3378 port_id_t wrap = {}, nxt_d_id;
e315cd28 3379 struct qla_hw_data *ha = vha->hw;
bb4cf5b7 3380 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1da177e4
LT
3381
3382 rval = QLA_SUCCESS;
3383
3384 /* Try GID_PT to get device list, else GAN. */
7a67735b 3385 if (!ha->swl)
642ef983 3386 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
7a67735b
AV
3387 GFP_KERNEL);
3388 swl = ha->swl;
bbfbbbc1 3389 if (!swl) {
1da177e4 3390 /*EMPTY*/
7c3df132
SK
3391 ql_dbg(ql_dbg_disc, vha, 0x2054,
3392 "GID_PT allocations failed, fallback on GA_NXT.\n");
1da177e4 3393 } else {
642ef983 3394 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
e315cd28 3395 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
1da177e4 3396 swl = NULL;
e315cd28 3397 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 3398 swl = NULL;
e315cd28 3399 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 3400 swl = NULL;
e5896bd5 3401 } else if (ql2xiidmaenable &&
e315cd28
AC
3402 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
3403 qla2x00_gpsc(vha, swl);
1da177e4 3404 }
e8c72ba5
CD
3405
3406 /* If other queries succeeded probe for FC-4 type */
3407 if (swl)
3408 qla2x00_gff_id(vha, swl);
1da177e4
LT
3409 }
3410 swl_idx = 0;
3411
3412 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 3413 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 3414 if (new_fcport == NULL) {
7c3df132
SK
3415 ql_log(ql_log_warn, vha, 0x205e,
3416 "Failed to allocate memory for fcport.\n");
1da177e4
LT
3417 return (QLA_MEMORY_ALLOC_FAILED);
3418 }
3419 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
1da177e4
LT
3420 /* Set start port ID scan at adapter ID. */
3421 first_dev = 1;
3422 last_dev = 0;
3423
3424 /* Starting free loop ID. */
e315cd28
AC
3425 loop_id = ha->min_external_loopid;
3426 for (; loop_id <= ha->max_loop_id; loop_id++) {
3427 if (qla2x00_is_reserved_id(vha, loop_id))
1da177e4
LT
3428 continue;
3429
3a6478df
GM
3430 if (ha->current_topology == ISP_CFG_FL &&
3431 (atomic_read(&vha->loop_down_timer) ||
3432 LOOP_TRANSITION(vha))) {
bb2d52b2
AV
3433 atomic_set(&vha->loop_down_timer, 0);
3434 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3435 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4 3436 break;
bb2d52b2 3437 }
1da177e4
LT
3438
3439 if (swl != NULL) {
3440 if (last_dev) {
3441 wrap.b24 = new_fcport->d_id.b24;
3442 } else {
3443 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
3444 memcpy(new_fcport->node_name,
3445 swl[swl_idx].node_name, WWN_SIZE);
3446 memcpy(new_fcport->port_name,
3447 swl[swl_idx].port_name, WWN_SIZE);
d8b45213
AV
3448 memcpy(new_fcport->fabric_port_name,
3449 swl[swl_idx].fabric_port_name, WWN_SIZE);
3450 new_fcport->fp_speed = swl[swl_idx].fp_speed;
e8c72ba5 3451 new_fcport->fc4_type = swl[swl_idx].fc4_type;
1da177e4
LT
3452
3453 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
3454 last_dev = 1;
3455 }
3456 swl_idx++;
3457 }
3458 } else {
3459 /* Send GA_NXT to the switch */
e315cd28 3460 rval = qla2x00_ga_nxt(vha, new_fcport);
1da177e4 3461 if (rval != QLA_SUCCESS) {
7c3df132
SK
3462 ql_log(ql_log_warn, vha, 0x2064,
3463 "SNS scan failed -- assuming "
3464 "zero-entry result.\n");
1da177e4
LT
3465 list_for_each_entry_safe(fcport, fcptemp,
3466 new_fcports, list) {
3467 list_del(&fcport->list);
3468 kfree(fcport);
3469 }
3470 rval = QLA_SUCCESS;
3471 break;
3472 }
3473 }
3474
3475 /* If wrap on switch device list, exit. */
3476 if (first_dev) {
3477 wrap.b24 = new_fcport->d_id.b24;
3478 first_dev = 0;
3479 } else if (new_fcport->d_id.b24 == wrap.b24) {
7c3df132
SK
3480 ql_dbg(ql_dbg_disc, vha, 0x2065,
3481 "Device wrap (%02x%02x%02x).\n",
3482 new_fcport->d_id.b.domain,
3483 new_fcport->d_id.b.area,
3484 new_fcport->d_id.b.al_pa);
1da177e4
LT
3485 break;
3486 }
3487
2c3dfe3f 3488 /* Bypass if same physical adapter. */
e315cd28 3489 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
1da177e4
LT
3490 continue;
3491
2c3dfe3f 3492 /* Bypass virtual ports of the same host. */
bb4cf5b7
CD
3493 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
3494 continue;
2c3dfe3f 3495
f7d289f6
AV
3496 /* Bypass if same domain and area of adapter. */
3497 if (((new_fcport->d_id.b24 & 0xffff00) ==
e315cd28 3498 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
f7d289f6
AV
3499 ISP_CFG_FL)
3500 continue;
3501
1da177e4
LT
3502 /* Bypass reserved domain fields. */
3503 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
3504 continue;
3505
e8c72ba5 3506 /* Bypass ports whose FC-4 type is not FCP_SCSI */
4da26e16
CD
3507 if (ql2xgffidenable &&
3508 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
3509 new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
e8c72ba5
CD
3510 continue;
3511
1da177e4
LT
3512 /* Locate matching device in database. */
3513 found = 0;
e315cd28 3514 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
3515 if (memcmp(new_fcport->port_name, fcport->port_name,
3516 WWN_SIZE))
3517 continue;
3518
827210ba 3519 fcport->scan_state = QLA_FCPORT_FOUND;
b3b02e6e 3520
1da177e4
LT
3521 found++;
3522
d8b45213
AV
3523 /* Update port state. */
3524 memcpy(fcport->fabric_port_name,
3525 new_fcport->fabric_port_name, WWN_SIZE);
3526 fcport->fp_speed = new_fcport->fp_speed;
3527
1da177e4
LT
3528 /*
3529 * If address the same and state FCS_ONLINE, nothing
3530 * changed.
3531 */
3532 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3533 atomic_read(&fcport->state) == FCS_ONLINE) {
3534 break;
3535 }
3536
3537 /*
3538 * If device was not a fabric device before.
3539 */
3540 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3541 fcport->d_id.b24 = new_fcport->d_id.b24;
5f16b331 3542 qla2x00_clear_loop_id(fcport);
1da177e4
LT
3543 fcport->flags |= (FCF_FABRIC_DEVICE |
3544 FCF_LOGIN_NEEDED);
1da177e4
LT
3545 break;
3546 }
3547
3548 /*
3549 * Port ID changed or device was marked to be updated;
3550 * Log it out if still logged in and mark it for
3551 * relogin later.
3552 */
3553 fcport->d_id.b24 = new_fcport->d_id.b24;
3554 fcport->flags |= FCF_LOGIN_NEEDED;
3555 if (fcport->loop_id != FC_NO_LOOP_ID &&
f08b7251 3556 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
0eba25df 3557 (fcport->flags & FCF_ASYNC_SENT) == 0 &&
1da177e4
LT
3558 fcport->port_type != FCT_INITIATOR &&
3559 fcport->port_type != FCT_BROADCAST) {
e315cd28 3560 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
3561 fcport->d_id.b.domain, fcport->d_id.b.area,
3562 fcport->d_id.b.al_pa);
5f16b331 3563 qla2x00_clear_loop_id(fcport);
1da177e4
LT
3564 }
3565
3566 break;
3567 }
3568
3569 if (found)
3570 continue;
1da177e4
LT
3571 /* If device was not in our fcports list, then add it. */
3572 list_add_tail(&new_fcport->list, new_fcports);
3573
3574 /* Allocate a new replacement fcport. */
3575 nxt_d_id.b24 = new_fcport->d_id.b24;
e315cd28 3576 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 3577 if (new_fcport == NULL) {
7c3df132
SK
3578 ql_log(ql_log_warn, vha, 0x2066,
3579 "Memory allocation failed for fcport.\n");
1da177e4
LT
3580 return (QLA_MEMORY_ALLOC_FAILED);
3581 }
3582 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3583 new_fcport->d_id.b24 = nxt_d_id.b24;
3584 }
3585
c9475cb0 3586 kfree(new_fcport);
1da177e4 3587
1da177e4
LT
3588 return (rval);
3589}
3590
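/*
 * Editor's illustrative sketch, not part of the original driver: the
 * wrap-detection idiom used by qla2x00_find_all_fabric_devs() above.
 * The first port ID returned by the name server is remembered; when the
 * same ID comes around again the switch list has wrapped and the scan
 * stops.  The port_id_t type and its b24 field come from the code above;
 * get_next_port_id() is a hypothetical stand-in for the GID_PT/GA_NXT
 * lookups performed there.
 */
#if 0	/* sketch only -- not compiled */
static void example_walk_fabric_list(void)
{
	port_id_t wrap = {}, cur;
	int first_dev = 1;

	for (;;) {
		cur = get_next_port_id();		/* hypothetical lookup */

		if (first_dev) {
			wrap.b24 = cur.b24;		/* remember starting point */
			first_dev = 0;
		} else if (cur.b24 == wrap.b24) {
			break;				/* wrapped -- scan complete */
		}

		/* ... process cur as in the discovery loop above ... */
	}
}
#endif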
3591/*
3592 * qla2x00_find_new_loop_id
3593 * Scan through our port list and find a new usable loop ID.
3594 *
3595 * Input:
3596 * ha: adapter state pointer.
3597 * dev: port structure pointer.
3598 *
3599 * Returns:
3600 * qla2x00 local function return status code.
3601 *
3602 * Context:
3603 * Kernel context.
3604 */
03bcfb57 3605int
e315cd28 3606qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
1da177e4
LT
3607{
3608 int rval;
e315cd28 3609 struct qla_hw_data *ha = vha->hw;
feafb7b1 3610 unsigned long flags = 0;
1da177e4
LT
3611
3612 rval = QLA_SUCCESS;
3613
5f16b331 3614 spin_lock_irqsave(&ha->vport_slock, flags);
1da177e4 3615
5f16b331
CD
3616 dev->loop_id = find_first_zero_bit(ha->loop_id_map,
3617 LOOPID_MAP_SIZE);
3618 if (dev->loop_id >= LOOPID_MAP_SIZE ||
3619 qla2x00_is_reserved_id(vha, dev->loop_id)) {
3620 dev->loop_id = FC_NO_LOOP_ID;
3621 rval = QLA_FUNCTION_FAILED;
3622 } else
3623 set_bit(dev->loop_id, ha->loop_id_map);
1da177e4 3624
5f16b331 3625 spin_unlock_irqrestore(&ha->vport_slock, flags);
1da177e4 3626
5f16b331
CD
3627 if (rval == QLA_SUCCESS)
3628 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
3629 "Assigning new loopid=%x, portid=%x.\n",
3630 dev->loop_id, dev->d_id.b24);
3631 else
3632 ql_log(ql_log_warn, dev->vha, 0x2087,
3633 "No loop_id's available, portid=%x.\n",
3634 dev->d_id.b24);
1da177e4
LT
3635
3636 return (rval);
3637}
3638
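/*
 * Editor's illustrative sketch, not part of the original driver: the
 * bitmap-based allocation idiom used by qla2x00_find_new_loop_id() above --
 * find the first clear bit under the lock, validate it, then mark it used
 * before dropping the lock.  loop_id_map, LOOPID_MAP_SIZE and vport_slock
 * appear in the code above; is_reserved() is a hypothetical stand-in for
 * qla2x00_is_reserved_id().
 */
#if 0	/* sketch only -- not compiled */
static long example_alloc_id(unsigned long *map, unsigned long size,
    spinlock_t *lock)
{
	unsigned long flags, id;
	long ret;

	spin_lock_irqsave(lock, flags);
	id = find_first_zero_bit(map, size);
	if (id >= size || is_reserved(id)) {	/* is_reserved(): hypothetical */
		ret = -1;			/* no usable ID available */
	} else {
		set_bit(id, map);		/* claim it while holding the lock */
		ret = id;
	}
	spin_unlock_irqrestore(lock, flags);

	return ret;
}
#endif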
1da177e4
LT
3639/*
3640 * qla2x00_fabric_dev_login
3641 * Login fabric target device and update FC port database.
3642 *
3643 * Input:
3644 * ha: adapter state pointer.
3645 * fcport: port structure list pointer.
3646 * next_loopid: contains value of a new loop ID that can be used
3647 * by the next login attempt.
3648 *
3649 * Returns:
3650 * qla2x00 local function return status code.
3651 *
3652 * Context:
3653 * Kernel context.
3654 */
3655static int
e315cd28 3656qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
1da177e4
LT
3657 uint16_t *next_loopid)
3658{
3659 int rval;
3660 int retry;
0107109e 3661 uint8_t opts;
e315cd28 3662 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
3663
3664 rval = QLA_SUCCESS;
3665 retry = 0;
3666
ac280b67 3667 if (IS_ALOGIO_CAPABLE(ha)) {
5ff1d584
AV
3668 if (fcport->flags & FCF_ASYNC_SENT)
3669 return rval;
3670 fcport->flags |= FCF_ASYNC_SENT;
ac280b67
AV
3671 rval = qla2x00_post_async_login_work(vha, fcport, NULL);
3672 if (!rval)
3673 return rval;
3674 }
3675
5ff1d584 3676 fcport->flags &= ~FCF_ASYNC_SENT;
e315cd28 3677 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
1da177e4 3678 if (rval == QLA_SUCCESS) {
f08b7251 3679 /* Send an ADISC to FCP2 devices.*/
0107109e 3680 opts = 0;
f08b7251 3681 if (fcport->flags & FCF_FCP2_DEVICE)
0107109e 3682 opts |= BIT_1;
e315cd28 3683 rval = qla2x00_get_port_database(vha, fcport, opts);
1da177e4 3684 if (rval != QLA_SUCCESS) {
e315cd28 3685 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
3686 fcport->d_id.b.domain, fcport->d_id.b.area,
3687 fcport->d_id.b.al_pa);
e315cd28 3688 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1da177e4 3689 } else {
e315cd28 3690 qla2x00_update_fcport(vha, fcport);
1da177e4 3691 }
0b91d116
CD
3692 } else {
3693 /* Retry Login. */
3694 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1da177e4
LT
3695 }
3696
3697 return (rval);
3698}
3699
3700/*
3701 * qla2x00_fabric_login
3702 * Issue fabric login command.
3703 *
3704 * Input:
3705 * ha = adapter block pointer.
3706 * device = pointer to FC device type structure.
3707 *
3708 * Returns:
3709 * 0 - Login successful
3710 * 1 - Login failed
3711 * 2 - Initiator device
3712 * 3 - Fatal error
3713 */
3714int
e315cd28 3715qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
1da177e4
LT
3716 uint16_t *next_loopid)
3717{
3718 int rval;
3719 int retry;
3720 uint16_t tmp_loopid;
3721 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 3722 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
3723
3724 retry = 0;
3725 tmp_loopid = 0;
3726
3727 for (;;) {
7c3df132
SK
3728 ql_dbg(ql_dbg_disc, vha, 0x2000,
3729 "Trying Fabric Login w/loop id 0x%04x for port "
3730 "%02x%02x%02x.\n",
3731 fcport->loop_id, fcport->d_id.b.domain,
3732 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
3733
3734 /* Login fcport on switch. */
0b91d116 3735 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
1da177e4
LT
3736 fcport->d_id.b.domain, fcport->d_id.b.area,
3737 fcport->d_id.b.al_pa, mb, BIT_0);
0b91d116
CD
3738 if (rval != QLA_SUCCESS) {
3739 return rval;
3740 }
1da177e4
LT
3741 if (mb[0] == MBS_PORT_ID_USED) {
3742 /*
3743 * Device has another loop ID. The firmware team
0107109e
AV
3744 * recommends the driver perform an implicit login with
3745 * the specified ID again. The ID we just used is saved
3746 * here so we return with an ID that can be tried by
3747 * the next login.
1da177e4
LT
3748 */
3749 retry++;
3750 tmp_loopid = fcport->loop_id;
3751 fcport->loop_id = mb[1];
3752
7c3df132
SK
3753 ql_dbg(ql_dbg_disc, vha, 0x2001,
3754 "Fabric Login: port in use - next loop "
3755 "id=0x%04x, port id= %02x%02x%02x.\n",
1da177e4 3756 fcport->loop_id, fcport->d_id.b.domain,
7c3df132 3757 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
3758
3759 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
3760 /*
3761 * Login succeeded.
3762 */
3763 if (retry) {
3764 /* A retry occurred before. */
3765 *next_loopid = tmp_loopid;
3766 } else {
3767 /*
3768 * No retry occurred before. Just increment the
3769 * ID value for next login.
3770 */
3771 *next_loopid = (fcport->loop_id + 1);
3772 }
3773
3774 if (mb[1] & BIT_0) {
3775 fcport->port_type = FCT_INITIATOR;
3776 } else {
3777 fcport->port_type = FCT_TARGET;
3778 if (mb[1] & BIT_1) {
8474f3a0 3779 fcport->flags |= FCF_FCP2_DEVICE;
1da177e4
LT
3780 }
3781 }
3782
ad3e0eda
AV
3783 if (mb[10] & BIT_0)
3784 fcport->supported_classes |= FC_COS_CLASS2;
3785 if (mb[10] & BIT_1)
3786 fcport->supported_classes |= FC_COS_CLASS3;
3787
2d70c103
NB
3788 if (IS_FWI2_CAPABLE(ha)) {
3789 if (mb[10] & BIT_7)
3790 fcport->flags |=
3791 FCF_CONF_COMP_SUPPORTED;
3792 }
3793
1da177e4
LT
3794 rval = QLA_SUCCESS;
3795 break;
3796 } else if (mb[0] == MBS_LOOP_ID_USED) {
3797 /*
3798 * Loop ID already used, try next loop ID.
3799 */
3800 fcport->loop_id++;
e315cd28 3801 rval = qla2x00_find_new_loop_id(vha, fcport);
1da177e4
LT
3802 if (rval != QLA_SUCCESS) {
3803 /* Ran out of loop IDs to use */
3804 break;
3805 }
3806 } else if (mb[0] == MBS_COMMAND_ERROR) {
3807 /*
3808 * Firmware possibly timed out during login. If NO
3809 * retries are left to do then the device is declared
3810 * dead.
3811 */
3812 *next_loopid = fcport->loop_id;
e315cd28 3813 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
3814 fcport->d_id.b.domain, fcport->d_id.b.area,
3815 fcport->d_id.b.al_pa);
e315cd28 3816 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1da177e4
LT
3817
3818 rval = 1;
3819 break;
3820 } else {
3821 /*
3822 * Unrecoverable / unhandled error.
3823 */
7c3df132
SK
3824 ql_dbg(ql_dbg_disc, vha, 0x2002,
3825 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
3826 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
3827 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3828 fcport->loop_id, jiffies);
1da177e4
LT
3829
3830 *next_loopid = fcport->loop_id;
e315cd28 3831 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
3832 fcport->d_id.b.domain, fcport->d_id.b.area,
3833 fcport->d_id.b.al_pa);
5f16b331 3834 qla2x00_clear_loop_id(fcport);
0eedfcf0 3835 fcport->login_retry = 0;
1da177e4
LT
3836
3837 rval = 3;
3838 break;
3839 }
3840 }
3841
3842 return (rval);
3843}
3844
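/*
 * Editor's illustrative sketch, not part of the original driver: symbolic
 * names for the numeric return codes documented for qla2x00_fabric_login()
 * above and qla2x00_local_device_login() below (whose comment asks
 * "Where's the #define!!!!").  These macro names are hypothetical -- the
 * driver itself returns the bare numbers.
 */
#if 0	/* sketch only -- not compiled */
#define EX_LOGIN_OK		0	/* Login successful */
#define EX_LOGIN_FAILED		1	/* Login failed */
#define EX_LOGIN_INITIATOR	2	/* Initiator device (fabric login only) */
#define EX_LOGIN_FATAL		3	/* Fatal error */
#endif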
3845/*
3846 * qla2x00_local_device_login
3847 * Issue local device login command.
3848 *
3849 * Input:
3850 * ha = adapter block pointer.
3851 * loop_id = loop id of device to login to.
3852 *
3853 * Returns (Where's the #define!!!!):
3854 * 0 - Login successful
3855 * 1 - Login failed
3856 * 3 - Fatal error
3857 */
3858int
e315cd28 3859qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
1da177e4
LT
3860{
3861 int rval;
3862 uint16_t mb[MAILBOX_REGISTER_COUNT];
3863
3864 memset(mb, 0, sizeof(mb));
e315cd28 3865 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
1da177e4
LT
3866 if (rval == QLA_SUCCESS) {
3867 /* Interrogate mailbox registers for any errors */
3868 if (mb[0] == MBS_COMMAND_ERROR)
3869 rval = 1;
3870 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
3871 /* device not in PCB table */
3872 rval = 3;
3873 }
3874
3875 return (rval);
3876}
3877
3878/*
3879 * qla2x00_loop_resync
3880 * Resync with fibre channel devices.
3881 *
3882 * Input:
3883 * ha = adapter block pointer.
3884 *
3885 * Returns:
3886 * 0 = success
3887 */
3888int
e315cd28 3889qla2x00_loop_resync(scsi_qla_host_t *vha)
1da177e4 3890{
73208dfd 3891 int rval = QLA_SUCCESS;
1da177e4 3892 uint32_t wait_time;
67c2e93a
AC
3893 struct req_que *req;
3894 struct rsp_que *rsp;
3895
7163ea81 3896 if (vha->hw->flags.cpu_affinity_enabled)
67c2e93a
AC
3897 req = vha->hw->req_q_map[0];
3898 else
3899 req = vha->req;
3900 rsp = req->rsp;
1da177e4 3901
e315cd28
AC
3902 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3903 if (vha->flags.online) {
3904 if (!(rval = qla2x00_fw_ready(vha))) {
1da177e4
LT
3905 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3906 wait_time = 256;
3907 do {
8ae6d9c7
GM
3908 if (!IS_QLAFX00(vha->hw)) {
3909 /*
3910 * Issue a marker after FW becomes
3911 * ready.
3912 */
3913 qla2x00_marker(vha, req, rsp, 0, 0,
3914 MK_SYNC_ALL);
3915 vha->marker_needed = 0;
3916 }
1da177e4
LT
3917
3918 /* Remap devices on Loop. */
e315cd28 3919 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4 3920
8ae6d9c7
GM
3921 if (IS_QLAFX00(vha->hw))
3922 qlafx00_configure_devices(vha);
3923 else
3924 qla2x00_configure_loop(vha);
3925
1da177e4 3926 wait_time--;
e315cd28
AC
3927 } while (!atomic_read(&vha->loop_down_timer) &&
3928 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3929 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3930 &vha->dpc_flags)));
1da177e4 3931 }
1da177e4
LT
3932 }
3933
e315cd28 3934 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1da177e4 3935 return (QLA_FUNCTION_FAILED);
1da177e4 3936
e315cd28 3937 if (rval)
7c3df132
SK
3938 ql_dbg(ql_dbg_disc, vha, 0x206c,
3939 "%s *** FAILED ***.\n", __func__);
1da177e4
LT
3940
3941 return (rval);
3942}
3943
579d12b5
SK
3944/*
3945* qla2x00_perform_loop_resync
3946* Description: This function will set the appropriate flags and call
3947* qla2x00_loop_resync. If successful, the loop will be resynced.
3948* Arguments : scsi_qla_host_t pointer
3949* return : Success or Failure
3950*/
3951
3952int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
3953{
3954 int32_t rval = 0;
3955
3956 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
3957 /*Configure the flags so that resync happens properly*/
3958 atomic_set(&ha->loop_down_timer, 0);
3959 if (!(ha->device_flags & DFLG_NO_CABLE)) {
3960 atomic_set(&ha->loop_state, LOOP_UP);
3961 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
3962 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
3963 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
3964
3965 rval = qla2x00_loop_resync(ha);
3966 } else
3967 atomic_set(&ha->loop_state, LOOP_DEAD);
3968
3969 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
3970 }
3971
3972 return rval;
3973}
3974
d97994dc 3975void
67becc00 3976qla2x00_update_fcports(scsi_qla_host_t *base_vha)
d97994dc
AV
3977{
3978 fc_port_t *fcport;
feafb7b1
AE
3979 struct scsi_qla_host *vha;
3980 struct qla_hw_data *ha = base_vha->hw;
3981 unsigned long flags;
d97994dc 3982
feafb7b1 3983 spin_lock_irqsave(&ha->vport_slock, flags);
d97994dc 3984 /* Go with deferred removal of rport references. */
feafb7b1
AE
3985 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
3986 atomic_inc(&vha->vref_count);
3987 list_for_each_entry(fcport, &vha->vp_fcports, list) {
8ae598d0 3988 if (fcport->drport &&
feafb7b1
AE
3989 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
3990 spin_unlock_irqrestore(&ha->vport_slock, flags);
67becc00 3991 qla2x00_rport_del(fcport);
feafb7b1
AE
3992 spin_lock_irqsave(&ha->vport_slock, flags);
3993 }
3994 }
3995 atomic_dec(&vha->vref_count);
3996 }
3997 spin_unlock_irqrestore(&ha->vport_slock, flags);
d97994dc
AV
3998}
3999
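/*
 * Editor's illustrative sketch, not part of the original driver: the
 * vref_count/vport_slock idiom used by qla2x00_update_fcports() above and
 * by the ISP-abort paths below for walking ha->vp_list while temporarily
 * dropping the lock -- pin the vport with a reference before unlocking, do
 * the work, then re-take the lock and unpin.  do_work_unlocked() is a
 * hypothetical placeholder for the per-port work (e.g. qla2x00_rport_del()).
 */
#if 0	/* sketch only -- not compiled */
static void example_walk_vports(struct qla_hw_data *ha)
{
	struct scsi_qla_host *vha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		atomic_inc(&vha->vref_count);		/* pin the vport */
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		do_work_unlocked(vha);			/* hypothetical */

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vha->vref_count);		/* unpin */
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
#endif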
7d613ac6
SV
4000/* Assumes idc_lock always held on entry */
4001void
4002qla83xx_reset_ownership(scsi_qla_host_t *vha)
4003{
4004 struct qla_hw_data *ha = vha->hw;
4005 uint32_t drv_presence, drv_presence_mask;
4006 uint32_t dev_part_info1, dev_part_info2, class_type;
4007 uint32_t class_type_mask = 0x3;
4008 uint16_t fcoe_other_function = 0xffff, i;
4009
4010 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4011
4012 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
4013 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
4014 for (i = 0; i < 8; i++) {
4015 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
4016 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
4017 (i != ha->portnum)) {
4018 fcoe_other_function = i;
4019 break;
4020 }
4021 }
4022 if (fcoe_other_function == 0xffff) {
4023 for (i = 0; i < 8; i++) {
4024 class_type = ((dev_part_info2 >> (i * 4)) &
4025 class_type_mask);
4026 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
4027 ((i + 8) != ha->portnum)) {
4028 fcoe_other_function = i + 8;
4029 break;
4030 }
4031 }
4032 }
4033 /*
4034 * Prepare drv-presence mask based on fcoe functions present.
4035 * However consider only valid physical fcoe function numbers (0-15).
4036 */
4037 drv_presence_mask = ~((1 << (ha->portnum)) |
4038 ((fcoe_other_function == 0xffff) ?
4039 0 : (1 << (fcoe_other_function))));
4040
4041 /* We are the reset owner iff:
4042 * - No other protocol drivers present.
4043 * - This is the lowest among fcoe functions. */
4044 if (!(drv_presence & drv_presence_mask) &&
4045 (ha->portnum < fcoe_other_function)) {
4046 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
4047 "This host is Reset owner.\n");
4048 ha->flags.nic_core_reset_owner = 1;
4049 }
4050}
4051
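/*
 * Editor's illustrative sketch, not part of the original driver: how
 * qla83xx_reset_ownership() above decodes the DEV_PARTINFO registers --
 * one 4-bit field per function, of which the low two bits
 * (class_type_mask = 0x3) select the protocol class; functions 0-7 live
 * in DEV_PARTINFO1 and functions 8-15 in DEV_PARTINFO2.  The helper name
 * is hypothetical.
 */
#if 0	/* sketch only -- not compiled */
static uint32_t example_class_of(uint32_t dev_part_info1,
    uint32_t dev_part_info2, unsigned int func)
{
	uint32_t reg = (func < 8) ? dev_part_info1 : dev_part_info2;
	unsigned int shift = (func & 7) * 4;	/* 4 bits per function */

	return (reg >> shift) & 0x3;		/* e.g. QLA83XX_CLASS_TYPE_FCOE */
}
#endif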
fa492630 4052static int
7d613ac6
SV
4053__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
4054{
4055 int rval = QLA_SUCCESS;
4056 struct qla_hw_data *ha = vha->hw;
4057 uint32_t drv_ack;
4058
4059 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
4060 if (rval == QLA_SUCCESS) {
4061 drv_ack |= (1 << ha->portnum);
4062 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
4063 }
4064
4065 return rval;
4066}
4067
fa492630 4068static int
7d613ac6
SV
4069__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
4070{
4071 int rval = QLA_SUCCESS;
4072 struct qla_hw_data *ha = vha->hw;
4073 uint32_t drv_ack;
4074
4075 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
4076 if (rval == QLA_SUCCESS) {
4077 drv_ack &= ~(1 << ha->portnum);
4078 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
4079 }
4080
4081 return rval;
4082}
4083
fa492630 4084static const char *
7d613ac6
SV
4085qla83xx_dev_state_to_string(uint32_t dev_state)
4086{
4087 switch (dev_state) {
4088 case QLA8XXX_DEV_COLD:
4089 return "COLD/RE-INIT";
4090 case QLA8XXX_DEV_INITIALIZING:
4091 return "INITIALIZING";
4092 case QLA8XXX_DEV_READY:
4093 return "READY";
4094 case QLA8XXX_DEV_NEED_RESET:
4095 return "NEED RESET";
4096 case QLA8XXX_DEV_NEED_QUIESCENT:
4097 return "NEED QUIESCENT";
4098 case QLA8XXX_DEV_FAILED:
4099 return "FAILED";
4100 case QLA8XXX_DEV_QUIESCENT:
4101 return "QUIESCENT";
4102 default:
4103 return "Unknown";
4104 }
4105}
4106
4107/* Assumes idc-lock always held on entry */
4108void
4109qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
4110{
4111 struct qla_hw_data *ha = vha->hw;
4112 uint32_t idc_audit_reg = 0, duration_secs = 0;
4113
4114 switch (audit_type) {
4115 case IDC_AUDIT_TIMESTAMP:
4116 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
4117 idc_audit_reg = (ha->portnum) |
4118 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
4119 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
4120 break;
4121
4122 case IDC_AUDIT_COMPLETION:
4123 duration_secs = ((jiffies_to_msecs(jiffies) -
4124 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
4125 idc_audit_reg = (ha->portnum) |
4126 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
4127 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
4128 break;
4129
4130 default:
4131 ql_log(ql_log_warn, vha, 0xb078,
4132 "Invalid audit type specified.\n");
4133 break;
4134 }
4135}
4136
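/*
 * Editor's illustrative sketch, not part of the original driver: the bit
 * layout qla83xx_idc_audit() above writes into QLA83XX_IDC_AUDIT --
 * bits [6:0] = function/port number, bit 7 = audit type (IDC_AUDIT_TIMESTAMP
 * or IDC_AUDIT_COMPLETION), bits [31:8] = timestamp or duration in seconds.
 * The decode helper is hypothetical.
 */
#if 0	/* sketch only -- not compiled */
static void example_decode_idc_audit(uint32_t reg, unsigned int *portnum,
    unsigned int *audit_type, uint32_t *seconds)
{
	*portnum = reg & 0x7f;			/* bits [6:0] */
	*audit_type = (reg >> 7) & 0x1;		/* bit 7 */
	*seconds = reg >> 8;			/* bits [31:8] */
}
#endif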
4137/* Assumes idc_lock always held on entry */
fa492630 4138static int
7d613ac6
SV
4139qla83xx_initiating_reset(scsi_qla_host_t *vha)
4140{
4141 struct qla_hw_data *ha = vha->hw;
4142 uint32_t idc_control, dev_state;
4143
4144 __qla83xx_get_idc_control(vha, &idc_control);
4145 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
4146 ql_log(ql_log_info, vha, 0xb080,
4147 "NIC Core reset has been disabled. idc-control=0x%x\n",
4148 idc_control);
4149 return QLA_FUNCTION_FAILED;
4150 }
4151
4152 /* Set NEED-RESET iff in READY state and we are the reset-owner */
4153 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4154 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
4155 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
4156 QLA8XXX_DEV_NEED_RESET);
4157 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
4158 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
4159 } else {
4160 const char *state = qla83xx_dev_state_to_string(dev_state);
4161 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
4162
4163 /* SV: XXX: Is timeout required here? */
4164 /* Wait for IDC state change READY -> NEED_RESET */
4165 while (dev_state == QLA8XXX_DEV_READY) {
4166 qla83xx_idc_unlock(vha, 0);
4167 msleep(200);
4168 qla83xx_idc_lock(vha, 0);
4169 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4170 }
4171 }
4172
4173 /* Send IDC ack by writing to drv-ack register */
4174 __qla83xx_set_drv_ack(vha);
4175
4176 return QLA_SUCCESS;
4177}
4178
4179int
4180__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
4181{
4182 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
4183}
4184
7d613ac6
SV
4185int
4186__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
4187{
4188 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
4189}
4190
fa492630 4191static int
7d613ac6
SV
4192qla83xx_check_driver_presence(scsi_qla_host_t *vha)
4193{
4194 uint32_t drv_presence = 0;
4195 struct qla_hw_data *ha = vha->hw;
4196
4197 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4198 if (drv_presence & (1 << ha->portnum))
4199 return QLA_SUCCESS;
4200 else
4201 return QLA_TEST_FAILED;
4202}
4203
4204int
4205qla83xx_nic_core_reset(scsi_qla_host_t *vha)
4206{
4207 int rval = QLA_SUCCESS;
4208 struct qla_hw_data *ha = vha->hw;
4209
4210 ql_dbg(ql_dbg_p3p, vha, 0xb058,
4211 "Entered %s().\n", __func__);
4212
4213 if (vha->device_flags & DFLG_DEV_FAILED) {
4214 ql_log(ql_log_warn, vha, 0xb059,
4215 "Device in unrecoverable FAILED state.\n");
4216 return QLA_FUNCTION_FAILED;
4217 }
4218
4219 qla83xx_idc_lock(vha, 0);
4220
4221 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
4222 ql_log(ql_log_warn, vha, 0xb05a,
4223 "Function=0x%x has been removed from IDC participation.\n",
4224 ha->portnum);
4225 rval = QLA_FUNCTION_FAILED;
4226 goto exit;
4227 }
4228
4229 qla83xx_reset_ownership(vha);
4230
4231 rval = qla83xx_initiating_reset(vha);
4232
4233 /*
4234 * Perform reset if we are the reset-owner,
4235 * else wait till IDC state changes to READY/FAILED.
4236 */
4237 if (rval == QLA_SUCCESS) {
4238 rval = qla83xx_idc_state_handler(vha);
4239
4240 if (rval == QLA_SUCCESS)
4241 ha->flags.nic_core_hung = 0;
4242 __qla83xx_clear_drv_ack(vha);
4243 }
4244
4245exit:
4246 qla83xx_idc_unlock(vha, 0);
4247
4248 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
4249
4250 return rval;
4251}
4252
81178772
SK
4253int
4254qla2xxx_mctp_dump(scsi_qla_host_t *vha)
4255{
4256 struct qla_hw_data *ha = vha->hw;
4257 int rval = QLA_FUNCTION_FAILED;
4258
4259 if (!IS_MCTP_CAPABLE(ha)) {
4260 /* This message can be removed from the final version */
4261 ql_log(ql_log_info, vha, 0x506d,
4262 "This board is not MCTP capable\n");
4263 return rval;
4264 }
4265
4266 if (!ha->mctp_dump) {
4267 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
4268 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
4269
4270 if (!ha->mctp_dump) {
4271 ql_log(ql_log_warn, vha, 0x506e,
4272 "Failed to allocate memory for mctp dump\n");
4273 return rval;
4274 }
4275 }
4276
4277#define MCTP_DUMP_STR_ADDR 0x00000000
4278 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
4279 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
4280 if (rval != QLA_SUCCESS) {
4281 ql_log(ql_log_warn, vha, 0x506f,
4282 "Failed to capture mctp dump\n");
4283 } else {
4284 ql_log(ql_log_info, vha, 0x5070,
4285 "Mctp dump captured for host (%ld/%p).\n",
4286 vha->host_no, ha->mctp_dump);
4287 ha->mctp_dumped = 1;
4288 }
4289
409ee0fe 4290 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
81178772
SK
4291 ha->flags.nic_core_reset_hdlr_active = 1;
4292 rval = qla83xx_restart_nic_firmware(vha);
4293 if (rval)
4294 /* NIC Core reset failed. */
4295 ql_log(ql_log_warn, vha, 0x5071,
4296 "Failed to restart nic firmware\n");
4297 else
4298 ql_dbg(ql_dbg_p3p, vha, 0xb084,
4299 "Restarted NIC firmware successfully.\n");
4300 ha->flags.nic_core_reset_hdlr_active = 0;
4301 }
4302
4303 return rval;
4304
4305}
4306
579d12b5 4307/*
8fcd6b8b 4308* qla2x00_quiesce_io
579d12b5
SK
4309* Description: This function will block the new I/Os
4310* It does not abort any I/Os, as the context
4311* is not destroyed during quiescence.
4312* Arguments: scsi_qla_host_t
4313* return : void
4314*/
4315void
8fcd6b8b 4316qla2x00_quiesce_io(scsi_qla_host_t *vha)
579d12b5
SK
4317{
4318 struct qla_hw_data *ha = vha->hw;
4319 struct scsi_qla_host *vp;
4320
8fcd6b8b
CD
4321 ql_dbg(ql_dbg_dpc, vha, 0x401d,
4322 "Quiescing I/O - ha=%p.\n", ha);
579d12b5
SK
4323
4324 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
4325 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
4326 atomic_set(&vha->loop_state, LOOP_DOWN);
4327 qla2x00_mark_all_devices_lost(vha, 0);
4328 list_for_each_entry(vp, &ha->vp_list, list)
8fcd6b8b 4329 qla2x00_mark_all_devices_lost(vp, 0);
579d12b5
SK
4330 } else {
4331 if (!atomic_read(&vha->loop_down_timer))
4332 atomic_set(&vha->loop_down_timer,
4333 LOOP_DOWN_TIME);
4334 }
4335 /* Wait for pending cmds to complete */
4336 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
4337}
4338
a9083016
GM
4339void
4340qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
4341{
4342 struct qla_hw_data *ha = vha->hw;
579d12b5 4343 struct scsi_qla_host *vp;
feafb7b1 4344 unsigned long flags;
6aef87be 4345 fc_port_t *fcport;
a9083016 4346
e46ef004
SK
4347 /* For ISP82XX, the driver waits for completion of the commands,
4348 * so the online flag should remain set.
4349 */
4350 if (!IS_QLA82XX(ha))
4351 vha->flags.online = 0;
a9083016
GM
4352 ha->flags.chip_reset_done = 0;
4353 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2be21fa2 4354 vha->qla_stats.total_isp_aborts++;
a9083016 4355
7c3df132
SK
4356 ql_log(ql_log_info, vha, 0x00af,
4357 "Performing ISP error recovery - ha=%p.\n", ha);
a9083016 4358
e46ef004
SK
4359 /* For ISP82XX, reset_chip only disables interrupts.
4360 * The driver waits for the completion of the commands,
4361 * so the interrupts need to remain enabled.
4362 */
a9083016
GM
4363 if (!IS_QLA82XX(ha))
4364 ha->isp_ops->reset_chip(vha);
4365
4366 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
4367 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
4368 atomic_set(&vha->loop_state, LOOP_DOWN);
4369 qla2x00_mark_all_devices_lost(vha, 0);
feafb7b1
AE
4370
4371 spin_lock_irqsave(&ha->vport_slock, flags);
579d12b5 4372 list_for_each_entry(vp, &ha->vp_list, list) {
feafb7b1
AE
4373 atomic_inc(&vp->vref_count);
4374 spin_unlock_irqrestore(&ha->vport_slock, flags);
4375
a9083016 4376 qla2x00_mark_all_devices_lost(vp, 0);
feafb7b1
AE
4377
4378 spin_lock_irqsave(&ha->vport_slock, flags);
4379 atomic_dec(&vp->vref_count);
4380 }
4381 spin_unlock_irqrestore(&ha->vport_slock, flags);
a9083016
GM
4382 } else {
4383 if (!atomic_read(&vha->loop_down_timer))
4384 atomic_set(&vha->loop_down_timer,
4385 LOOP_DOWN_TIME);
4386 }
4387
6aef87be
AV
4388 /* Clear all async request states across all VPs. */
4389 list_for_each_entry(fcport, &vha->vp_fcports, list)
4390 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
4391 spin_lock_irqsave(&ha->vport_slock, flags);
4392 list_for_each_entry(vp, &ha->vp_list, list) {
4393 atomic_inc(&vp->vref_count);
4394 spin_unlock_irqrestore(&ha->vport_slock, flags);
4395
4396 list_for_each_entry(fcport, &vp->vp_fcports, list)
4397 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
4398
4399 spin_lock_irqsave(&ha->vport_slock, flags);
4400 atomic_dec(&vp->vref_count);
4401 }
4402 spin_unlock_irqrestore(&ha->vport_slock, flags);
4403
bddd2d65
LC
4404 if (!ha->flags.eeh_busy) {
4405 /* Make sure for ISP 82XX IO DMA is complete */
4406 if (IS_QLA82XX(ha)) {
7190575f 4407 qla82xx_chip_reset_cleanup(vha);
7c3df132
SK
4408 ql_log(ql_log_info, vha, 0x00b4,
4409 "Done chip reset cleanup.\n");
a9083016 4410
e46ef004
SK
4411 /* Done waiting for pending commands.
4412 * Reset the online flag.
4413 */
4414 vha->flags.online = 0;
4d78c973 4415 }
a9083016 4416
bddd2d65
LC
4417 /* Requeue all commands in outstanding command list. */
4418 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
4419 }
a9083016
GM
4420}
4421
1da177e4
LT
4422/*
4423* qla2x00_abort_isp
4424* Resets ISP and aborts all outstanding commands.
4425*
4426* Input:
4427* ha = adapter block pointer.
4428*
4429* Returns:
4430* 0 = success
4431*/
4432int
e315cd28 4433qla2x00_abort_isp(scsi_qla_host_t *vha)
1da177e4 4434{
476e8978 4435 int rval;
1da177e4 4436 uint8_t status = 0;
e315cd28
AC
4437 struct qla_hw_data *ha = vha->hw;
4438 struct scsi_qla_host *vp;
73208dfd 4439 struct req_que *req = ha->req_q_map[0];
feafb7b1 4440 unsigned long flags;
1da177e4 4441
e315cd28 4442 if (vha->flags.online) {
a9083016 4443 qla2x00_abort_isp_cleanup(vha);
1da177e4 4444
a6171297
SV
4445 if (IS_QLA8031(ha)) {
4446 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
4447 "Clearing fcoe driver presence.\n");
4448 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
4449 ql_dbg(ql_dbg_p3p, vha, 0xb073,
4450 "Error while clearing DRV-Presence.\n");
4451 }
4452
85880801
AV
4453 if (unlikely(pci_channel_offline(ha->pdev) &&
4454 ha->flags.pci_channel_io_perm_failure)) {
4455 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4456 status = 0;
4457 return status;
4458 }
4459
73208dfd 4460 ha->isp_ops->get_flash_version(vha, req->ring);
30c47662 4461
e315cd28 4462 ha->isp_ops->nvram_config(vha);
1da177e4 4463
e315cd28
AC
4464 if (!qla2x00_restart_isp(vha)) {
4465 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4 4466
e315cd28 4467 if (!atomic_read(&vha->loop_down_timer)) {
1da177e4
LT
4468 /*
4469 * Issue marker command only when we are going
4470 * to start the I/O .
4471 */
e315cd28 4472 vha->marker_needed = 1;
1da177e4
LT
4473 }
4474
e315cd28 4475 vha->flags.online = 1;
1da177e4 4476
fd34f556 4477 ha->isp_ops->enable_intrs(ha);
1da177e4 4478
fa2a1ce5 4479 ha->isp_abort_cnt = 0;
e315cd28 4480 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
476e8978 4481
6246b8a1
GM
4482 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
4483 qla2x00_get_fw_version(vha);
df613b96
AV
4484 if (ha->fce) {
4485 ha->flags.fce_enabled = 1;
4486 memset(ha->fce, 0,
4487 fce_calc_size(ha->fce_bufs));
e315cd28 4488 rval = qla2x00_enable_fce_trace(vha,
df613b96
AV
4489 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
4490 &ha->fce_bufs);
4491 if (rval) {
7c3df132 4492 ql_log(ql_log_warn, vha, 0x8033,
df613b96
AV
4493 "Unable to reinitialize FCE "
4494 "(%d).\n", rval);
4495 ha->flags.fce_enabled = 0;
4496 }
4497 }
436a7b11
AV
4498
4499 if (ha->eft) {
4500 memset(ha->eft, 0, EFT_SIZE);
e315cd28 4501 rval = qla2x00_enable_eft_trace(vha,
436a7b11
AV
4502 ha->eft_dma, EFT_NUM_BUFFERS);
4503 if (rval) {
7c3df132 4504 ql_log(ql_log_warn, vha, 0x8034,
436a7b11
AV
4505 "Unable to reinitialize EFT "
4506 "(%d).\n", rval);
4507 }
4508 }
1da177e4 4509 } else { /* failed the ISP abort */
e315cd28
AC
4510 vha->flags.online = 1;
4511 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1da177e4 4512 if (ha->isp_abort_cnt == 0) {
7c3df132
SK
4513 ql_log(ql_log_fatal, vha, 0x8035,
4514 "ISP error recover failed - "
4515 "board disabled.\n");
fa2a1ce5 4516 /*
1da177e4
LT
4517 * The next call disables the board
4518 * completely.
4519 */
e315cd28
AC
4520 ha->isp_ops->reset_adapter(vha);
4521 vha->flags.online = 0;
1da177e4 4522 clear_bit(ISP_ABORT_RETRY,
e315cd28 4523 &vha->dpc_flags);
1da177e4
LT
4524 status = 0;
4525 } else { /* schedule another ISP abort */
4526 ha->isp_abort_cnt--;
7c3df132
SK
4527 ql_dbg(ql_dbg_taskm, vha, 0x8020,
4528 "ISP abort - retry remaining %d.\n",
4529 ha->isp_abort_cnt);
1da177e4
LT
4530 status = 1;
4531 }
4532 } else {
4533 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
7c3df132
SK
4534 ql_dbg(ql_dbg_taskm, vha, 0x8021,
4535 "ISP error recovery - retrying (%d) "
4536 "more times.\n", ha->isp_abort_cnt);
e315cd28 4537 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
1da177e4
LT
4538 status = 1;
4539 }
4540 }
fa2a1ce5 4541
1da177e4
LT
4542 }
4543
e315cd28 4544 if (!status) {
7c3df132 4545 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
feafb7b1
AE
4546
4547 spin_lock_irqsave(&ha->vport_slock, flags);
4548 list_for_each_entry(vp, &ha->vp_list, list) {
4549 if (vp->vp_idx) {
4550 atomic_inc(&vp->vref_count);
4551 spin_unlock_irqrestore(&ha->vport_slock, flags);
4552
e315cd28 4553 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
4554
4555 spin_lock_irqsave(&ha->vport_slock, flags);
4556 atomic_dec(&vp->vref_count);
4557 }
e315cd28 4558 }
feafb7b1
AE
4559 spin_unlock_irqrestore(&ha->vport_slock, flags);
4560
7d613ac6
SV
4561 if (IS_QLA8031(ha)) {
4562 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
4563 "Setting back fcoe driver presence.\n");
4564 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
4565 ql_dbg(ql_dbg_p3p, vha, 0xb074,
4566 "Error while setting DRV-Presence.\n");
4567 }
e315cd28 4568 } else {
d8424f68
JP
4569 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
4570 __func__);
1da177e4
LT
4571 }
4572
4573 return(status);
4574}
4575
4576/*
4577* qla2x00_restart_isp
4578* restarts the ISP after a reset
4579*
4580* Input:
4581* ha = adapter block pointer.
4582*
4583* Returns:
4584* 0 = success
4585*/
4586static int
e315cd28 4587qla2x00_restart_isp(scsi_qla_host_t *vha)
1da177e4 4588{
c6b2fca8 4589 int status = 0;
1da177e4 4590 uint32_t wait_time;
e315cd28 4591 struct qla_hw_data *ha = vha->hw;
73208dfd
AC
4592 struct req_que *req = ha->req_q_map[0];
4593 struct rsp_que *rsp = ha->rsp_q_map[0];
2d70c103 4594 unsigned long flags;
1da177e4
LT
4595
4596 /* If firmware needs to be loaded */
e315cd28
AC
4597 if (qla2x00_isp_firmware(vha)) {
4598 vha->flags.online = 0;
4599 status = ha->isp_ops->chip_diag(vha);
4600 if (!status)
4601 status = qla2x00_setup_chip(vha);
1da177e4
LT
4602 }
4603
e315cd28
AC
4604 if (!status && !(status = qla2x00_init_rings(vha))) {
4605 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
2533cf67 4606 ha->flags.chip_reset_done = 1;
73208dfd
AC
4607 /* Initialize the queues in use */
4608 qla25xx_init_queues(ha);
4609
e315cd28
AC
4610 status = qla2x00_fw_ready(vha);
4611 if (!status) {
7c3df132
SK
4612 ql_dbg(ql_dbg_taskm, vha, 0x8031,
4613 "Start configure loop status = %d.\n", status);
0107109e
AV
4614
4615 /* Issue a marker after FW becomes ready. */
73208dfd 4616 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
0107109e 4617
e315cd28 4618 vha->flags.online = 1;
2d70c103
NB
4619
4620 /*
4621 * Process any ATIO queue entries that came in
4622 * while we weren't online.
4623 */
4624 spin_lock_irqsave(&ha->hardware_lock, flags);
4625 if (qla_tgt_mode_enabled(vha))
4626 qlt_24xx_process_atio_queue(vha);
4627 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4628
1da177e4
LT
4629 /* Wait at most MAX_TARGET RSCNs for a stable link. */
4630 wait_time = 256;
4631 do {
e315cd28
AC
4632 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4633 qla2x00_configure_loop(vha);
1da177e4 4634 wait_time--;
e315cd28
AC
4635 } while (!atomic_read(&vha->loop_down_timer) &&
4636 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
4637 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
4638 &vha->dpc_flags)));
1da177e4
LT
4639 }
4640
4641 /* if no cable then assume it's good */
e315cd28 4642 if ((vha->device_flags & DFLG_NO_CABLE))
1da177e4
LT
4643 status = 0;
4644
7c3df132
SK
4645 ql_dbg(ql_dbg_taskm, vha, 0x8032,
4646 "Configure loop done, status = 0x%x.\n", status);
1da177e4
LT
4647 }
4648 return (status);
4649}
4650
73208dfd
AC
4651static int
4652qla25xx_init_queues(struct qla_hw_data *ha)
4653{
4654 struct rsp_que *rsp = NULL;
4655 struct req_que *req = NULL;
4656 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4657 int ret = -1;
4658 int i;
4659
2afa19a9 4660 for (i = 1; i < ha->max_rsp_queues; i++) {
73208dfd
AC
4661 rsp = ha->rsp_q_map[i];
4662 if (rsp) {
4663 rsp->options &= ~BIT_0;
618a7523 4664 ret = qla25xx_init_rsp_que(base_vha, rsp);
73208dfd 4665 if (ret != QLA_SUCCESS)
7c3df132
SK
4666 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
4667 "%s Rsp que: %d init failed.\n",
4668 __func__, rsp->id);
73208dfd 4669 else
7c3df132
SK
4670 ql_dbg(ql_dbg_init, base_vha, 0x0100,
4671 "%s Rsp que: %d inited.\n",
4672 __func__, rsp->id);
73208dfd 4673 }
2afa19a9
AC
4674 }
4675 for (i = 1; i < ha->max_req_queues; i++) {
73208dfd
AC
4676 req = ha->req_q_map[i];
4677 if (req) {
29bdccbe 4678 /* Clear outstanding commands array. */
73208dfd 4679 req->options &= ~BIT_0;
618a7523 4680 ret = qla25xx_init_req_que(base_vha, req);
73208dfd 4681 if (ret != QLA_SUCCESS)
7c3df132
SK
4682 ql_dbg(ql_dbg_init, base_vha, 0x0101,
4683 "%s Req que: %d init failed.\n",
4684 __func__, req->id);
73208dfd 4685 else
7c3df132
SK
4686 ql_dbg(ql_dbg_init, base_vha, 0x0102,
4687 "%s Req que: %d inited.\n",
4688 __func__, req->id);
73208dfd
AC
4689 }
4690 }
4691 return ret;
4692}
4693
1da177e4
LT
4694/*
4695* qla2x00_reset_adapter
4696* Reset adapter.
4697*
4698* Input:
4699* ha = adapter block pointer.
4700*/
abbd8870 4701void
e315cd28 4702qla2x00_reset_adapter(scsi_qla_host_t *vha)
1da177e4
LT
4703{
4704 unsigned long flags = 0;
e315cd28 4705 struct qla_hw_data *ha = vha->hw;
3d71644c 4706 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 4707
e315cd28 4708 vha->flags.online = 0;
fd34f556 4709 ha->isp_ops->disable_intrs(ha);
1da177e4 4710
1da177e4
LT
4711 spin_lock_irqsave(&ha->hardware_lock, flags);
4712 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
4713 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
4714 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
4715 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
4716 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4717}
0107109e
AV
4718
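/*
 * Editor's illustrative sketch, not part of the original driver: the
 * "PCI posting" idiom used by the reset routines above and below -- after
 * writing a memory-mapped register, read it back so the posted write is
 * flushed to the device before the next step.  WRT_REG_WORD/RD_REG_WORD
 * are the driver's own accessors; example_pulse_hccr() is hypothetical.
 */
#if 0	/* sketch only -- not compiled */
static void example_pulse_hccr(struct device_reg_2xxx __iomem *reg,
    uint16_t cmd)
{
	WRT_REG_WORD(&reg->hccr, cmd);	/* posted write */
	RD_REG_WORD(&reg->hccr);	/* read back to flush the posting */
}
#endif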
4719void
e315cd28 4720qla24xx_reset_adapter(scsi_qla_host_t *vha)
0107109e
AV
4721{
4722 unsigned long flags = 0;
e315cd28 4723 struct qla_hw_data *ha = vha->hw;
0107109e
AV
4724 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4725
a9083016
GM
4726 if (IS_QLA82XX(ha))
4727 return;
4728
e315cd28 4729 vha->flags.online = 0;
fd34f556 4730 ha->isp_ops->disable_intrs(ha);
0107109e
AV
4731
4732 spin_lock_irqsave(&ha->hardware_lock, flags);
4733 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
4734 RD_REG_DWORD(&reg->hccr);
4735 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
4736 RD_REG_DWORD(&reg->hccr);
4737 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09ff36d3
AV
4738
4739 if (IS_NOPOLLING_TYPE(ha))
4740 ha->isp_ops->enable_intrs(ha);
0107109e
AV
4741}
4742
4e08df3f
DM
4743/* On sparc systems, obtain port and node WWN from firmware
4744 * properties.
4745 */
e315cd28
AC
4746static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
4747 struct nvram_24xx *nv)
4e08df3f
DM
4748{
4749#ifdef CONFIG_SPARC
e315cd28 4750 struct qla_hw_data *ha = vha->hw;
4e08df3f 4751 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
4752 struct device_node *dp = pci_device_to_OF_node(pdev);
4753 const u8 *val;
4e08df3f
DM
4754 int len;
4755
4756 val = of_get_property(dp, "port-wwn", &len);
4757 if (val && len >= WWN_SIZE)
4758 memcpy(nv->port_name, val, WWN_SIZE);
4759
4760 val = of_get_property(dp, "node-wwn", &len);
4761 if (val && len >= WWN_SIZE)
4762 memcpy(nv->node_name, val, WWN_SIZE);
4763#endif
4764}
4765
0107109e 4766int
e315cd28 4767qla24xx_nvram_config(scsi_qla_host_t *vha)
0107109e 4768{
4e08df3f 4769 int rval;
0107109e
AV
4770 struct init_cb_24xx *icb;
4771 struct nvram_24xx *nv;
4772 uint32_t *dptr;
4773 uint8_t *dptr1, *dptr2;
4774 uint32_t chksum;
4775 uint16_t cnt;
e315cd28 4776 struct qla_hw_data *ha = vha->hw;
0107109e 4777
4e08df3f 4778 rval = QLA_SUCCESS;
0107109e 4779 icb = (struct init_cb_24xx *)ha->init_cb;
281afe19 4780 nv = ha->nvram;
0107109e
AV
4781
4782 /* Determine NVRAM starting address. */
e5b68a61
AC
4783 if (ha->flags.port0) {
4784 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
4785 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
4786 } else {
0107109e 4787 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
6f641790
AV
4788 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
4789 }
e5b68a61
AC
4790 ha->nvram_size = sizeof(struct nvram_24xx);
4791 ha->vpd_size = FA_NVRAM_VPD_SIZE;
a9083016
GM
4792 if (IS_QLA82XX(ha))
4793 ha->vpd_size = FA_VPD_SIZE_82XX;
0107109e 4794
281afe19
SJ
4795 /* Get VPD data into cache */
4796 ha->vpd = ha->nvram + VPD_OFFSET;
e315cd28 4797 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
281afe19
SJ
4798 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
4799
4800 /* Get NVRAM data into cache and calculate checksum. */
0107109e 4801 dptr = (uint32_t *)nv;
e315cd28 4802 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
0107109e
AV
4803 ha->nvram_size);
4804 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4805 chksum += le32_to_cpu(*dptr++);
4806
7c3df132
SK
4807 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
4808 "Contents of NVRAM\n");
4809 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
4810 (uint8_t *)nv, ha->nvram_size);
0107109e
AV
4811
4812 /* Bad NVRAM data, set defaults parameters. */
4813 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4814 || nv->id[3] != ' ' ||
4815 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4816 /* Reset NVRAM data. */
7c3df132 4817 ql_log(ql_log_warn, vha, 0x006b,
9e336520 4818 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7c3df132
SK
4819 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
4820 ql_log(ql_log_warn, vha, 0x006c,
4821 "Falling back to functioning (yet invalid -- WWPN) "
4822 "defaults.\n");
4e08df3f
DM
4823
4824 /*
4825 * Set default initialization control block.
4826 */
4827 memset(nv, 0, ha->nvram_size);
4828 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4829 nv->version = __constant_cpu_to_le16(ICB_VERSION);
4830 nv->frame_payload_size = __constant_cpu_to_le16(2048);
4831 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4832 nv->exchange_count = __constant_cpu_to_le16(0);
4833 nv->hard_address = __constant_cpu_to_le16(124);
4834 nv->port_name[0] = 0x21;
e5b68a61 4835 nv->port_name[1] = 0x00 + ha->port_no;
4e08df3f
DM
4836 nv->port_name[2] = 0x00;
4837 nv->port_name[3] = 0xe0;
4838 nv->port_name[4] = 0x8b;
4839 nv->port_name[5] = 0x1c;
4840 nv->port_name[6] = 0x55;
4841 nv->port_name[7] = 0x86;
4842 nv->node_name[0] = 0x20;
4843 nv->node_name[1] = 0x00;
4844 nv->node_name[2] = 0x00;
4845 nv->node_name[3] = 0xe0;
4846 nv->node_name[4] = 0x8b;
4847 nv->node_name[5] = 0x1c;
4848 nv->node_name[6] = 0x55;
4849 nv->node_name[7] = 0x86;
e315cd28 4850 qla24xx_nvram_wwn_from_ofw(vha, nv);
4e08df3f
DM
4851 nv->login_retry_count = __constant_cpu_to_le16(8);
4852 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4853 nv->login_timeout = __constant_cpu_to_le16(0);
4854 nv->firmware_options_1 =
4855 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4856 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4857 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4858 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4859 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4860 nv->efi_parameters = __constant_cpu_to_le32(0);
4861 nv->reset_delay = 5;
4862 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4863 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4864 nv->link_down_timeout = __constant_cpu_to_le16(30);
4865
4866 rval = 1;
0107109e
AV
4867 }
4868
2d70c103
NB
4869 if (!qla_ini_mode_enabled(vha)) {
4870 /* Don't enable full login after initial LIP */
4871 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
4872 /* Don't enable LIP full login for initiator */
4873 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
4874 }
4875
4876 qlt_24xx_config_nvram_stage1(vha, nv);
4877
0107109e 4878 /* Reset Initialization control block */
e315cd28 4879 memset(icb, 0, ha->init_cb_size);
0107109e
AV
4880
4881 /* Copy 1st segment. */
4882 dptr1 = (uint8_t *)icb;
4883 dptr2 = (uint8_t *)&nv->version;
4884 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
4885 while (cnt--)
4886 *dptr1++ = *dptr2++;
4887
4888 icb->login_retry_count = nv->login_retry_count;
3ea66e28 4889 icb->link_down_on_nos = nv->link_down_on_nos;
0107109e
AV
4890
4891 /* Copy 2nd segment. */
4892 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
4893 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
4894 cnt = (uint8_t *)&icb->reserved_3 -
4895 (uint8_t *)&icb->interrupt_delay_timer;
4896 while (cnt--)
4897 *dptr1++ = *dptr2++;
4898
4899 /*
4900 * Setup driver NVRAM options.
4901 */
e315cd28 4902 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
9bb9fcf2 4903 "QLA2462");
0107109e 4904
2d70c103
NB
4905 qlt_24xx_config_nvram_stage2(vha, icb);
4906
5341e868 4907 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
2d70c103 4908 /* Use alternate WWN? */
5341e868
AV
4909 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4910 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4911 }
4912
0107109e 4913 /* Prepare nodename */
fd0e7e4d 4914 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
0107109e
AV
4915 /*
4916 * Firmware will apply the following mask if the nodename was
4917 * not provided.
4918 */
4919 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4920 icb->node_name[0] &= 0xF0;
4921 }
4922
4923 /* Set host adapter parameters. */
4924 ha->flags.disable_risc_code_load = 0;
0c8c39af
AV
4925 ha->flags.enable_lip_reset = 0;
4926 ha->flags.enable_lip_full_login =
4927 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
4928 ha->flags.enable_target_reset =
4929 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
0107109e 4930 ha->flags.enable_led_scheme = 0;
d4c760c2 4931 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
0107109e 4932
fd0e7e4d
AV
4933 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
4934 (BIT_6 | BIT_5 | BIT_4)) >> 4;
0107109e
AV
4935
4936 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
4937 sizeof(ha->fw_seriallink_options24));
4938
4939 /* save HBA serial number */
4940 ha->serial0 = icb->port_name[5];
4941 ha->serial1 = icb->port_name[6];
4942 ha->serial2 = icb->port_name[7];
e315cd28
AC
4943 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4944 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
0107109e 4945
bc8fb3cb
AV
4946 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4947
0107109e
AV
4948 ha->retry_count = le16_to_cpu(nv->login_retry_count);
4949
4950 /* Set minimum login_timeout to 4 seconds. */
4951 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
4952 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
4953 if (le16_to_cpu(nv->login_timeout) < 4)
4954 nv->login_timeout = __constant_cpu_to_le16(4);
4955 ha->login_timeout = le16_to_cpu(nv->login_timeout);
c6852c4c 4956 icb->login_timeout = nv->login_timeout;
0107109e 4957
00a537b8
AV
4958 /* Set minimum RATOV to 100 tenths of a second. */
4959 ha->r_a_tov = 100;
0107109e
AV
4960
4961 ha->loop_reset_delay = nv->reset_delay;
4962
4963 /* Link Down Timeout = 0:
4964 *
4965 * When Port Down timer expires we will start returning
4966 * I/O's to OS with "DID_NO_CONNECT".
4967 *
4968 * Link Down Timeout != 0:
4969 *
4970 * The driver waits for the link to come up after link down
4971 * before returning I/Os to OS with "DID_NO_CONNECT".
4972 */
4973 if (le16_to_cpu(nv->link_down_timeout) == 0) {
4974 ha->loop_down_abort_time =
4975 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4976 } else {
4977 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
4978 ha->loop_down_abort_time =
4979 (LOOP_DOWN_TIME - ha->link_down_timeout);
4980 }
4981
4982 /* Need enough time to try and get the port back. */
4983 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
4984 if (qlport_down_retry)
4985 ha->port_down_retry_count = qlport_down_retry;
4986
4987 /* Set login_retry_count */
4988 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
4989 if (ha->port_down_retry_count ==
4990 le16_to_cpu(nv->port_down_retry_count) &&
4991 ha->port_down_retry_count > 3)
4992 ha->login_retry_count = ha->port_down_retry_count;
4993 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4994 ha->login_retry_count = ha->port_down_retry_count;
4995 if (ql2xloginretrycount)
4996 ha->login_retry_count = ql2xloginretrycount;
4997
4fdfefe5 4998 /* Enable ZIO. */
e315cd28 4999 if (!vha->flags.init_done) {
4fdfefe5
AV
5000 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
5001 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5002 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
5003 le16_to_cpu(icb->interrupt_delay_timer): 2;
5004 }
5005 icb->firmware_options_2 &= __constant_cpu_to_le32(
5006 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
e315cd28 5007 vha->flags.process_response_queue = 0;
4fdfefe5 5008 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d
AV
5009 ha->zio_mode = QLA_ZIO_MODE_6;
5010
7c3df132 5011 ql_log(ql_log_info, vha, 0x006f,
4fdfefe5
AV
5012 "ZIO mode %d enabled; timer delay (%d us).\n",
5013 ha->zio_mode, ha->zio_timer * 100);
5014
5015 icb->firmware_options_2 |= cpu_to_le32(
5016 (uint32_t)ha->zio_mode);
5017 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
e315cd28 5018 vha->flags.process_response_queue = 1;
4fdfefe5
AV
5019 }
5020
4e08df3f 5021 if (rval) {
7c3df132
SK
5022 ql_log(ql_log_warn, vha, 0x0070,
5023 "NVRAM configuration failed.\n");
4e08df3f
DM
5024 }
5025 return (rval);
0107109e
AV
5026}
5027
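/*
 * Editor's illustrative sketch, not part of the original driver: the
 * additive checksum rule qla24xx_nvram_config() above relies on -- all
 * 32-bit little-endian words of the NVRAM image, including the stored
 * checksum word, must sum to zero.  example_nvram_csum_ok() is hypothetical.
 */
#if 0	/* sketch only -- not compiled */
static int example_nvram_csum_ok(const __le32 *nv, uint32_t size_bytes)
{
	uint32_t chksum = 0, cnt;

	for (cnt = 0; cnt < size_bytes >> 2; cnt++)
		chksum += le32_to_cpu(nv[cnt]);

	return chksum == 0;	/* nonzero sum => corrupt NVRAM */
}
#endif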
413975a0 5028static int
cbc8eb67
AV
5029qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
5030 uint32_t faddr)
d1c61909 5031{
73208dfd 5032 int rval = QLA_SUCCESS;
d1c61909 5033 int segments, fragment;
d1c61909
AV
5034 uint32_t *dcode, dlen;
5035 uint32_t risc_addr;
5036 uint32_t risc_size;
5037 uint32_t i;
e315cd28 5038 struct qla_hw_data *ha = vha->hw;
73208dfd 5039 struct req_que *req = ha->req_q_map[0];
eaac30be 5040
7c3df132 5041 ql_dbg(ql_dbg_init, vha, 0x008b,
cfb0919c 5042 "FW: Loading firmware from flash (%x).\n", faddr);
eaac30be 5043
d1c61909
AV
5044 rval = QLA_SUCCESS;
5045
5046 segments = FA_RISC_CODE_SEGMENTS;
73208dfd 5047 dcode = (uint32_t *)req->ring;
d1c61909
AV
5048 *srisc_addr = 0;
5049
5050 /* Validate firmware image by checking version. */
e315cd28 5051 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
d1c61909
AV
5052 for (i = 0; i < 4; i++)
5053 dcode[i] = be32_to_cpu(dcode[i]);
5054 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
5055 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
5056 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
5057 dcode[3] == 0)) {
7c3df132
SK
5058 ql_log(ql_log_fatal, vha, 0x008c,
5059 "Unable to verify the integrity of flash firmware "
5060 "image.\n");
5061 ql_log(ql_log_fatal, vha, 0x008d,
5062 "Firmware data: %08x %08x %08x %08x.\n",
5063 dcode[0], dcode[1], dcode[2], dcode[3]);
d1c61909
AV
5064
5065 return QLA_FUNCTION_FAILED;
5066 }
5067
5068 while (segments && rval == QLA_SUCCESS) {
5069 /* Read segment's load information. */
e315cd28 5070 qla24xx_read_flash_data(vha, dcode, faddr, 4);
d1c61909
AV
5071
5072 risc_addr = be32_to_cpu(dcode[2]);
5073 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
5074 risc_size = be32_to_cpu(dcode[3]);
5075
5076 fragment = 0;
5077 while (risc_size > 0 && rval == QLA_SUCCESS) {
5078 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
5079 if (dlen > risc_size)
5080 dlen = risc_size;
5081
7c3df132
SK
5082 ql_dbg(ql_dbg_init, vha, 0x008e,
5083 "Loading risc segment @ risc addr %x "
5084 "number of dwords 0x%x offset 0x%x.\n",
5085 risc_addr, dlen, faddr);
d1c61909 5086
e315cd28 5087 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
d1c61909
AV
5088 for (i = 0; i < dlen; i++)
5089 dcode[i] = swab32(dcode[i]);
5090
73208dfd 5091 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
d1c61909
AV
5092 dlen);
5093 if (rval) {
7c3df132
SK
5094 ql_log(ql_log_fatal, vha, 0x008f,
5095 "Failed to load segment %d of firmware.\n",
5096 fragment);
d1c61909
AV
5097 break;
5098 }
5099
5100 faddr += dlen;
5101 risc_addr += dlen;
5102 risc_size -= dlen;
5103 fragment++;
5104 }
5105
5106 /* Next segment. */
5107 segments--;
5108 }
5109
5110 return rval;
5111}
5112
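/*
 * Editor's illustrative sketch, not part of the original driver: the
 * sanity check the flash and request-firmware loaders above and below
 * apply to the first four big-endian words of a firmware image -- all-ones
 * (erased flash) or all-zeros means there is no usable image.
 * example_fw_hdr_ok() is hypothetical.
 */
#if 0	/* sketch only -- not compiled */
static int example_fw_hdr_ok(const uint32_t *dcode)
{
	int all_ones = (dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff);
	int all_zero = (dcode[0] == 0 && dcode[1] == 0 &&
	    dcode[2] == 0 && dcode[3] == 0);

	return !(all_ones || all_zero);
}
#endif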
e9454a88 5113#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
d1c61909 5114
0107109e 5115int
e315cd28 5116qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5433383e
AV
5117{
5118 int rval;
5119 int i, fragment;
5120 uint16_t *wcode, *fwcode;
5121 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
5122 struct fw_blob *blob;
e315cd28 5123 struct qla_hw_data *ha = vha->hw;
73208dfd 5124 struct req_que *req = ha->req_q_map[0];
5433383e
AV
5125
5126 /* Load firmware blob. */
e315cd28 5127 blob = qla2x00_request_firmware(vha);
5433383e 5128 if (!blob) {
7c3df132
SK
5129 ql_log(ql_log_info, vha, 0x0083,
5130 "Firmware image unavailable.\n");
5131 ql_log(ql_log_info, vha, 0x0084,
5132 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
5433383e
AV
5133 return QLA_FUNCTION_FAILED;
5134 }
5135
5136 rval = QLA_SUCCESS;
5137
73208dfd 5138 wcode = (uint16_t *)req->ring;
5433383e
AV
5139 *srisc_addr = 0;
5140 fwcode = (uint16_t *)blob->fw->data;
5141 fwclen = 0;
5142
5143 /* Validate firmware image by checking version. */
5144 if (blob->fw->size < 8 * sizeof(uint16_t)) {
7c3df132
SK
5145 ql_log(ql_log_fatal, vha, 0x0085,
5146 "Unable to verify integrity of firmware image (%Zd).\n",
5433383e
AV
5147 blob->fw->size);
5148 goto fail_fw_integrity;
5149 }
5150 for (i = 0; i < 4; i++)
5151 wcode[i] = be16_to_cpu(fwcode[i + 4]);
5152 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
5153 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
5154 wcode[2] == 0 && wcode[3] == 0)) {
7c3df132
SK
5155 ql_log(ql_log_fatal, vha, 0x0086,
5156 "Unable to verify integrity of firmware image.\n");
5157 ql_log(ql_log_fatal, vha, 0x0087,
5158 "Firmware data: %04x %04x %04x %04x.\n",
5159 wcode[0], wcode[1], wcode[2], wcode[3]);
5433383e
AV
5160 goto fail_fw_integrity;
5161 }
5162
5163 seg = blob->segs;
5164 while (*seg && rval == QLA_SUCCESS) {
5165 risc_addr = *seg;
5166 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
5167 risc_size = be16_to_cpu(fwcode[3]);
5168
5169 /* Validate firmware image size. */
5170 fwclen += risc_size * sizeof(uint16_t);
5171 if (blob->fw->size < fwclen) {
7c3df132 5172 ql_log(ql_log_fatal, vha, 0x0088,
5433383e 5173 "Unable to verify integrity of firmware image "
7c3df132 5174 "(%Zd).\n", blob->fw->size);
5433383e
AV
5175 goto fail_fw_integrity;
5176 }
5177
5178 fragment = 0;
5179 while (risc_size > 0 && rval == QLA_SUCCESS) {
5180 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
5181 if (wlen > risc_size)
5182 wlen = risc_size;
7c3df132
SK
5183 ql_dbg(ql_dbg_init, vha, 0x0089,
5184 "Loading risc segment @ risc addr %x number of "
5185 "words 0x%x.\n", risc_addr, wlen);
5433383e
AV
5186
5187 for (i = 0; i < wlen; i++)
5188 wcode[i] = swab16(fwcode[i]);
5189
73208dfd 5190 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5433383e
AV
5191 wlen);
5192 if (rval) {
7c3df132
SK
5193 ql_log(ql_log_fatal, vha, 0x008a,
5194 "Failed to load segment %d of firmware.\n",
5195 fragment);
5433383e
AV
5196 break;
5197 }
5198
5199 fwcode += wlen;
5200 risc_addr += wlen;
5201 risc_size -= wlen;
5202 fragment++;
5203 }
5204
5205 /* Next segment. */
5206 seg++;
5207 }
5208 return rval;
5209
5210fail_fw_integrity:
5211 return QLA_FUNCTION_FAILED;
5212}
5213
eaac30be
AV
5214static int
5215qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
0107109e
AV
5216{
5217 int rval;
5218 int segments, fragment;
5219 uint32_t *dcode, dlen;
5220 uint32_t risc_addr;
5221 uint32_t risc_size;
5222 uint32_t i;
5433383e 5223 struct fw_blob *blob;
0107109e 5224 uint32_t *fwcode, fwclen;
e315cd28 5225 struct qla_hw_data *ha = vha->hw;
73208dfd 5226 struct req_que *req = ha->req_q_map[0];
0107109e 5227
5433383e 5228 /* Load firmware blob. */
e315cd28 5229 blob = qla2x00_request_firmware(vha);
5433383e 5230 if (!blob) {
7c3df132
SK
5231 ql_log(ql_log_warn, vha, 0x0090,
5232 "Fimware image unavailable.\n");
5233 ql_log(ql_log_warn, vha, 0x0091,
5234 "Firmware images can be retrieved from: "
5235 QLA_FW_URL ".\n");
d1c61909 5236
eaac30be 5237 return QLA_FUNCTION_FAILED;
0107109e
AV
5238 }
5239
cfb0919c
CD
5240 ql_dbg(ql_dbg_init, vha, 0x0092,
5241 "FW: Loading via request-firmware.\n");
eaac30be 5242
0107109e
AV
5243 rval = QLA_SUCCESS;
5244
5245 segments = FA_RISC_CODE_SEGMENTS;
73208dfd 5246 dcode = (uint32_t *)req->ring;
0107109e 5247 *srisc_addr = 0;
5433383e 5248 fwcode = (uint32_t *)blob->fw->data;
0107109e
AV
5249 fwclen = 0;
5250
5251 /* Validate firmware image by checking version. */
5433383e 5252 if (blob->fw->size < 8 * sizeof(uint32_t)) {
7c3df132
SK
5253 ql_log(ql_log_fatal, vha, 0x0093,
5254 "Unable to verify integrity of firmware image (%Zd).\n",
5433383e 5255 blob->fw->size);
0107109e
AV
5256 goto fail_fw_integrity;
5257 }
5258 for (i = 0; i < 4; i++)
5259 dcode[i] = be32_to_cpu(fwcode[i + 4]);
5260 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
5261 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
5262 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
5263 dcode[3] == 0)) {
7c3df132
SK
5264 ql_log(ql_log_fatal, vha, 0x0094,
5265 "Unable to verify integrity of firmware image (%Zd).\n",
5266 blob->fw->size);
5267 ql_log(ql_log_fatal, vha, 0x0095,
5268 "Firmware data: %08x %08x %08x %08x.\n",
5269 dcode[0], dcode[1], dcode[2], dcode[3]);
0107109e
AV
5270 goto fail_fw_integrity;
5271 }
5272
5273 while (segments && rval == QLA_SUCCESS) {
5274 risc_addr = be32_to_cpu(fwcode[2]);
5275 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
5276 risc_size = be32_to_cpu(fwcode[3]);
5277
5278 /* Validate firmware image size. */
5279 fwclen += risc_size * sizeof(uint32_t);
5433383e 5280 if (blob->fw->size < fwclen) {
7c3df132 5281 ql_log(ql_log_fatal, vha, 0x0096,
5433383e 5282 "Unable to verify integrity of firmware image "
7c3df132 5283 "(%Zd).\n", blob->fw->size);
5433383e 5284
0107109e
AV
5285 goto fail_fw_integrity;
5286 }
5287
5288 fragment = 0;
5289 while (risc_size > 0 && rval == QLA_SUCCESS) {
5290 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
5291 if (dlen > risc_size)
5292 dlen = risc_size;
5293
7c3df132
SK
5294 ql_dbg(ql_dbg_init, vha, 0x0097,
5295 "Loading risc segment@ risc addr %x "
5296 "number of dwords 0x%x.\n", risc_addr, dlen);
0107109e
AV
5297
5298 for (i = 0; i < dlen; i++)
5299 dcode[i] = swab32(fwcode[i]);
5300
73208dfd 5301 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
590f98e5 5302 dlen);
0107109e 5303 if (rval) {
7c3df132
SK
5304 ql_log(ql_log_fatal, vha, 0x0098,
5305 "Failed to load segment %d of firmware.\n",
5306 fragment);
0107109e
AV
5307 break;
5308 }
5309
5310 fwcode += dlen;
5311 risc_addr += dlen;
5312 risc_size -= dlen;
5313 fragment++;
5314 }
5315
5316 /* Next segment. */
5317 segments--;
5318 }
0107109e
AV
5319 return rval;
5320
5321fail_fw_integrity:
0107109e 5322 return QLA_FUNCTION_FAILED;
0107109e 5323}
18c6c127 5324
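/*
 * Illustrative sketch (not part of qla_init.c): both request-firmware loaders
 * above validate the blob header, then push each segment to the RISC in
 * fw_transfer_size-sized chunks, byte-swapping each word (or dword) into a
 * bounce buffer before the LOAD RAM mailbox command.  In this stand-alone
 * model of the dword variant, load_chunk() is a hypothetical stand-in for
 * qla2x00_load_ram() and buf[] replaces req->ring.
 */
#include <stdint.h>

#define MAX_CHUNK_DWORDS 256			/* stands in for fw_transfer_size >> 2 */

static uint32_t swap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

static int load_chunk(uint32_t risc_addr, const uint32_t *dwords, uint32_t dlen)
{
	/* placeholder: the real driver issues the LOAD RAM mailbox command here */
	(void)risc_addr; (void)dwords; (void)dlen;
	return 0;
}

static int load_segment(const uint32_t *fwcode, uint32_t risc_addr,
			uint32_t risc_size)
{
	uint32_t buf[MAX_CHUNK_DWORDS];		/* bounce buffer, like req->ring */

	while (risc_size > 0) {
		uint32_t dlen = MAX_CHUNK_DWORDS;
		uint32_t i;

		if (dlen > risc_size)
			dlen = risc_size;

		for (i = 0; i < dlen; i++)	/* unconditional byte swap, as swab32() does */
			buf[i] = swap32(fwcode[i]);

		if (load_chunk(risc_addr, buf, dlen))
			return -1;		/* "Failed to load segment" path */

		fwcode += dlen;
		risc_addr += dlen;
		risc_size -= dlen;
	}
	return 0;
}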
eaac30be
AV
5325int
5326qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5327{
5328 int rval;
5329
e337d907
AV
5330 if (ql2xfwloadbin == 1)
5331 return qla81xx_load_risc(vha, srisc_addr);
5332
eaac30be
AV
5333 /*
5334 * FW Load priority:
5335 * 1) Firmware via request-firmware interface (.bin file).
5336 * 2) Firmware residing in flash.
5337 */
5338 rval = qla24xx_load_risc_blob(vha, srisc_addr);
5339 if (rval == QLA_SUCCESS)
5340 return rval;
5341
cbc8eb67
AV
5342 return qla24xx_load_risc_flash(vha, srisc_addr,
5343 vha->hw->flt_region_fw);
eaac30be
AV
5344}
5345
5346int
5347qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5348{
5349 int rval;
cbc8eb67 5350 struct qla_hw_data *ha = vha->hw;
eaac30be 5351
e337d907 5352 if (ql2xfwloadbin == 2)
cbc8eb67 5353 goto try_blob_fw;
e337d907 5354
eaac30be
AV
5355 /*
5356 * FW Load priority:
5357 * 1) Firmware residing in flash.
5358 * 2) Firmware via request-firmware interface (.bin file).
cbc8eb67 5359 * 3) Golden-Firmware residing in flash -- limited operation.
eaac30be 5360 */
cbc8eb67 5361 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
eaac30be
AV
5362 if (rval == QLA_SUCCESS)
5363 return rval;
5364
cbc8eb67
AV
5365try_blob_fw:
5366 rval = qla24xx_load_risc_blob(vha, srisc_addr);
5367 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
5368 return rval;
5369
7c3df132
SK
5370 ql_log(ql_log_info, vha, 0x0099,
5371 "Attempting to fallback to golden firmware.\n");
cbc8eb67
AV
5372 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
5373 if (rval != QLA_SUCCESS)
5374 return rval;
5375
7c3df132 5376 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
cbc8eb67 5377 ha->flags.running_gold_fw = 1;
cbc8eb67 5378 return rval;
eaac30be
AV
5379}
5380
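/*
 * Illustrative sketch (not part of qla_init.c): the ISP81xx path above is an
 * ordered fallback: operational flash image first, then the request_firmware()
 * blob, then the limited "golden" recovery image, with ql2xfwloadbin only
 * changing where the chain starts (qla24xx_load_risc() tries blob before
 * flash).  The enum and try_load() below are hypothetical names for the
 * per-source loaders.
 */
#include <stdint.h>
#include <stdbool.h>

enum fw_source { FW_FLASH, FW_BLOB, FW_GOLDEN };

static int try_load(enum fw_source src, uint32_t *srisc_addr)
{
	/* placeholder: would dispatch to qla24xx_load_risc_flash()/_blob() */
	(void)src; (void)srisc_addr;
	return -1;				/* pretend the source was unavailable */
}

static int load_81xx_order(uint32_t *srisc_addr, bool have_golden,
			   bool *running_gold)
{
	int rval;

	*running_gold = false;

	rval = try_load(FW_FLASH, srisc_addr);	/* 1) firmware residing in flash */
	if (rval == 0)
		return 0;

	rval = try_load(FW_BLOB, srisc_addr);	/* 2) .bin via request-firmware */
	if (rval == 0 || !have_golden)
		return rval;

	rval = try_load(FW_GOLDEN, srisc_addr);	/* 3) golden image, limited operation */
	if (rval == 0)
		*running_gold = true;		/* mirrors ha->flags.running_gold_fw */
	return rval;
}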
18c6c127 5381void
e315cd28 5382qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
18c6c127
AV
5383{
5384 int ret, retries;
e315cd28 5385 struct qla_hw_data *ha = vha->hw;
18c6c127 5386
85880801
AV
5387 if (ha->flags.pci_channel_io_perm_failure)
5388 return;
e428924c 5389 if (!IS_FWI2_CAPABLE(ha))
18c6c127 5390 return;
75edf81d
AV
5391 if (!ha->fw_major_version)
5392 return;
18c6c127 5393
e315cd28 5394 ret = qla2x00_stop_firmware(vha);
7c7f1f29 5395 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
b469a7cb 5396 ret != QLA_INVALID_COMMAND && retries ; retries--) {
e315cd28
AC
5397 ha->isp_ops->reset_chip(vha);
5398 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
18c6c127 5399 continue;
e315cd28 5400 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
18c6c127 5401 continue;
7c3df132
SK
5402 ql_log(ql_log_info, vha, 0x8015,
5403 "Attempting retry of stop-firmware command.\n");
e315cd28 5404 ret = qla2x00_stop_firmware(vha);
18c6c127
AV
5405 }
5406}
2c3dfe3f
SJ
5407
5408int
e315cd28 5409qla24xx_configure_vhba(scsi_qla_host_t *vha)
2c3dfe3f
SJ
5410{
5411 int rval = QLA_SUCCESS;
0b91d116 5412 int rval2;
2c3dfe3f 5413 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28
AC
5414 struct qla_hw_data *ha = vha->hw;
5415 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
67c2e93a
AC
5416 struct req_que *req;
5417 struct rsp_que *rsp;
2c3dfe3f 5418
e315cd28 5419 if (!vha->vp_idx)
2c3dfe3f
SJ
5420 return -EINVAL;
5421
e315cd28 5422 rval = qla2x00_fw_ready(base_vha);
7163ea81 5423 if (ha->flags.cpu_affinity_enabled)
67c2e93a
AC
5424 req = ha->req_q_map[0];
5425 else
5426 req = vha->req;
5427 rsp = req->rsp;
5428
2c3dfe3f 5429 if (rval == QLA_SUCCESS) {
e315cd28 5430 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
73208dfd 5431 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
2c3dfe3f
SJ
5432 }
5433
e315cd28 5434 vha->flags.management_server_logged_in = 0;
2c3dfe3f
SJ
5435
5436 /* Login to SNS first */
0b91d116
CD
5437 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
5438 BIT_1);
5439 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
5440 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
5441 ql_dbg(ql_dbg_init, vha, 0x0120,
5442 "Failed SNS login: loop_id=%x, rval2=%d\n",
5443 NPH_SNS, rval2);
5444 else
5445 ql_dbg(ql_dbg_init, vha, 0x0103,
5446 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
5447 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
5448 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
2c3dfe3f
SJ
5449 return (QLA_FUNCTION_FAILED);
5450 }
5451
e315cd28
AC
5452 atomic_set(&vha->loop_down_timer, 0);
5453 atomic_set(&vha->loop_state, LOOP_UP);
5454 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5455 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5456 rval = qla2x00_loop_resync(base_vha);
2c3dfe3f
SJ
5457
5458 return rval;
5459}
4d4df193
HK
5460
5461/* 84XX Support **************************************************************/
5462
5463static LIST_HEAD(qla_cs84xx_list);
5464static DEFINE_MUTEX(qla_cs84xx_mutex);
5465
5466static struct qla_chip_state_84xx *
e315cd28 5467qla84xx_get_chip(struct scsi_qla_host *vha)
4d4df193
HK
5468{
5469 struct qla_chip_state_84xx *cs84xx;
e315cd28 5470 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
5471
5472 mutex_lock(&qla_cs84xx_mutex);
5473
5474 /* Find any shared 84xx chip. */
5475 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
5476 if (cs84xx->bus == ha->pdev->bus) {
5477 kref_get(&cs84xx->kref);
5478 goto done;
5479 }
5480 }
5481
5482 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
5483 if (!cs84xx)
5484 goto done;
5485
5486 kref_init(&cs84xx->kref);
5487 spin_lock_init(&cs84xx->access_lock);
5488 mutex_init(&cs84xx->fw_update_mutex);
5489 cs84xx->bus = ha->pdev->bus;
5490
5491 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
5492done:
5493 mutex_unlock(&qla_cs84xx_mutex);
5494 return cs84xx;
5495}
5496
5497static void
5498__qla84xx_chip_release(struct kref *kref)
5499{
5500 struct qla_chip_state_84xx *cs84xx =
5501 container_of(kref, struct qla_chip_state_84xx, kref);
5502
5503 mutex_lock(&qla_cs84xx_mutex);
5504 list_del(&cs84xx->list);
5505 mutex_unlock(&qla_cs84xx_mutex);
5506 kfree(cs84xx);
5507}
5508
5509void
e315cd28 5510qla84xx_put_chip(struct scsi_qla_host *vha)
4d4df193 5511{
e315cd28 5512 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
5513 if (ha->cs84xx)
5514 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
5515}
5516
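/*
 * Illustrative sketch (not part of qla_init.c): the CS84xx handling above is
 * a find-or-create object keyed on the PCI bus and reference-counted with a
 * kref, so both functions of a dual-port ISP84xx share one firmware-update
 * state.  This stand-alone model uses a plain counter and list instead of
 * struct kref, and omits the qla_cs84xx_mutex locking.
 */
#include <stdlib.h>

struct shared_state {
	const void *key;			/* stands in for ha->pdev->bus */
	int refcount;				/* stands in for the kref */
	struct shared_state *next;
};

static struct shared_state *state_list;		/* like qla_cs84xx_list */

static struct shared_state *state_get(const void *key)
{
	struct shared_state *s;

	for (s = state_list; s; s = s->next)
		if (s->key == key) {		/* reuse a sibling function's state */
			s->refcount++;
			return s;
		}

	s = calloc(1, sizeof(*s));		/* first user on this bus: allocate */
	if (!s)
		return NULL;
	s->key = key;
	s->refcount = 1;
	s->next = state_list;
	state_list = s;
	return s;
}

static void state_put(struct shared_state *s)
{
	struct shared_state **pp;

	if (!s || --s->refcount)
		return;
	for (pp = &state_list; *pp; pp = &(*pp)->next)	/* last user: unlink and free */
		if (*pp == s) {
			*pp = s->next;
			break;
		}
	free(s);
}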
5517static int
e315cd28 5518qla84xx_init_chip(scsi_qla_host_t *vha)
4d4df193
HK
5519{
5520 int rval;
5521 uint16_t status[2];
e315cd28 5522 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
5523
5524 mutex_lock(&ha->cs84xx->fw_update_mutex);
5525
e315cd28 5526 rval = qla84xx_verify_chip(vha, status);
4d4df193
HK
5527
5528 mutex_unlock(&ha->cs84xx->fw_update_mutex);
5529
5530 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
5531 QLA_SUCCESS;
5532}
3a03eb79
AV
5533
5534/* 81XX Support **************************************************************/
5535
5536int
5537qla81xx_nvram_config(scsi_qla_host_t *vha)
5538{
5539 int rval;
5540 struct init_cb_81xx *icb;
5541 struct nvram_81xx *nv;
5542 uint32_t *dptr;
5543 uint8_t *dptr1, *dptr2;
5544 uint32_t chksum;
5545 uint16_t cnt;
5546 struct qla_hw_data *ha = vha->hw;
5547
5548 rval = QLA_SUCCESS;
5549 icb = (struct init_cb_81xx *)ha->init_cb;
5550 nv = ha->nvram;
5551
5552 /* Determine NVRAM starting address. */
5553 ha->nvram_size = sizeof(struct nvram_81xx);
3a03eb79 5554 ha->vpd_size = FA_NVRAM_VPD_SIZE;
3a03eb79
AV
5555
5556 /* Get VPD data into cache */
5557 ha->vpd = ha->nvram + VPD_OFFSET;
3d79038f
AV
5558 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
5559 ha->vpd_size);
3a03eb79
AV
5560
5561 /* Get NVRAM data into cache and calculate checksum. */
3d79038f 5562 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
3a03eb79 5563 ha->nvram_size);
3d79038f 5564 dptr = (uint32_t *)nv;
3a03eb79
AV
5565 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
5566 chksum += le32_to_cpu(*dptr++);
5567
7c3df132
SK
5568 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
5569 "Contents of NVRAM:\n");
5570 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
5571 (uint8_t *)nv, ha->nvram_size);
3a03eb79
AV
5572
5573 /* Bad NVRAM data, set default parameters. */
5574 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5575 || nv->id[3] != ' ' ||
5576 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
5577 /* Reset NVRAM data. */
7c3df132 5578 ql_log(ql_log_info, vha, 0x0073,
9e336520 5579 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7c3df132 5580 "version=0x%x.\n", chksum, nv->id[0],
3a03eb79 5581 le16_to_cpu(nv->nvram_version));
7c3df132
SK
5582 ql_log(ql_log_info, vha, 0x0074,
5583 "Falling back to functioning (yet invalid -- WWPN) "
5584 "defaults.\n");
3a03eb79
AV
5585
5586 /*
5587 * Set default initialization control block.
5588 */
5589 memset(nv, 0, ha->nvram_size);
5590 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
5591 nv->version = __constant_cpu_to_le16(ICB_VERSION);
5592 nv->frame_payload_size = __constant_cpu_to_le16(2048);
5593 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5594 nv->exchange_count = __constant_cpu_to_le16(0);
5595 nv->port_name[0] = 0x21;
e5b68a61 5596 nv->port_name[1] = 0x00 + ha->port_no;
3a03eb79
AV
5597 nv->port_name[2] = 0x00;
5598 nv->port_name[3] = 0xe0;
5599 nv->port_name[4] = 0x8b;
5600 nv->port_name[5] = 0x1c;
5601 nv->port_name[6] = 0x55;
5602 nv->port_name[7] = 0x86;
5603 nv->node_name[0] = 0x20;
5604 nv->node_name[1] = 0x00;
5605 nv->node_name[2] = 0x00;
5606 nv->node_name[3] = 0xe0;
5607 nv->node_name[4] = 0x8b;
5608 nv->node_name[5] = 0x1c;
5609 nv->node_name[6] = 0x55;
5610 nv->node_name[7] = 0x86;
5611 nv->login_retry_count = __constant_cpu_to_le16(8);
5612 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
5613 nv->login_timeout = __constant_cpu_to_le16(0);
5614 nv->firmware_options_1 =
5615 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
5616 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
5617 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
5618 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
5619 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
5620 nv->efi_parameters = __constant_cpu_to_le32(0);
5621 nv->reset_delay = 5;
5622 nv->max_luns_per_target = __constant_cpu_to_le16(128);
5623 nv->port_down_retry_count = __constant_cpu_to_le16(30);
6246b8a1 5624 nv->link_down_timeout = __constant_cpu_to_le16(180);
eeebcc92 5625 nv->enode_mac[0] = 0x00;
6246b8a1
GM
5626 nv->enode_mac[1] = 0xC0;
5627 nv->enode_mac[2] = 0xDD;
3a03eb79
AV
5628 nv->enode_mac[3] = 0x04;
5629 nv->enode_mac[4] = 0x05;
e5b68a61 5630 nv->enode_mac[5] = 0x06 + ha->port_no;
3a03eb79
AV
5631
5632 rval = 1;
5633 }
5634
9e522cd8
AE
5635 if (IS_T10_PI_CAPABLE(ha))
5636 nv->frame_payload_size &= ~7;
5637
aa230bc5
AE
5638 qlt_81xx_config_nvram_stage1(vha, nv);
5639
3a03eb79 5640 /* Reset Initialization control block */
773120e4 5641 memset(icb, 0, ha->init_cb_size);
3a03eb79
AV
5642
5643 /* Copy 1st segment. */
5644 dptr1 = (uint8_t *)icb;
5645 dptr2 = (uint8_t *)&nv->version;
5646 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
5647 while (cnt--)
5648 *dptr1++ = *dptr2++;
5649
5650 icb->login_retry_count = nv->login_retry_count;
5651
5652 /* Copy 2nd segment. */
5653 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
5654 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
5655 cnt = (uint8_t *)&icb->reserved_5 -
5656 (uint8_t *)&icb->interrupt_delay_timer;
5657 while (cnt--)
5658 *dptr1++ = *dptr2++;
5659
5660 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
5661 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
5662 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
69e5f1ea
AV
5663 icb->enode_mac[0] = 0x00;
5664 icb->enode_mac[1] = 0xC0;
5665 icb->enode_mac[2] = 0xDD;
3a03eb79
AV
5666 icb->enode_mac[3] = 0x04;
5667 icb->enode_mac[4] = 0x05;
e5b68a61 5668 icb->enode_mac[5] = 0x06 + ha->port_no;
3a03eb79
AV
5669 }
5670
b64b0e8f
AV
5671 /* Use extended-initialization control block. */
5672 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
5673
3a03eb79
AV
5674 /*
5675 * Setup driver NVRAM options.
5676 */
5677 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
a9083016 5678 "QLE8XXX");
3a03eb79 5679
aa230bc5
AE
5680 qlt_81xx_config_nvram_stage2(vha, icb);
5681
3a03eb79
AV
5682 /* Use alternate WWN? */
5683 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
5684 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5685 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5686 }
5687
5688 /* Prepare nodename */
5689 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
5690 /*
5691 * Firmware will apply the following mask if the nodename was
5692 * not provided.
5693 */
5694 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5695 icb->node_name[0] &= 0xF0;
5696 }
5697
5698 /* Set host adapter parameters. */
5699 ha->flags.disable_risc_code_load = 0;
5700 ha->flags.enable_lip_reset = 0;
5701 ha->flags.enable_lip_full_login =
5702 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
5703 ha->flags.enable_target_reset =
5704 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
5705 ha->flags.enable_led_scheme = 0;
5706 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
5707
5708 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
5709 (BIT_6 | BIT_5 | BIT_4)) >> 4;
5710
5711 /* save HBA serial number */
5712 ha->serial0 = icb->port_name[5];
5713 ha->serial1 = icb->port_name[6];
5714 ha->serial2 = icb->port_name[7];
5715 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5716 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5717
5718 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5719
5720 ha->retry_count = le16_to_cpu(nv->login_retry_count);
5721
5722 /* Set minimum login_timeout to 4 seconds. */
5723 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
5724 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
5725 if (le16_to_cpu(nv->login_timeout) < 4)
5726 nv->login_timeout = __constant_cpu_to_le16(4);
5727 ha->login_timeout = le16_to_cpu(nv->login_timeout);
5728 icb->login_timeout = nv->login_timeout;
5729
5730 /* Set minimum RATOV to 100 tenths of a second. */
5731 ha->r_a_tov = 100;
5732
5733 ha->loop_reset_delay = nv->reset_delay;
5734
5735 /* Link Down Timeout = 0:
5736 *
5737 * When Port Down timer expires we will start returning
5738 * I/O's to OS with "DID_NO_CONNECT".
5739 *
5740 * Link Down Timeout != 0:
5741 *
5742 * The driver waits for the link to come up after link down
5743 * before returning I/Os to OS with "DID_NO_CONNECT".
5744 */
5745 if (le16_to_cpu(nv->link_down_timeout) == 0) {
5746 ha->loop_down_abort_time =
5747 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
5748 } else {
5749 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
5750 ha->loop_down_abort_time =
5751 (LOOP_DOWN_TIME - ha->link_down_timeout);
5752 }
5753
5754 /* Need enough time to try and get the port back. */
5755 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
5756 if (qlport_down_retry)
5757 ha->port_down_retry_count = qlport_down_retry;
5758
5759 /* Set login_retry_count */
5760 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
5761 if (ha->port_down_retry_count ==
5762 le16_to_cpu(nv->port_down_retry_count) &&
5763 ha->port_down_retry_count > 3)
5764 ha->login_retry_count = ha->port_down_retry_count;
5765 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5766 ha->login_retry_count = ha->port_down_retry_count;
5767 if (ql2xloginretrycount)
5768 ha->login_retry_count = ql2xloginretrycount;
5769
6246b8a1
GM
5770 /* if not running MSI-X we need handshaking on interrupts */
5771 if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha))
5772 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
5773
3a03eb79
AV
5774 /* Enable ZIO. */
5775 if (!vha->flags.init_done) {
5776 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
5777 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5778 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
5779 le16_to_cpu(icb->interrupt_delay_timer): 2;
5780 }
5781 icb->firmware_options_2 &= __constant_cpu_to_le32(
5782 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
5783 vha->flags.process_response_queue = 0;
5784 if (ha->zio_mode != QLA_ZIO_DISABLED) {
5785 ha->zio_mode = QLA_ZIO_MODE_6;
5786
7c3df132 5787 ql_log(ql_log_info, vha, 0x0075,
3a03eb79 5788 "ZIO mode %d enabled; timer delay (%d us).\n",
7c3df132
SK
5789 ha->zio_mode,
5790 ha->zio_timer * 100);
3a03eb79
AV
5791
5792 icb->firmware_options_2 |= cpu_to_le32(
5793 (uint32_t)ha->zio_mode);
5794 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
5795 vha->flags.process_response_queue = 1;
5796 }
5797
5798 if (rval) {
7c3df132
SK
5799 ql_log(ql_log_warn, vha, 0x0076,
5800 "NVRAM configuration failed.\n");
3a03eb79
AV
5801 }
5802 return (rval);
5803}
5804
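/*
 * Illustrative sketch (not part of qla_init.c): qla81xx_nvram_config() above
 * accepts the cached NVRAM only when the 32-bit little-endian words of the
 * whole region sum to zero, the image carries the "ISP " signature, and the
 * version is at least ICB_VERSION; otherwise it falls back to the built-in
 * defaults.  The version-field offset used below is an assumption for
 * illustration, not the exact struct nvram_81xx layout.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdbool.h>

static uint32_t le32_load(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint16_t le16_load(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static bool nvram_image_valid(const uint8_t *nv, size_t size, uint16_t icb_version)
{
	uint32_t chksum = 0;
	size_t off;

	for (off = 0; off + 4 <= size; off += 4)
		chksum += le32_load(nv + off);	/* whole region must sum to 0 (mod 2^32) */
	if (chksum)
		return false;

	if (memcmp(nv, "ISP ", 4))		/* id bytes 'I' 'S' 'P' ' ' */
		return false;

	return le16_load(nv + 4) >= icb_version; /* nvram_version; offset assumed */
}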
a9083016
GM
5805int
5806qla82xx_restart_isp(scsi_qla_host_t *vha)
5807{
5808 int status, rval;
5809 uint32_t wait_time;
5810 struct qla_hw_data *ha = vha->hw;
5811 struct req_que *req = ha->req_q_map[0];
5812 struct rsp_que *rsp = ha->rsp_q_map[0];
5813 struct scsi_qla_host *vp;
feafb7b1 5814 unsigned long flags;
a9083016
GM
5815
5816 status = qla2x00_init_rings(vha);
5817 if (!status) {
5818 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5819 ha->flags.chip_reset_done = 1;
5820
5821 status = qla2x00_fw_ready(vha);
5822 if (!status) {
7c3df132
SK
5823 ql_log(ql_log_info, vha, 0x803c,
5824 "Start configure loop, status =%d.\n", status);
a9083016
GM
5825
5826 /* Issue a marker after FW becomes ready. */
5827 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
5828
5829 vha->flags.online = 1;
5830 /* Wait at most MAX_TARGET RSCNs for a stable link. */
5831 wait_time = 256;
5832 do {
5833 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5834 qla2x00_configure_loop(vha);
5835 wait_time--;
5836 } while (!atomic_read(&vha->loop_down_timer) &&
5837 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
5838 wait_time &&
5839 (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
5840 }
5841
5842 /* if no cable then assume it's good */
5843 if ((vha->device_flags & DFLG_NO_CABLE))
5844 status = 0;
5845
cfb0919c 5846 ql_log(ql_log_info, vha, 0x8000,
7c3df132 5847 "Configure loop done, status = 0x%x.\n", status);
a9083016
GM
5848 }
5849
5850 if (!status) {
5851 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5852
5853 if (!atomic_read(&vha->loop_down_timer)) {
5854 /*
5855 * Issue marker command only when we are going
5856 * to start the I/O .
5857 */
5858 vha->marker_needed = 1;
5859 }
5860
5861 vha->flags.online = 1;
5862
5863 ha->isp_ops->enable_intrs(ha);
5864
5865 ha->isp_abort_cnt = 0;
5866 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5867
53296788 5868 /* Update the firmware version */
3173167f 5869 status = qla82xx_check_md_needed(vha);
53296788 5870
a9083016
GM
5871 if (ha->fce) {
5872 ha->flags.fce_enabled = 1;
5873 memset(ha->fce, 0,
5874 fce_calc_size(ha->fce_bufs));
5875 rval = qla2x00_enable_fce_trace(vha,
5876 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5877 &ha->fce_bufs);
5878 if (rval) {
cfb0919c 5879 ql_log(ql_log_warn, vha, 0x8001,
7c3df132
SK
5880 "Unable to reinitialize FCE (%d).\n",
5881 rval);
a9083016
GM
5882 ha->flags.fce_enabled = 0;
5883 }
5884 }
5885
5886 if (ha->eft) {
5887 memset(ha->eft, 0, EFT_SIZE);
5888 rval = qla2x00_enable_eft_trace(vha,
5889 ha->eft_dma, EFT_NUM_BUFFERS);
5890 if (rval) {
cfb0919c 5891 ql_log(ql_log_warn, vha, 0x8010,
7c3df132
SK
5892 "Unable to reinitialize EFT (%d).\n",
5893 rval);
a9083016
GM
5894 }
5895 }
a9083016
GM
5896 }
5897
5898 if (!status) {
cfb0919c 5899 ql_dbg(ql_dbg_taskm, vha, 0x8011,
7c3df132 5900 "qla82xx_restart_isp succeeded.\n");
feafb7b1
AE
5901
5902 spin_lock_irqsave(&ha->vport_slock, flags);
5903 list_for_each_entry(vp, &ha->vp_list, list) {
5904 if (vp->vp_idx) {
5905 atomic_inc(&vp->vref_count);
5906 spin_unlock_irqrestore(&ha->vport_slock, flags);
5907
a9083016 5908 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
5909
5910 spin_lock_irqsave(&ha->vport_slock, flags);
5911 atomic_dec(&vp->vref_count);
5912 }
a9083016 5913 }
feafb7b1
AE
5914 spin_unlock_irqrestore(&ha->vport_slock, flags);
5915
a9083016 5916 } else {
cfb0919c 5917 ql_log(ql_log_warn, vha, 0x8016,
7c3df132 5918 "qla82xx_restart_isp **** FAILED ****.\n");
a9083016
GM
5919 }
5920
5921 return status;
5922}
5923
3a03eb79 5924void
ae97c91e 5925qla81xx_update_fw_options(scsi_qla_host_t *vha)
3a03eb79 5926{
ae97c91e
AV
5927 struct qla_hw_data *ha = vha->hw;
5928
5929 if (!ql2xetsenable)
5930 return;
5931
5932 /* Enable ETS Burst. */
5933 memset(ha->fw_options, 0, sizeof(ha->fw_options));
5934 ha->fw_options[2] |= BIT_9;
5935 qla2x00_set_fw_options(vha, ha->fw_options);
3a03eb79 5936}
09ff701a
SR
5937
5938/*
5939 * qla24xx_get_fcp_prio
5940 * Gets the fcp cmd priority value for the logged in port.
5941 * Looks for a match of the port descriptors within
5942 * each of the fcp prio config entries. If a match is found,
5943 * the tag (priority) value is returned.
5944 *
5945 * Input:
21090cbe 5946 * vha = scsi host structure pointer.
09ff701a
SR
5947 * fcport = port structure pointer.
5948 *
5949 * Return:
6c452a45 5950 * non-zero (if found)
f28a0a96 5951 * -1 (if not found)
09ff701a
SR
5952 *
5953 * Context:
5954 * Kernel context
5955 */
f28a0a96 5956static int
09ff701a
SR
5957qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5958{
5959 int i, entries;
5960 uint8_t pid_match, wwn_match;
f28a0a96 5961 int priority;
09ff701a
SR
5962 uint32_t pid1, pid2;
5963 uint64_t wwn1, wwn2;
5964 struct qla_fcp_prio_entry *pri_entry;
5965 struct qla_hw_data *ha = vha->hw;
5966
5967 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
f28a0a96 5968 return -1;
09ff701a 5969
f28a0a96 5970 priority = -1;
09ff701a
SR
5971 entries = ha->fcp_prio_cfg->num_entries;
5972 pri_entry = &ha->fcp_prio_cfg->entry[0];
5973
5974 for (i = 0; i < entries; i++) {
5975 pid_match = wwn_match = 0;
5976
5977 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
5978 pri_entry++;
5979 continue;
5980 }
5981
5982 /* check source pid for a match */
5983 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
5984 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
5985 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
5986 if (pid1 == INVALID_PORT_ID)
5987 pid_match++;
5988 else if (pid1 == pid2)
5989 pid_match++;
5990 }
5991
5992 /* check destination pid for a match */
5993 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
5994 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
5995 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
5996 if (pid1 == INVALID_PORT_ID)
5997 pid_match++;
5998 else if (pid1 == pid2)
5999 pid_match++;
6000 }
6001
6002 /* check source WWN for a match */
6003 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
6004 wwn1 = wwn_to_u64(vha->port_name);
6005 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
6006 if (wwn2 == (uint64_t)-1)
6007 wwn_match++;
6008 else if (wwn1 == wwn2)
6009 wwn_match++;
6010 }
6011
6012 /* check destination WWN for a match */
6013 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
6014 wwn1 = wwn_to_u64(fcport->port_name);
6015 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
6016 if (wwn2 == (uint64_t)-1)
6017 wwn_match++;
6018 else if (wwn1 == wwn2)
6019 wwn_match++;
6020 }
6021
6022 if (pid_match == 2 || wwn_match == 2) {
6023 /* Found a matching entry */
6024 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
6025 priority = pri_entry->tag;
6026 break;
6027 }
6028
6029 pri_entry++;
6030 }
6031
6032 return priority;
6033}
6034
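/*
 * Illustrative sketch (not part of qla_init.c): an FCP-priority entry above
 * applies only when both of its port-ID descriptors match (pid_match == 2)
 * or both of its WWN descriptors match (wwn_match == 2); an all-ones
 * descriptor acts as a wildcard.  This condensed stand-alone model checks one
 * entry and, for brevity, ignores the per-field VALID flags; all names here
 * are illustrative.
 */
#include <stdint.h>
#include <stdbool.h>

#define PID_MASK	0x00ffffffu		/* 24-bit FC port ID */
#define PID_WILDCARD	0x00ffffffu		/* mirrors the INVALID_PORT_ID check */

static bool pid_matches(uint32_t entry_pid, uint32_t port_pid)
{
	entry_pid &= PID_MASK;
	return entry_pid == PID_WILDCARD || entry_pid == (port_pid & PID_MASK);
}

static bool wwn_matches(uint64_t entry_wwn, uint64_t port_wwn)
{
	return entry_wwn == (uint64_t)-1 || entry_wwn == port_wwn;
}

/* true when the entry's tag should be applied to this initiator/target pair */
static bool entry_applies(uint32_t e_src_pid, uint32_t e_dst_pid,
			  uint64_t e_src_wwpn, uint64_t e_dst_wwpn,
			  uint32_t my_pid, uint32_t tgt_pid,
			  uint64_t my_wwpn, uint64_t tgt_wwpn)
{
	bool pid_ok = pid_matches(e_src_pid, my_pid) &&
		      pid_matches(e_dst_pid, tgt_pid);
	bool wwn_ok = wwn_matches(e_src_wwpn, my_wwpn) &&
		      wwn_matches(e_dst_wwpn, tgt_wwpn);

	return pid_ok || wwn_ok;
}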
6035/*
6036 * qla24xx_update_fcport_fcp_prio
6037 * Activates fcp priority for the logged in fc port
6038 *
6039 * Input:
21090cbe 6040 * vha = scsi host structure pointer.
09ff701a
SR
6041 * fcport = port structure pointer.
6042 *
6043 * Return:
6044 * QLA_SUCCESS or QLA_FUNCTION_FAILED
6045 *
6046 * Context:
6047 * Kernel context.
6048 */
6049int
21090cbe 6050qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
09ff701a
SR
6051{
6052 int ret;
f28a0a96 6053 int priority;
09ff701a
SR
6054 uint16_t mb[5];
6055
21090cbe
MI
6056 if (fcport->port_type != FCT_TARGET ||
6057 fcport->loop_id == FC_NO_LOOP_ID)
09ff701a
SR
6058 return QLA_FUNCTION_FAILED;
6059
21090cbe 6060 priority = qla24xx_get_fcp_prio(vha, fcport);
f28a0a96
AV
6061 if (priority < 0)
6062 return QLA_FUNCTION_FAILED;
6063
a00f6296
SK
6064 if (IS_QLA82XX(vha->hw)) {
6065 fcport->fcp_prio = priority & 0xf;
6066 return QLA_SUCCESS;
6067 }
6068
21090cbe 6069 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
cfb0919c
CD
6070 if (ret == QLA_SUCCESS) {
6071 if (fcport->fcp_prio != priority)
6072 ql_dbg(ql_dbg_user, vha, 0x709e,
6073 "Updated FCP_CMND priority - value=%d loop_id=%d "
6074 "port_id=%02x%02x%02x.\n", priority,
6075 fcport->loop_id, fcport->d_id.b.domain,
6076 fcport->d_id.b.area, fcport->d_id.b.al_pa);
a00f6296 6077 fcport->fcp_prio = priority & 0xf;
cfb0919c 6078 } else
7c3df132 6079 ql_dbg(ql_dbg_user, vha, 0x704f,
cfb0919c
CD
6080 "Unable to update FCP_CMND priority - ret=0x%x for "
6081 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
6082 fcport->d_id.b.domain, fcport->d_id.b.area,
6083 fcport->d_id.b.al_pa);
09ff701a
SR
6084 return ret;
6085}
6086
6087/*
6088 * qla24xx_update_all_fcp_prio
6089 * Activates fcp priority for all the logged in ports
6090 *
6091 * Input:
6092 * vha = adapter block pointer.
6093 *
6094 * Return:
6095 * QLA_SUCCESS or QLA_FUNCTION_FAILED
6096 *
6097 * Context:
6098 * Kernel context.
6099 */
6100int
6101qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
6102{
6103 int ret;
6104 fc_port_t *fcport;
6105
6106 ret = QLA_FUNCTION_FAILED;
6107 /* We need to set priority for all logged in ports */
6108 list_for_each_entry(fcport, &vha->vp_fcports, list)
6109 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
6110
6111 return ret;
6112}