]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/scsi/qla2xxx/qla_init.c
865fc617e3aab8f673a3ff32fc8f6d345ba62300
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / qla2xxx / qla_init.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_gbl.h"
9
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13
14 #include "qla_devtbl.h"
15
16 #ifdef CONFIG_SPARC
17 #include <asm/prom.h>
18 #endif
19
20 #include <target/target_core_base.h>
21 #include "qla_target.h"
22
23 /*
24 * QLogic ISP2x00 Hardware Support Function Prototypes.
25 */
26 static int qla2x00_isp_firmware(scsi_qla_host_t *);
27 static int qla2x00_setup_chip(scsi_qla_host_t *);
28 static int qla2x00_fw_ready(scsi_qla_host_t *);
29 static int qla2x00_configure_hba(scsi_qla_host_t *);
30 static int qla2x00_configure_loop(scsi_qla_host_t *);
31 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
32 static int qla2x00_configure_fabric(scsi_qla_host_t *);
33 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
34 static int qla2x00_restart_isp(scsi_qla_host_t *);
35
36 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
37 static int qla84xx_init_chip(scsi_qla_host_t *);
38 static int qla25xx_init_queues(struct qla_hw_data *);
39 static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
40 static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
41 struct event_arg *);
42 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
43 struct event_arg *);
44
45 /* SRB Extensions ---------------------------------------------------------- */
46
/*
 * qla2x00_sp_timeout - timer expiry handler for an SRB's embedded iocb timer.
 *
 * Recovers the owning srb from the timer, removes the command from the
 * base request queue's outstanding array and invokes the per-iocb
 * timeout callback.
 */
void
qla2x00_sp_timeout(struct timer_list *t)
{
	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
	struct srb_iocb *iocb;
	scsi_qla_host_t *vha = sp->vha;
	struct req_que *req;
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	/* NOTE(review): always indexes the base queue (req_q_map[0]);
	 * assumes the handle lives on queue 0 -- verify for MQ configs. */
	req = vha->hw->req_q_map[0];
	req->outstanding_cmds[sp->handle] = NULL;
	iocb = &sp->u.iocb_cmd;
	/* NOTE(review): the timeout callback runs with hardware_lock held;
	 * callbacks must not attempt to re-acquire it. */
	iocb->timeout(sp);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
63
64 void
65 qla2x00_sp_free(void *ptr)
66 {
67 srb_t *sp = ptr;
68 struct srb_iocb *iocb = &sp->u.iocb_cmd;
69
70 del_timer(&iocb->timer);
71 qla2x00_rel_sp(sp);
72 }
73
74 /* Asynchronous Login/Logout Routines -------------------------------------- */
75
76 unsigned long
77 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
78 {
79 unsigned long tmo;
80 struct qla_hw_data *ha = vha->hw;
81
82 /* Firmware should use switch negotiated r_a_tov for timeout. */
83 tmo = ha->r_a_tov / 10 * 2;
84 if (IS_QLAFX00(ha)) {
85 tmo = FX00_DEF_RATOV * 2;
86 } else if (!IS_FWI2_CAPABLE(ha)) {
87 /*
88 * Except for earlier ISPs where the timeout is seeded from the
89 * initialization control block.
90 */
91 tmo = ha->login_timeout;
92 }
93 return tmo;
94 }
95
96 void
97 qla2x00_async_iocb_timeout(void *data)
98 {
99 srb_t *sp = data;
100 fc_port_t *fcport = sp->fcport;
101 struct srb_iocb *lio = &sp->u.iocb_cmd;
102
103 if (fcport) {
104 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
105 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
106 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
107
108 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
109 } else {
110 pr_info("Async-%s timeout - hdl=%x.\n",
111 sp->name, sp->handle);
112 }
113
114 switch (sp->type) {
115 case SRB_LOGIN_CMD:
116 /* Retry as needed. */
117 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
118 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
119 QLA_LOGIO_LOGIN_RETRIED : 0;
120 sp->done(sp, QLA_FUNCTION_TIMEOUT);
121 break;
122 case SRB_LOGOUT_CMD:
123 case SRB_CT_PTHRU_CMD:
124 case SRB_MB_IOCB:
125 case SRB_NACK_PLOGI:
126 case SRB_NACK_PRLI:
127 case SRB_NACK_LOGO:
128 sp->done(sp, QLA_FUNCTION_TIMEOUT);
129 break;
130 }
131 }
132
133 static void
134 qla2x00_async_login_sp_done(void *ptr, int res)
135 {
136 srb_t *sp = ptr;
137 struct scsi_qla_host *vha = sp->vha;
138 struct srb_iocb *lio = &sp->u.iocb_cmd;
139 struct event_arg ea;
140
141 ql_dbg(ql_dbg_disc, vha, 0x20dd,
142 "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
143
144 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
145
146 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
147 memset(&ea, 0, sizeof(ea));
148 ea.event = FCME_PLOGI_DONE;
149 ea.fcport = sp->fcport;
150 ea.data[0] = lio->u.logio.data[0];
151 ea.data[1] = lio->u.logio.data[1];
152 ea.iop[0] = lio->u.logio.iop[0];
153 ea.iop[1] = lio->u.logio.iop[1];
154 ea.sp = sp;
155 qla2x00_fcport_event_handler(vha, &ea);
156 }
157
158 sp->free(sp);
159 }
160
/*
 * qla2x00_async_login - issue an asynchronous PLOGI for @fcport.
 * @vha: virtual host the port hangs off
 * @fcport: remote port to log into
 * @data: logio data; data[1] may carry QLA_LOGIO_LOGIN_RETRIED
 *
 * Returns QLA_SUCCESS when the login IOCB was queued to firmware;
 * QLA_FUNCTION_FAILED otherwise.  Completion is delivered through
 * qla2x00_async_login_sp_done().
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		goto done;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	/* Mark the port busy until the sp_done callback clears it. */
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	/* Pad the async timeout so firmware times out first. */
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_login_sp_done;
	lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

	/* For FC-NVMe capable ports the PRLI is issued separately. */
	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags &= ~FCF_ASYNC_SENT;
		fcport->flags |= FCF_LOGIN_NEEDED;
		/* Let the DPC thread retry the login later. */
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
	    "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	/* Also reached on early failure where FCF_ASYNC_SENT was never
	 * set; clearing it is then a harmless no-op. */
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
214
215 static void
216 qla2x00_async_logout_sp_done(void *ptr, int res)
217 {
218 srb_t *sp = ptr;
219
220 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
221 sp->fcport->login_gen++;
222 qlt_logo_completion_handler(sp->fcport, res);
223 sp->free(sp);
224 }
225
226 int
227 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
228 {
229 srb_t *sp;
230 struct srb_iocb *lio;
231 int rval;
232
233 rval = QLA_FUNCTION_FAILED;
234 fcport->flags |= FCF_ASYNC_SENT;
235 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
236 if (!sp)
237 goto done;
238
239 sp->type = SRB_LOGOUT_CMD;
240 sp->name = "logout";
241 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
242
243 lio = &sp->u.iocb_cmd;
244 lio->timeout = qla2x00_async_iocb_timeout;
245 sp->done = qla2x00_async_logout_sp_done;
246 rval = qla2x00_start_sp(sp);
247 if (rval != QLA_SUCCESS)
248 goto done_free_sp;
249
250 ql_dbg(ql_dbg_disc, vha, 0x2070,
251 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
252 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
253 fcport->d_id.b.area, fcport->d_id.b.al_pa,
254 fcport->port_name);
255 return rval;
256
257 done_free_sp:
258 sp->free(sp);
259 done:
260 fcport->flags &= ~FCF_ASYNC_SENT;
261 return rval;
262 }
263
264 static void
265 qla2x00_async_adisc_sp_done(void *ptr, int res)
266 {
267 srb_t *sp = ptr;
268 struct scsi_qla_host *vha = sp->vha;
269 struct srb_iocb *lio = &sp->u.iocb_cmd;
270
271 if (!test_bit(UNLOADING, &vha->dpc_flags))
272 qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
273 lio->u.logio.data);
274 sp->free(sp);
275 }
276
277 int
278 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
279 uint16_t *data)
280 {
281 srb_t *sp;
282 struct srb_iocb *lio;
283 int rval;
284
285 rval = QLA_FUNCTION_FAILED;
286 fcport->flags |= FCF_ASYNC_SENT;
287 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
288 if (!sp)
289 goto done;
290
291 sp->type = SRB_ADISC_CMD;
292 sp->name = "adisc";
293 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
294
295 lio = &sp->u.iocb_cmd;
296 lio->timeout = qla2x00_async_iocb_timeout;
297 sp->done = qla2x00_async_adisc_sp_done;
298 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
299 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
300 rval = qla2x00_start_sp(sp);
301 if (rval != QLA_SUCCESS)
302 goto done_free_sp;
303
304 ql_dbg(ql_dbg_disc, vha, 0x206f,
305 "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
306 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
307 fcport->d_id.b.area, fcport->d_id.b.al_pa);
308 return rval;
309
310 done_free_sp:
311 sp->free(sp);
312 done:
313 fcport->flags &= ~FCF_ASYNC_SENT;
314 return rval;
315 }
316
317 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
318 struct event_arg *ea)
319 {
320 fc_port_t *fcport, *conflict_fcport;
321 struct get_name_list_extended *e;
322 u16 i, n, found = 0, loop_id;
323 port_id_t id;
324 u64 wwn;
325 u8 opt = 0, current_login_state;
326
327 fcport = ea->fcport;
328
329 if (ea->rc) { /* rval */
330 if (fcport->login_retry == 0) {
331 fcport->login_retry = vha->hw->login_retry_count;
332 ql_dbg(ql_dbg_disc, vha, 0x20de,
333 "GNL failed Port login retry %8phN, retry cnt=%d.\n",
334 fcport->port_name, fcport->login_retry);
335 }
336 return;
337 }
338
339 if (fcport->last_rscn_gen != fcport->rscn_gen) {
340 ql_dbg(ql_dbg_disc, vha, 0x20df,
341 "%s %8phC rscn gen changed rscn %d|%d \n",
342 __func__, fcport->port_name,
343 fcport->last_rscn_gen, fcport->rscn_gen);
344 qla24xx_post_gidpn_work(vha, fcport);
345 return;
346 } else if (fcport->last_login_gen != fcport->login_gen) {
347 ql_dbg(ql_dbg_disc, vha, 0x20e0,
348 "%s %8phC login gen changed login %d|%d\n",
349 __func__, fcport->port_name,
350 fcport->last_login_gen, fcport->login_gen);
351 return;
352 }
353
354 n = ea->data[0] / sizeof(struct get_name_list_extended);
355
356 ql_dbg(ql_dbg_disc, vha, 0x20e1,
357 "%s %d %8phC n %d %02x%02x%02x lid %d \n",
358 __func__, __LINE__, fcport->port_name, n,
359 fcport->d_id.b.domain, fcport->d_id.b.area,
360 fcport->d_id.b.al_pa, fcport->loop_id);
361
362 for (i = 0; i < n; i++) {
363 e = &vha->gnl.l[i];
364 wwn = wwn_to_u64(e->port_name);
365
366 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
367 continue;
368
369 found = 1;
370 id.b.domain = e->port_id[2];
371 id.b.area = e->port_id[1];
372 id.b.al_pa = e->port_id[0];
373 id.b.rsvd_1 = 0;
374
375 loop_id = le16_to_cpu(e->nport_handle);
376 loop_id = (loop_id & 0x7fff);
377
378 ql_dbg(ql_dbg_disc, vha, 0x20e2,
379 "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
380 __func__, fcport->port_name,
381 e->current_login_state, fcport->fw_login_state,
382 id.b.domain, id.b.area, id.b.al_pa,
383 fcport->d_id.b.domain, fcport->d_id.b.area,
384 fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
385
386 if ((id.b24 != fcport->d_id.b24) ||
387 ((fcport->loop_id != FC_NO_LOOP_ID) &&
388 (fcport->loop_id != loop_id))) {
389 ql_dbg(ql_dbg_disc, vha, 0x20e3,
390 "%s %d %8phC post del sess\n",
391 __func__, __LINE__, fcport->port_name);
392 qlt_schedule_sess_for_deletion(fcport);
393 return;
394 }
395
396 fcport->loop_id = loop_id;
397
398 wwn = wwn_to_u64(fcport->port_name);
399 qlt_find_sess_invalidate_other(vha, wwn,
400 id, loop_id, &conflict_fcport);
401
402 if (conflict_fcport) {
403 /*
404 * Another share fcport share the same loop_id &
405 * nport id. Conflict fcport needs to finish
406 * cleanup before this fcport can proceed to login.
407 */
408 conflict_fcport->conflict = fcport;
409 fcport->login_pause = 1;
410 }
411
412 if (fcport->fc4f_nvme)
413 current_login_state = e->current_login_state >> 4;
414 else
415 current_login_state = e->current_login_state & 0xf;
416
417 switch (current_login_state) {
418 case DSC_LS_PRLI_COMP:
419 ql_dbg(ql_dbg_disc, vha, 0x20e4,
420 "%s %d %8phC post gpdb\n",
421 __func__, __LINE__, fcport->port_name);
422 opt = PDO_FORCE_ADISC;
423 qla24xx_post_gpdb_work(vha, fcport, opt);
424 break;
425 case DSC_LS_PORT_UNAVAIL:
426 default:
427 if (fcport->loop_id == FC_NO_LOOP_ID) {
428 qla2x00_find_new_loop_id(vha, fcport);
429 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
430 }
431 ql_dbg(ql_dbg_disc, vha, 0x20e5,
432 "%s %d %8phC\n",
433 __func__, __LINE__, fcport->port_name);
434 qla24xx_fcport_handle_login(vha, fcport);
435 break;
436 }
437 }
438
439 if (!found) {
440 /* fw has no record of this port */
441 if (fcport->loop_id == FC_NO_LOOP_ID) {
442 qla2x00_find_new_loop_id(vha, fcport);
443 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
444 } else {
445 for (i = 0; i < n; i++) {
446 e = &vha->gnl.l[i];
447 id.b.domain = e->port_id[0];
448 id.b.area = e->port_id[1];
449 id.b.al_pa = e->port_id[2];
450 id.b.rsvd_1 = 0;
451 loop_id = le16_to_cpu(e->nport_handle);
452
453 if (fcport->d_id.b24 == id.b24) {
454 conflict_fcport =
455 qla2x00_find_fcport_by_wwpn(vha,
456 e->port_name, 0);
457
458 ql_dbg(ql_dbg_disc, vha, 0x20e6,
459 "%s %d %8phC post del sess\n",
460 __func__, __LINE__,
461 conflict_fcport->port_name);
462 qlt_schedule_sess_for_deletion
463 (conflict_fcport);
464 }
465
466 if (fcport->loop_id == loop_id) {
467 /* FW already picked this loop id for another fcport */
468 qla2x00_find_new_loop_id(vha, fcport);
469 }
470 }
471 }
472 qla24xx_fcport_handle_login(vha, fcport);
473 }
474 } /* gnl_event */
475
/*
 * qla24xx_async_gnl_sp_done - completion for the Get Name List mailbox IOCB.
 * @s: the completed srb
 * @res: mailbox completion status
 *
 * Records every loop id reported by firmware in the host's loop_id_map,
 * then drains vha->gnl.fcports: each waiting fcport has its async flags
 * cleared and an FCME_GNL_DONE event dispatched so the discovery state
 * machine can continue.
 */
static void
qla24xx_async_gnl_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);

	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;
	ea.event = FCME_GNL_DONE;

	/* mb[1] holds the byte count transferred; derive the entry count. */
	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	spin_lock_irqsave(&vha->gnl.fcports_lock, flags);

	/* Splice the waiters to a private list so new GNL requests may
	 * queue while events are dispatched. */
	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		list_del_init(&fcport->gnl_entry);
		/* sess_lock nests inside gnl.fcports_lock. */
		spin_lock(&vha->hw->tgt.sess_lock);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock(&vha->hw->tgt.sess_lock);
		ea.fcport = fcport;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);

	sp->free(sp);
}
542
/*
 * qla24xx_async_gnl - issue a Get Name List mailbox IOCB for @fcport.
 * @vha: host to query
 * @fcport: port whose discovery prompted the request
 *
 * Multiple fcports may share one GNL: the port is appended to
 * vha->gnl.fcports and, if it is already queued on a pending request,
 * this call returns QLA_SUCCESS without issuing a second mailbox
 * command.  Results land in the vha->gnl.l DMA buffer and are handled
 * by qla24xx_async_gnl_sp_done().
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online)
		goto done;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
	if (!list_empty(&fcport->gnl_entry)) {
		/* Already waiting on a pending GNL; piggy-back on it. */
		spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
		rval = QLA_SUCCESS;
		goto done;
	}

	/* sess_lock nests inside gnl.fcports_lock. */
	spin_lock(&vha->hw->tgt.sess_lock);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->disc_state = DSC_GNL;
	/* Snapshot generations so completion can spot racing RSCN/login. */
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;
	spin_unlock(&vha->hw->tgt.sess_lock);

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;
	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	/* mb[2/3/6/7] carry the 64-bit DMA address of the gnl buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;

	sp->done = qla24xx_async_gnl_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
615
616 int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
617 {
618 struct qla_work_evt *e;
619
620 e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
621 if (!e)
622 return QLA_FUNCTION_FAILED;
623
624 e->u.fcport.fcport = fcport;
625 fcport->flags |= FCF_ASYNC_ACTIVE;
626 return qla2x00_post_work(vha, e);
627 }
628
/*
 * qla24xx_async_gpdb_sp_done - completion for a Get Port Database IOCB.
 * @s: the completed srb
 * @res: mailbox completion status
 *
 * Parses the returned port database into the fcport, dispatches an
 * FCME_GPDB_DONE event carrying the parse/mailbox result, then releases
 * the DMA-pool buffer and the srb.
 */
static
void qla24xx_async_gpdb_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct port_database_24xx *pd;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	int rval = QLA_SUCCESS;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (res) {
		rval = res;
		goto gpd_error_out;
	}

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	rval = __qla24xx_parse_gpdb(vha, fcport, pd);

gpd_error_out:
	memset(&ea, 0, sizeof(ea));
	ea.event = FCME_GPDB_DONE;
	ea.rc = rval;
	ea.fcport = fcport;
	ea.sp = sp;

	qla2x00_fcport_event_handler(vha, &ea);

	/* Buffer was allocated from ha->s_dma_pool in qla24xx_async_gpdb();
	 * freed only after the event handler is done with the data. */
	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(sp);
}
670
671 static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
672 {
673 struct qla_work_evt *e;
674
675 e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
676 if (!e)
677 return QLA_FUNCTION_FAILED;
678
679 e->u.fcport.fcport = fcport;
680
681 return qla2x00_post_work(vha, e);
682 }
683
684 static void
685 qla2x00_async_prli_sp_done(void *ptr, int res)
686 {
687 srb_t *sp = ptr;
688 struct scsi_qla_host *vha = sp->vha;
689 struct srb_iocb *lio = &sp->u.iocb_cmd;
690 struct event_arg ea;
691
692 ql_dbg(ql_dbg_disc, vha, 0x2129,
693 "%s %8phC res %d \n", __func__,
694 sp->fcport->port_name, res);
695
696 sp->fcport->flags &= ~FCF_ASYNC_SENT;
697
698 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
699 memset(&ea, 0, sizeof(ea));
700 ea.event = FCME_PRLI_DONE;
701 ea.fcport = sp->fcport;
702 ea.data[0] = lio->u.logio.data[0];
703 ea.data[1] = lio->u.logio.data[1];
704 ea.iop[0] = lio->u.logio.iop[0];
705 ea.iop[1] = lio->u.logio.iop[1];
706 ea.sp = sp;
707
708 qla2x00_fcport_event_handler(vha, &ea);
709 }
710
711 sp->free(sp);
712 }
713
714 int
715 qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
716 {
717 srb_t *sp;
718 struct srb_iocb *lio;
719 int rval = QLA_FUNCTION_FAILED;
720
721 if (!vha->flags.online)
722 return rval;
723
724 if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
725 fcport->fw_login_state == DSC_LS_PLOGI_COMP ||
726 fcport->fw_login_state == DSC_LS_PRLI_PEND)
727 return rval;
728
729 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
730 if (!sp)
731 return rval;
732
733 fcport->flags |= FCF_ASYNC_SENT;
734 fcport->logout_completed = 0;
735
736 sp->type = SRB_PRLI_CMD;
737 sp->name = "prli";
738 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
739
740 lio = &sp->u.iocb_cmd;
741 lio->timeout = qla2x00_async_iocb_timeout;
742 sp->done = qla2x00_async_prli_sp_done;
743 lio->u.logio.flags = 0;
744
745 if (fcport->fc4f_nvme)
746 lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
747
748 rval = qla2x00_start_sp(sp);
749 if (rval != QLA_SUCCESS) {
750 fcport->flags &= ~FCF_ASYNC_SENT;
751 fcport->flags |= FCF_LOGIN_NEEDED;
752 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
753 goto done_free_sp;
754 }
755
756 ql_dbg(ql_dbg_disc, vha, 0x211b,
757 "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
758 fcport->port_name, sp->handle, fcport->loop_id,
759 fcport->d_id.b24, fcport->login_retry);
760
761 return rval;
762
763 done_free_sp:
764 sp->free(sp);
765 fcport->flags &= ~FCF_ASYNC_SENT;
766 return rval;
767 }
768
769 int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
770 {
771 struct qla_work_evt *e;
772
773 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
774 if (!e)
775 return QLA_FUNCTION_FAILED;
776
777 e->u.fcport.fcport = fcport;
778 e->u.fcport.opt = opt;
779 fcport->flags |= FCF_ASYNC_ACTIVE;
780 return qla2x00_post_work(vha, e);
781 }
782
/*
 * qla24xx_async_gpdb - issue an async Get Port Database mailbox IOCB.
 * @vha: host
 * @fcport: port whose firmware database entry to fetch
 * @opt: option byte placed in mb[10] (e.g. PDO_FORCE_ADISC)
 *
 * Allocates a port_database_24xx buffer from the small DMA pool; buffer
 * and srb are released by qla24xx_async_gpdb_sp_done().
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->disc_state = DSC_GPDB;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	/* Generation snapshots let the event handler detect races. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	/* mb[2/3/6/7] carry the 64-bit DMA address of the pd buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	/* Hand buffer ownership to the completion callback. */
	mbx->u.mbx.in = (void *)pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	/* On failure, fall back to the deferred work path so the GPDB is
	 * retried from the DPC thread. */
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}
853
/*
 * qla24xx_handle_gpdb_event - act on FCME_GPDB_DONE for a port.
 * @vha: host
 * @ea: event arguments (ea->sp is the completed GPDB srb, ea->rc the
 *      combined mailbox/parse result)
 *
 * Validates that no RSCN/login raced the query (via the generation
 * snapshots stored in the srb), then either tears the session down on
 * failure or marks the login complete, registering the fcport with the
 * upper layers on its first successful login.
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	int rval = ea->rc;
	fc_port_t *fcport = ea->fcport;
	unsigned long flags;
	/* Echo back the option byte the request was issued with. */
	u16 opt = ea->sp->u.iocb_cmd.u.mbx.out_mb[10];

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
	    fcport->disc_state, fcport->fw_login_state, rval);

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed rscn %d|%d login %d|%d \n",
		    __func__, fcport->port_name, fcport->last_rscn_gen,
		    fcport->rscn_gen, fcport->last_login_gen,
		    fcport->login_gen);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN raced the GPDB - re-resolve the port id first. */
		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gidpn_work(vha, fcport);
		return;
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (opt != PDO_FORCE_ADISC)
		ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		/* First successful login for this session. */
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		if (!IS_IIDMA_CAPABLE(vha->hw) ||
		    !vha->hw->flags.gpsc_supported) {
			ql_dbg(ql_dbg_disc, vha, 0x20d6,
			    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);

			qla24xx_post_upd_fcport_work(vha, fcport);
		} else {
			/* Query the port speed (GPSC) before registration. */
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);

			qla24xx_post_gpsc_work(vha, fcport);
		}
	} else if (ea->fcport->login_succ) {
		/*
		 * We have an existing session. A late RSCN delivery
		 * must have triggered the session to be re-validate.
		 * session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, fcport->port_name);
		fcport->disc_state = DSC_LOGIN_COMPLETE;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
} /* gpdb event */
929
930
931 static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
932 {
933 u8 login = 0;
934
935 if (qla_tgt_mode_enabled(vha))
936 return;
937
938 if (qla_dual_mode_enabled(vha)) {
939 if (N2N_TOPO(vha->hw)) {
940 u64 mywwn, wwn;
941
942 mywwn = wwn_to_u64(vha->port_name);
943 wwn = wwn_to_u64(fcport->port_name);
944 if (mywwn > wwn)
945 login = 1;
946 else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
947 && time_after_eq(jiffies,
948 fcport->plogi_nack_done_deadline))
949 login = 1;
950 } else {
951 login = 1;
952 }
953 } else {
954 /* initiator mode */
955 login = 1;
956 }
957
958 if (login) {
959 ql_dbg(ql_dbg_disc, vha, 0x20bf,
960 "%s %d %8phC post login\n",
961 __func__, __LINE__, fcport->port_name);
962 fcport->disc_state = DSC_LOGIN_PEND;
963 qla2x00_post_async_login_work(vha, fcport, NULL);
964 }
965 }
966
/*
 * qla24xx_fcport_handle_login - advance a port's discovery/login state
 * machine by one step.
 * @vha: host
 * @fcport: port to progress
 *
 * No-ops when retries are exhausted, the port is not currently found,
 * firmware already has a login in flight, the host is in pure target
 * mode, or an async op is outstanding.  Otherwise dispatches the next
 * action (GNL, PLOGI via qla_chk_n2n_b4_login, GIDPN or GPDB) based on
 * fcport->disc_state.  Always returns 0.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	if (fcport->login_retry == 0)
		return 0;

	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
	    fcport->loop_id);

	fcport->login_retry--;

	/* Firmware is already mid-login for this port. */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return 0;

	/* Wait out the PLOGI NACK deadline before touching the session. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* for pure Target Mode. Login will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return 0;

	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		if (fcport->loop_id == FC_NO_LOOP_ID) {
			/* Need a loop id first - fetch the name list. */
			ql_dbg(ql_dbg_disc, vha, 0x20bd,
			    "%s %d %8phC post gnl\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_post_gnl_work(vha, fcport);
		} else {
			qla_chk_n2n_b4_login(vha, fcport);
		}
		break;

	case DSC_GNL:
		if (fcport->login_pause) {
			/* Conflict session still cleaning up - retry later. */
			fcport->last_rscn_gen = fcport->rscn_gen;
			fcport->last_login_gen = fcport->login_gen;
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			break;
		}

		qla_chk_n2n_b4_login(vha, fcport);
		break;

	case DSC_LOGIN_FAILED:
		ql_dbg(ql_dbg_disc, vha, 0x20d0,
		    "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qla24xx_post_gidpn_work(vha, fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		ql_dbg(ql_dbg_disc, vha, 0x20d1,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
		break;

	default:
		break;
	}

	return 0;
}
1053
1054 static
1055 void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
1056 {
1057 fcport->rscn_gen++;
1058
1059 ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
1060 "%s %8phC DS %d LS %d\n",
1061 __func__, fcport->port_name, fcport->disc_state,
1062 fcport->fw_login_state);
1063
1064 if (fcport->flags & FCF_ASYNC_SENT)
1065 return;
1066
1067 switch (fcport->disc_state) {
1068 case DSC_DELETED:
1069 case DSC_LOGIN_COMPLETE:
1070 qla24xx_post_gpnid_work(fcport->vha, &ea->id);
1071 break;
1072 default:
1073 break;
1074 }
1075 }
1076
1077 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1078 u8 *port_name, void *pla)
1079 {
1080 struct qla_work_evt *e;
1081 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1082 if (!e)
1083 return QLA_FUNCTION_FAILED;
1084
1085 e->u.new_sess.id = *id;
1086 e->u.new_sess.pla = pla;
1087 memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
1088
1089 return qla2x00_post_work(vha, e);
1090 }
1091
1092 static
1093 int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
1094 struct event_arg *ea)
1095 {
1096 fc_port_t *fcport = ea->fcport;
1097
1098 if (test_bit(UNLOADING, &vha->dpc_flags))
1099 return 0;
1100
1101 switch (vha->host->active_mode) {
1102 case MODE_INITIATOR:
1103 case MODE_DUAL:
1104 if (fcport->scan_state == QLA_FCPORT_FOUND)
1105 qla24xx_fcport_handle_login(vha, fcport);
1106 break;
1107
1108 case MODE_TARGET:
1109 default:
1110 /* no-op */
1111 break;
1112 }
1113
1114 return 0;
1115 }
1116
/*
 * qla24xx_handle_relogin_event - process an FCME_RELOGIN request for a port.
 * @vha: host
 * @ea: event arguments (ea->fcport is the port to re-login)
 *
 * Defers (restoring the consumed retry where applicable) while the port
 * is missing, firmware has a login in flight, an async op is pending or
 * a delete is in progress.  If an RSCN arrived since the last pass the
 * port id is re-resolved via GIDPN first; otherwise the login state
 * machine is advanced directly.
 */
static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	if (fcport->scan_state != QLA_FCPORT_FOUND) {
		/* Not visible right now - give the retry back. */
		fcport->login_retry++;
		return;
	}

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	/* Firmware is already mid-login for this port. */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return;

	/* Wait out the PLOGI NACK deadline before re-logging in. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return;
		}
	}

	if (fcport->flags & FCF_ASYNC_SENT) {
		fcport->login_retry++;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		fcport->login_retry++;
		return;
	}

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		/* Port id may have moved - re-resolve it first. */
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_post_gidpn_work(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}
1169
/*
 * Central dispatcher for fcport discovery/login events (FCME_*).
 *
 * Discovery-related events (relogin, RSCN, GIDPN/GPSC/GPNID completions)
 * are dropped while a loop resync is pending or active, since the resync
 * will rediscover the ports anyway.
 */
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport, *f, *tf;
	uint32_t id = 0, mask, rid;
	int rc;

	switch (ea->event) {
	case FCME_RELOGIN:
	case FCME_RSCN:
	case FCME_GIDPN_DONE:
	case FCME_GPSC_DONE:
	case FCME_GPNID_DONE:
		/* Loop resync supersedes individual discovery events. */
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
		    test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
			return;
		break;
	default:
		break;
	}

	switch (ea->event) {
	case FCME_RELOGIN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;

		qla24xx_handle_relogin_event(vha, ea);
		break;
	case FCME_RSCN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;
		/* rsvd_1 carries the RSCN address format qualifier. */
		switch (ea->id.b.rsvd_1) {
		case RSCN_PORT_ADDR:
			fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
			if (!fcport) {
				/* cable moved */
				rc = qla24xx_post_gpnid_work(vha, &ea->id);
				if (rc) {
					ql_log(ql_log_warn, vha, 0xd044,
					    "RSCN GPNID work failed %02x%02x%02x\n",
					    ea->id.b.domain, ea->id.b.area,
					    ea->id.b.al_pa);
				}
			} else {
				ea->fcport = fcport;
				qla24xx_handle_rscn_event(fcport, ea);
			}
			break;
		case RSCN_AREA_ADDR:
		case RSCN_DOM_ADDR:
			if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
				mask = 0xffff00;
				ql_dbg(ql_dbg_async, vha, 0x5044,
				    "RSCN: Area 0x%06x was affected\n",
				    ea->id.b24);
			} else {
				mask = 0xff0000;
				ql_dbg(ql_dbg_async, vha, 0x507a,
				    "RSCN: Domain 0x%06x was affected\n",
				    ea->id.b24);
			}

			/* Fan the RSCN out to every known fcport whose
			 * N_Port ID falls inside the affected area/domain. */
			rid = ea->id.b24 & mask;
			list_for_each_entry_safe(f, tf, &vha->vp_fcports,
			    list) {
				id = f->d_id.b24 & mask;
				if (rid == id) {
					ea->fcport = f;
					qla24xx_handle_rscn_event(f, ea);
				}
			}
			break;
		case RSCN_FAB_ADDR:
		default:
			/* Whole fabric affected: drop everything and force
			 * a full loop resync. */
			ql_log(ql_log_warn, vha, 0xd045,
			    "RSCN: Fabric was affected. Addr format %d\n",
			    ea->id.b.rsvd_1);
			qla2x00_mark_all_devices_lost(vha, 1);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		}
		break;
	case FCME_GIDPN_DONE:
		qla24xx_handle_gidpn_event(vha, ea);
		break;
	case FCME_GNL_DONE:
		qla24xx_handle_gnl_done_event(vha, ea);
		break;
	case FCME_GPSC_DONE:
		qla24xx_post_upd_fcport_work(vha, ea->fcport);
		break;
	case FCME_PLOGI_DONE:	/* Initiator side sent LLIOCB */
		qla24xx_handle_plogi_done_event(vha, ea);
		break;
	case FCME_PRLI_DONE:
		qla24xx_handle_prli_done_event(vha, ea);
		break;
	case FCME_GPDB_DONE:
		qla24xx_handle_gpdb_event(vha, ea);
		break;
	case FCME_GPNID_DONE:
		qla24xx_handle_gpnid_event(vha, ea);
		break;
	case FCME_GFFID_DONE:
		qla24xx_handle_gffid_event(vha, ea);
		break;
	case FCME_DELETE_DONE:
		qla24xx_handle_delete_done_event(vha, ea);
		break;
	default:
		/* An unknown event type is a driver bug. */
		BUG_ON(1);
		break;
	}
}
1283
1284 static void
1285 qla2x00_tmf_iocb_timeout(void *data)
1286 {
1287 srb_t *sp = data;
1288 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1289
1290 tmf->u.tmf.comp_status = CS_TIMEOUT;
1291 complete(&tmf->u.tmf.comp);
1292 }
1293
1294 static void
1295 qla2x00_tmf_sp_done(void *ptr, int res)
1296 {
1297 srb_t *sp = ptr;
1298 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1299
1300 complete(&tmf->u.tmf.comp);
1301 }
1302
/*
 * qla2x00_async_tm_cmd() - Issue a task-management IOCB and wait for it.
 * @fcport: target port the TM request is aimed at
 * @flags: TM flags (e.g. TCF_LUN_RESET)
 * @lun: LUN the TM request applies to
 * @tag: initial value for the TM data word
 *
 * Sends the TM request as an async SRB, then sleeps on a completion that
 * is signalled from the done/timeout callbacks.  Afterwards (unless the
 * driver is unloading; FX00 parts excluded) a marker IOCB is issued so
 * the firmware resynchronizes the affected ID/LUN.
 *
 * Returns the TM completion data word, or QLA_FUNCTION_FAILED when the
 * SRB could not be allocated or started.
 */
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
	uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	/* Populate the SRB as a TM command before starting it. */
	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qla2x00_tmf_sp_done;
	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	/* Block until the done or timeout callback fires. */
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		flags = tm_iocb->u.tmf.flags;
		lun = (uint16_t)tm_iocb->u.tmf.lun;

		/* Issue Marker IOCB */
		qla2x00_marker(vha, vha->hw->req_q_map[0],
		    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	}

done_free_sp:
	sp->free(sp);
done:
	return rval;
}
1360
1361 static void
1362 qla24xx_abort_iocb_timeout(void *data)
1363 {
1364 srb_t *sp = data;
1365 struct srb_iocb *abt = &sp->u.iocb_cmd;
1366
1367 abt->u.abt.comp_status = CS_TIMEOUT;
1368 complete(&abt->u.abt.comp);
1369 }
1370
1371 static void
1372 qla24xx_abort_sp_done(void *ptr, int res)
1373 {
1374 srb_t *sp = ptr;
1375 struct srb_iocb *abt = &sp->u.iocb_cmd;
1376
1377 del_timer(&sp->u.iocb_cmd.timer);
1378 complete(&abt->u.abt.comp);
1379 }
1380
1381 int
1382 qla24xx_async_abort_cmd(srb_t *cmd_sp)
1383 {
1384 scsi_qla_host_t *vha = cmd_sp->vha;
1385 fc_port_t *fcport = cmd_sp->fcport;
1386 struct srb_iocb *abt_iocb;
1387 srb_t *sp;
1388 int rval = QLA_FUNCTION_FAILED;
1389
1390 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1391 if (!sp)
1392 goto done;
1393
1394 abt_iocb = &sp->u.iocb_cmd;
1395 sp->type = SRB_ABT_CMD;
1396 sp->name = "abort";
1397 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
1398 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
1399 sp->done = qla24xx_abort_sp_done;
1400 abt_iocb->timeout = qla24xx_abort_iocb_timeout;
1401 init_completion(&abt_iocb->u.abt.comp);
1402
1403 rval = qla2x00_start_sp(sp);
1404 if (rval != QLA_SUCCESS)
1405 goto done_free_sp;
1406
1407 ql_dbg(ql_dbg_async, vha, 0x507c,
1408 "Abort command issued - hdl=%x, target_id=%x\n",
1409 cmd_sp->handle, fcport->tgt_id);
1410
1411 wait_for_completion(&abt_iocb->u.abt.comp);
1412
1413 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
1414 QLA_SUCCESS : QLA_FUNCTION_FAILED;
1415
1416 done_free_sp:
1417 sp->free(sp);
1418 sp->fcport->flags &= ~FCF_ASYNC_SENT;
1419 done:
1420 return rval;
1421 }
1422
1423 int
1424 qla24xx_async_abort_command(srb_t *sp)
1425 {
1426 unsigned long flags = 0;
1427
1428 uint32_t handle;
1429 fc_port_t *fcport = sp->fcport;
1430 struct scsi_qla_host *vha = fcport->vha;
1431 struct qla_hw_data *ha = vha->hw;
1432 struct req_que *req = vha->req;
1433
1434 spin_lock_irqsave(&ha->hardware_lock, flags);
1435 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1436 if (req->outstanding_cmds[handle] == sp)
1437 break;
1438 }
1439 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1440 if (handle == req->num_outstanding_cmds) {
1441 /* Command not found. */
1442 return QLA_FUNCTION_FAILED;
1443 }
1444 if (sp->type == SRB_FXIOCB_DCMD)
1445 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
1446 FXDISC_ABORT_IOCTL);
1447
1448 return qla24xx_async_abort_cmd(sp);
1449 }
1450
/*
 * Handle the FCME_PRLI_DONE event.
 *
 * On MBS_COMMAND_COMPLETE the PRLI succeeded: record the current chip
 * reset generation, arm logout-on-delete, and queue a port-database
 * fetch.  Any other status is logged as unhandled, except that an N2N
 * (point-to-point) port first gets one more PRLI attempt with NVMe mode
 * turned off.
 */
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		if (ea->fcport->n2n_flag) {
			/* N2N: retry PRLI as plain FCP instead of NVMe. */
			ql_dbg(ql_dbg_disc, vha, 0x2118,
			    "%s %d %8phC post fc4 prli\n",
			    __func__, __LINE__, ea->fcport->port_name);
			ea->fcport->fc4f_nvme = 0;
			ea->fcport->n2n_flag = 0;
			qla24xx_post_prli_work(vha, ea->fcport);
		}
		ql_dbg(ql_dbg_disc, vha, 0x2119,
		    "%s %d %8phC unhandle event of %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
		break;
	}
}
1479
/*
 * Handle the FCME_PLOGI_DONE event (completion of an initiator PLOGI).
 *
 * Drives the post-PLOGI discovery state machine: on success either a
 * PRLI (NVMe ports) or a port-database fetch is queued; on loop-id or
 * nport-id collisions the conflicting handle/session is cleaned up
 * before the login can proceed.
 */
static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (ea->fcport->fc4f_nvme) {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
			    "%s %d %8phC post prli\n",
			    __func__, __LINE__, ea->fcport->port_name);
			qla24xx_post_prli_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ea,
			    "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->loop_id, ea->fcport->d_id.b24);

			/* Reserve the loop id in the map, then fetch the
			 * port database to finish the login. */
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			ea->fcport->loop_id = FC_NO_LOOP_ID;
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		ea->fcport->disc_state = DSC_LOGIN_FAILED;
		/* Retry via DPC if firmware asked for it; else mark lost. */
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area = (ea->iop[1] >> 8) & 0xff;
		cid.b.al_pa = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC LoopID 0x%x in use post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id);

		/* Switch-reserved addresses keep the loop id reserved in
		 * the map; otherwise release ours and let GNL re-assign. */
		if (IS_SW_RESV_ADDR(cid)) {
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			ea->fcport->loop_id = FC_NO_LOOP_ID;
		} else {
			qla2x00_clear_loop_id(ea->fcport);
		}
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		ql_dbg(ql_dbg_disc, vha, 0x20ed,
		    "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
		    ea->fcport->d_id.b.al_pa);

		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport share the same loop_id/nport id.
			 * Conflict fcport needs to finish cleanup before this
			 * fcport can proceed to login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
			qla2x00_clear_loop_id(ea->fcport);
			qla24xx_post_gidpn_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			/* No local session owns the conflicting handle:
			 * take it over and tear this session down. */
			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}
1587
1588 void
1589 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1590 uint16_t *data)
1591 {
1592 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1593 qlt_logo_completion_handler(fcport, data[0]);
1594 fcport->login_gen++;
1595 return;
1596 }
1597
1598 void
1599 qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1600 uint16_t *data)
1601 {
1602 if (data[0] == MBS_COMMAND_COMPLETE) {
1603 qla2x00_update_fcport(vha, fcport);
1604
1605 return;
1606 }
1607
1608 /* Retry login. */
1609 fcport->flags &= ~FCF_ASYNC_SENT;
1610 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
1611 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1612 else
1613 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1614
1615 return;
1616 }
1617
1618 /****************************************************************************/
1619 /* QLogic ISP2x00 Hardware Support Functions. */
1620 /****************************************************************************/
1621
/*
 * qla83xx_nic_core_fw_load() - Arbitrate/load the ISP83xx NIC core f/w.
 * @vha: SCSI host context
 *
 * Runs under the inter-driver-coordination (IDC) lock: registers this
 * function's presence, decides reset ownership, negotiates the IDC
 * major/minor version registers, and finally runs the IDC state handler.
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED on presence/version errors.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		/* Owner advertises DEV_READY once port config is readable. */
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}
1694
1695 /*
1696 * qla2x00_initialize_adapter
1697 * Initialize board.
1698 *
1699 * Input:
1700 * ha = adapter block pointer.
1701 *
1702 * Returns:
1703 * 0 = success
1704 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Start every init run from a clean statistics/flag slate. */
	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	/* Queue 0 (the base request/response queue) always exists. */
	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * should honor the reset. */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* Load firmware only when no valid RISC code is already running. */
	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	/* Rings are only initialized when we act as an initiator. */
	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}
1830
1831 /**
1832 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
1833 * @ha: HA context
1834 *
1835 * Returns 0 on success.
1836 */
1837 int
1838 qla2100_pci_config(scsi_qla_host_t *vha)
1839 {
1840 uint16_t w;
1841 unsigned long flags;
1842 struct qla_hw_data *ha = vha->hw;
1843 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1844
1845 pci_set_master(ha->pdev);
1846 pci_try_set_mwi(ha->pdev);
1847
1848 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
1849 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1850 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1851
1852 pci_disable_rom(ha->pdev);
1853
1854 /* Get PCI bus information. */
1855 spin_lock_irqsave(&ha->hardware_lock, flags);
1856 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
1857 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1858
1859 return QLA_SUCCESS;
1860 }
1861
/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @vha: SCSI host context
 *
 * Returns QLA_SUCCESS (0).
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Force parity and system-error reporting on. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	/* 2322/6322 parts keep legacy INTx enabled. */
	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		/* A true 2300: MWI must stay off (see comment above). */
		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x0);
		RD_REG_WORD(&reg->ctrl_status);

		/* Release RISC module. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
1943
1944 /**
1945 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
1946 * @ha: HA context
1947 *
1948 * Returns 0 on success.
1949 */
1950 int
1951 qla24xx_pci_config(scsi_qla_host_t *vha)
1952 {
1953 uint16_t w;
1954 unsigned long flags = 0;
1955 struct qla_hw_data *ha = vha->hw;
1956 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1957
1958 pci_set_master(ha->pdev);
1959 pci_try_set_mwi(ha->pdev);
1960
1961 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
1962 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1963 w &= ~PCI_COMMAND_INTX_DISABLE;
1964 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1965
1966 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
1967
1968 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
1969 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
1970 pcix_set_mmrbc(ha->pdev, 2048);
1971
1972 /* PCIe -- adjust Maximum Read Request Size (2048). */
1973 if (pci_is_pcie(ha->pdev))
1974 pcie_set_readrq(ha->pdev, 4096);
1975
1976 pci_disable_rom(ha->pdev);
1977
1978 ha->chip_revision = ha->pdev->revision;
1979
1980 /* Get PCI bus information. */
1981 spin_lock_irqsave(&ha->hardware_lock, flags);
1982 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
1983 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1984
1985 return QLA_SUCCESS;
1986 }
1987
1988 /**
1989 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
1990 * @ha: HA context
1991 *
1992 * Returns 0 on success.
1993 */
1994 int
1995 qla25xx_pci_config(scsi_qla_host_t *vha)
1996 {
1997 uint16_t w;
1998 struct qla_hw_data *ha = vha->hw;
1999
2000 pci_set_master(ha->pdev);
2001 pci_try_set_mwi(ha->pdev);
2002
2003 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2004 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2005 w &= ~PCI_COMMAND_INTX_DISABLE;
2006 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2007
2008 /* PCIe -- adjust Maximum Read Request Size (2048). */
2009 if (pci_is_pcie(ha->pdev))
2010 pcie_set_readrq(ha->pdev, 4096);
2011
2012 pci_disable_rom(ha->pdev);
2013
2014 ha->chip_revision = ha->pdev->revision;
2015
2016 return QLA_SUCCESS;
2017 }
2018
2019 /**
2020 * qla2x00_isp_firmware() - Choose firmware image.
2021 * @ha: HA context
2022 *
2023 * Returns 0 on success.
2024 */
2025 static int
2026 qla2x00_isp_firmware(scsi_qla_host_t *vha)
2027 {
2028 int rval;
2029 uint16_t loop_id, topo, sw_cap;
2030 uint8_t domain, area, al_pa;
2031 struct qla_hw_data *ha = vha->hw;
2032
2033 /* Assume loading risc code */
2034 rval = QLA_FUNCTION_FAILED;
2035
2036 if (ha->flags.disable_risc_code_load) {
2037 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
2038
2039 /* Verify checksum of loaded RISC code. */
2040 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
2041 if (rval == QLA_SUCCESS) {
2042 /* And, verify we are not in ROM code. */
2043 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2044 &area, &domain, &topo, &sw_cap);
2045 }
2046 }
2047
2048 if (rval)
2049 ql_dbg(ql_dbg_init, vha, 0x007a,
2050 "**** Load RISC code ****.\n");
2051
2052 return (rval);
2053 }
2054
/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: SCSI host context
 *
 * Performs a full soft reset of a 2x00-series ISP: bus-mastering is
 * disabled, the RISC is paused, FPM and frame-buffer blocks are reset,
 * the ISP soft reset is pulsed, and finally the RISC is released with
 * bus-mastering restored.  The entire register sequence runs under the
 * hardware lock with interrupts disabled.
 */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;

	/* Device gone (e.g. surprise removal): nothing to reset. */
	if (unlikely(pci_channel_offline(ha->pdev)))
		return;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			/* Poll until the RISC acknowledges the pause. */
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	/* Clear any latched RISC/host interrupts before the soft reset. */
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to for a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for the firmware to drop the BUSY mailbox status. */
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
2198
2199 /**
2200 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
2201 *
2202 * Returns 0 on success.
2203 */
2204 static int
2205 qla81xx_reset_mpi(scsi_qla_host_t *vha)
2206 {
2207 uint16_t mb[4] = {0x1010, 0, 1, 0};
2208
2209 if (!IS_QLA81XX(vha->hw))
2210 return QLA_SUCCESS;
2211
2212 return qla81xx_write_mpi_register(vha, mb);
2213 }
2214
2215 /**
2216 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
2217 * @ha: HA context
2218 *
2219 * Returns 0 on success.
2220 */
/*
 * Full RISC reset for FWI-2 (ISP24xx-family) chips: shut down DMA, issue a
 * soft reset (with an MPI firmware reset if flagged), then release the RISC
 * and wait for the firmware to become ready.  Each completed stage is
 * recorded in ha->fw_dump_cap_flags so a later firmware dump can show how
 * far the reset progressed.  Returns QLA_SUCCESS or QLA_FUNCTION_TIMEOUT.
 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Poll up to 30000 x 10us for the DMA engines to quiesce. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status),
	    (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Config-space read flushes the posted soft-reset write. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	RD_REG_WORD(&reg->mailbox0);
	/* mailbox0 returns to 0 when the firmware is done; ~10000 x 5us. */
	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	RD_REG_DWORD(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			/* Retry the whole ISP abort up to 5 times (static
			 * counter persists across calls). */
			if (++abts_cnt < 5) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	/* Wait for the released RISC to signal readiness via mailbox0. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_WORD(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}
2349
/*
 * qla25xx_read_risc_sema_reg() - Read the RISC semaphore register into
 * *data by first selecting the RISC register window via iobase_addr.
 */
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	/* Select the RISC register window, then read the semaphore word. */
	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);

}
2359
/*
 * qla25xx_write_risc_sema_reg() - Write 'data' to the RISC semaphore
 * register after selecting the RISC register window via iobase_addr.
 */
static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	/* Select the RISC register window, then write the semaphore word. */
	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
}
2368
/*
 * qla25xx_manipulate_risc_semaphore() - Acquire the RISC hardware
 * semaphore before a chip reset, applied only to specific subsystem IDs
 * (0x0175 / 0x0240).  The RISC is paused first; the routine then tries to
 * set the semaphore, clearing a stale FORCE indication if present, and
 * force-sets it once TIMEOUT_TOTAL_ELAPSED is exceeded.
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	/* Workaround applies only to these subsystem devices. */
	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	/* Try to take the semaphore, polling every delta_msec. */
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	/* Got it and nobody has force-set it: done. */
	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* Stale FORCE bit: clear it and wait for it to drop, then retry. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	/* Out of time: take the semaphore by force. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}
2429
/**
 * qla24xx_reset_chip() - Reset ISP24xx chip.
 * @vha: HA context
 */
2436 void
2437 qla24xx_reset_chip(scsi_qla_host_t *vha)
2438 {
2439 struct qla_hw_data *ha = vha->hw;
2440
2441 if (pci_channel_offline(ha->pdev) &&
2442 ha->flags.pci_channel_io_perm_failure) {
2443 return;
2444 }
2445
2446 ha->isp_ops->disable_intrs(ha);
2447
2448 qla25xx_manipulate_risc_semaphore(vha);
2449
2450 /* Perform RISC reset. */
2451 qla24xx_reset_risc(vha);
2452 }
2453
/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
/*
 * Legacy-ISP (2x00) chip diagnostic: soft-reset the chip, release the
 * RISC, verify the product ID mailbox values, size the firmware transfer
 * buffer, and run the mailbox register wrap test.  Returns QLA_SUCCESS or
 * QLA_FUNCTION_FAILED.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags = 0;
	uint16_t data;
	uint32_t cnt;
	uint16_t mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b,
	    "Testing device at %lx.\n", (u_long)&reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	/* Poll up to 6000000 x 5us for the soft-reset bit to clear. */
	data = qla2x00_debounce_register(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for mailbox 0 to leave the BUSY state. */
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	/* Mailbox test issues commands; drop the lock around it. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}
2577
/**
 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
2584 int
2585 qla24xx_chip_diag(scsi_qla_host_t *vha)
2586 {
2587 int rval;
2588 struct qla_hw_data *ha = vha->hw;
2589 struct req_que *req = ha->req_q_map[0];
2590
2591 if (IS_P3P_TYPE(ha))
2592 return QLA_SUCCESS;
2593
2594 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
2595
2596 rval = qla2x00_mbx_reg_test(vha);
2597 if (rval) {
2598 ql_log(ql_log_warn, vha, 0x0082,
2599 "Failed mailbox send register test.\n");
2600 } else {
2601 /* Flag a successful rval */
2602 rval = QLA_SUCCESS;
2603 }
2604
2605 return rval;
2606 }
2607
/*
 * qla2x00_alloc_fw_dump() - Size and allocate the firmware dump buffer for
 * the current chip generation, enabling the FCE and EFT trace buffers where
 * supported.  The routine is idempotent: it returns early if a dump buffer
 * already exists.  For ISP27xx the size comes from the fwdump template;
 * everything else is computed from the per-chip fixed region, firmware
 * memory span, and queue/trace sizes.
 */
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
	    eft_size, fce_size, mq_size;
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	if (ha->fw_dump) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "Firmware dump already allocated.\n");
		return;
	}

	ha->fw_dumped = 0;
	ha->fw_dump_cap_flags = 0;
	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
	req_q_size = rsp_q_size = 0;

	/* ISP27xx sizes via template; only FCE/EFT setup is shared. */
	if (IS_QLA27XX(ha))
		goto try_fce;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		fixed_size = sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		/* Per-generation register-dump region size. */
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
		else if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);

		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable) {
			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
				mq_size = sizeof(struct qla2xxx_mq_chain);
			/*
			 * Allocate maximum buffer size for all queues.
			 * Resizing must be done at end-of-dump processing.
			 */
			mq_size += ha->max_req_queues *
			    (req->length * sizeof(request_t));
			mq_size += ha->max_rsp_queues *
			    (rsp->length * sizeof(response_t));
		}
		if (ha->tgt.atio_ring)
			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
		/* Allocate memory for Fibre Channel Event Buffer. */
		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
		    !IS_QLA27XX(ha))
			goto try_eft;

try_fce:
		/* Re-allocating: release any previous FCE buffer first. */
		if (ha->fce)
			dma_free_coherent(&ha->pdev->dev,
			    FCE_SIZE, ha->fce, ha->fce_dma);

		/* Allocate memory for Fibre Channel Event Buffer. */
		tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			ql_log(ql_log_warn, vha, 0x00be,
			    "Unable to allocate (%d KB) for FCE.\n",
			    FCE_SIZE / 1024);
			goto try_eft;
		}

		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
		    ha->fce_mb, &ha->fce_bufs);
		if (rval) {
			ql_log(ql_log_warn, vha, 0x00bf,
			    "Unable to initialize FCE (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
			    tc_dma);
			ha->flags.fce_enabled = 0;
			goto try_eft;
		}
		ql_dbg(ql_dbg_init, vha, 0x00c0,
		    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);

		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
		ha->flags.fce_enabled = 1;
		ha->fce_dma = tc_dma;
		ha->fce = tc;

try_eft:
		/* Re-allocating: release any previous EFT buffer first. */
		if (ha->eft)
			dma_free_coherent(&ha->pdev->dev,
			    EFT_SIZE, ha->eft, ha->eft_dma);

		/* Allocate memory for Extended Trace Buffer. */
		tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			ql_log(ql_log_warn, vha, 0x00c1,
			    "Unable to allocate (%d KB) for EFT.\n",
			    EFT_SIZE / 1024);
			goto cont_alloc;
		}

		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
		if (rval) {
			ql_log(ql_log_warn, vha, 0x00c2,
			    "Unable to initialize EFT (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
			    tc_dma);
			goto cont_alloc;
		}
		ql_dbg(ql_dbg_init, vha, 0x00c3,
		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);

		eft_size = EFT_SIZE;
		ha->eft_dma = tc_dma;
		ha->eft = tc;
	}

cont_alloc:
	if (IS_QLA27XX(ha)) {
		if (!ha->fw_dump_template) {
			ql_log(ql_log_warn, vha, 0x00ba,
			    "Failed missing fwdump template\n");
			return;
		}
		dump_size = qla27xx_fwdt_calculate_dump_size(vha);
		ql_dbg(ql_dbg_init, vha, 0x00fa,
		    "-> allocating fwdump (%x bytes)...\n", dump_size);
		goto allocate;
	}

	/* Total dump = header + fixed regs + fw memory + queues + traces. */
	req_q_size = req->length * sizeof(request_t);
	rsp_q_size = rsp->length * sizeof(response_t);
	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
	ha->chain_offset = dump_size;
	dump_size += mq_size + fce_size;

	if (ha->exchoffld_buf)
		dump_size += sizeof(struct qla2xxx_offld_chain) +
			ha->exchoffld_size;
	if (ha->exlogin_buf)
		dump_size += sizeof(struct qla2xxx_offld_chain) +
			ha->exlogin_size;

allocate:
	ha->fw_dump = vmalloc(dump_size);
	if (!ha->fw_dump) {
		ql_log(ql_log_warn, vha, 0x00c4,
		    "Unable to allocate (%d KB) for firmware dump.\n",
		    dump_size / 1024);

		/* No dump buffer: the trace buffers are useless; free them. */
		if (ha->fce) {
			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
			    ha->fce_dma);
			ha->fce = NULL;
			ha->fce_dma = 0;
		}

		if (ha->eft) {
			dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
			    ha->eft_dma);
			ha->eft = NULL;
			ha->eft_dma = 0;
		}
		return;
	}
	ha->fw_dump_len = dump_size;
	ql_dbg(ql_dbg_init, vha, 0x00c5,
	    "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);

	/* ISP27xx dumps are template-driven; no legacy header to fill. */
	if (IS_QLA27XX(ha))
		return;

	ha->fw_dump->signature[0] = 'Q';
	ha->fw_dump->signature[1] = 'L';
	ha->fw_dump->signature[2] = 'G';
	ha->fw_dump->signature[3] = 'C';
	ha->fw_dump->version = htonl(1);

	ha->fw_dump->fixed_size = htonl(fixed_size);
	ha->fw_dump->mem_size = htonl(mem_size);
	ha->fw_dump->req_q_size = htonl(req_q_size);
	ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

	ha->fw_dump->eft_size = htonl(eft_size);
	ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
	ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));

	ha->fw_dump->header_size =
		htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
2809
/*
 * qla81xx_mpi_sync() - On ISP81xx, copy the MPS bits (mask 0xe0) from PCI
 * config space offset 0x54 into firmware RAM word 0x7a15 so the two agree.
 * RAM word 0x7c00 is used as a semaphore around the read-modify-write.
 * Returns the status of the last RAM-word operation.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK 0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Acquire the firmware semaphore (RAM word 0x7c00 <- 1). */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	/* Already in sync: nothing to write. */
	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		goto done_release;

	/* Splice the config-space MPS bits into the firmware word. */
	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	/* Release the semaphore; note this overwrites rval. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
}
2856
2857 int
2858 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
2859 {
2860 /* Don't try to reallocate the array */
2861 if (req->outstanding_cmds)
2862 return QLA_SUCCESS;
2863
2864 if (!IS_FWI2_CAPABLE(ha))
2865 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
2866 else {
2867 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
2868 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
2869 else
2870 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
2871 }
2872
2873 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
2874 req->num_outstanding_cmds, GFP_KERNEL);
2875
2876 if (!req->outstanding_cmds) {
2877 /*
2878 * Try to allocate a minimal size just so we can get through
2879 * initialization.
2880 */
2881 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
2882 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
2883 req->num_outstanding_cmds, GFP_KERNEL);
2884
2885 if (!req->outstanding_cmds) {
2886 ql_log(ql_log_fatal, NULL, 0x0126,
2887 "Failed to allocate memory for "
2888 "outstanding_cmds for req_que %p.\n", req);
2889 req->num_outstanding_cmds = 0;
2890 return QLA_FUNCTION_FAILED;
2891 }
2892 }
2893
2894 return QLA_SUCCESS;
2895 }
2896
/*
 * PRINT_FIELD() - if bit _flag is set in a0->_field, append _str to the
 * buffer cursor 'ptr', preceded by "|" when a previous entry was printed.
 * Relies on caller-scope locals: a0, ptr, p (printed-anything flag), len,
 * and leftover (remaining space).  NOTE(review): snprintf() returns the
 * would-be length, so 'leftover' can underflow if the combined entries
 * outgrow the buffer -- confirm STR_LEN covers the longest combination.
 */
#define PRINT_FIELD(_field, _flag, _str) { \
	if (a0->_field & _flag) {\
		if (p) {\
			strcat(ptr, "|");\
			ptr++;\
			leftover--;\
		} \
		len = snprintf(ptr, leftover, "%s", _str); \
		p = 1;\
		leftover -= len;\
		ptr += len; \
	} \
}
2910
2911 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
2912 {
2913 #define STR_LEN 64
2914 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
2915 u8 str[STR_LEN], *ptr, p;
2916 int leftover, len;
2917
2918 memset(str, 0, STR_LEN);
2919 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
2920 ql_dbg(ql_dbg_init, vha, 0x015a,
2921 "SFP MFG Name: %s\n", str);
2922
2923 memset(str, 0, STR_LEN);
2924 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
2925 ql_dbg(ql_dbg_init, vha, 0x015c,
2926 "SFP Part Name: %s\n", str);
2927
2928 /* media */
2929 memset(str, 0, STR_LEN);
2930 ptr = str;
2931 leftover = STR_LEN;
2932 p = len = 0;
2933 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
2934 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
2935 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
2936 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
2937 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
2938 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
2939 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
2940 ql_dbg(ql_dbg_init, vha, 0x0160,
2941 "SFP Media: %s\n", str);
2942
2943 /* link length */
2944 memset(str, 0, STR_LEN);
2945 ptr = str;
2946 leftover = STR_LEN;
2947 p = len = 0;
2948 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
2949 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
2950 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
2951 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
2952 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
2953 ql_dbg(ql_dbg_init, vha, 0x0196,
2954 "SFP Link Length: %s\n", str);
2955
2956 memset(str, 0, STR_LEN);
2957 ptr = str;
2958 leftover = STR_LEN;
2959 p = len = 0;
2960 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
2961 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
2962 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
2963 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
2964 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
2965 ql_dbg(ql_dbg_init, vha, 0x016e,
2966 "SFP FC Link Tech: %s\n", str);
2967
2968 if (a0->length_km)
2969 ql_dbg(ql_dbg_init, vha, 0x016f,
2970 "SFP Distant: %d km\n", a0->length_km);
2971 if (a0->length_100m)
2972 ql_dbg(ql_dbg_init, vha, 0x0170,
2973 "SFP Distant: %d m\n", a0->length_100m*100);
2974 if (a0->length_50um_10m)
2975 ql_dbg(ql_dbg_init, vha, 0x0189,
2976 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
2977 if (a0->length_62um_10m)
2978 ql_dbg(ql_dbg_init, vha, 0x018a,
2979 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
2980 if (a0->length_om4_10m)
2981 ql_dbg(ql_dbg_init, vha, 0x0194,
2982 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
2983 if (a0->length_om3_10m)
2984 ql_dbg(ql_dbg_init, vha, 0x0195,
2985 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
2986 }
2987
2988
/*
 * Return Code:
 * QLA_SUCCESS: no action
 * QLA_INTERFACE_ERROR: SFP is not present.
 * QLA_FUNCTION_FAILED: a new SFP was detected.
 */
/*
 * qla24xx_detect_sfp() - Read the SFP transceiver page and classify it as
 * long- or short-range, caching the result in ha->flags.detected_lr_sfp
 * and ha->long_range_distance.
 */
int
qla24xx_detect_sfp(scsi_qla_host_t *vha)
{
	int rc = QLA_SUCCESS;
	struct sff_8247_a0 *a;
	struct qla_hw_data *ha = vha->hw;

	if (!AUTO_DETECT_SFP_SUPPORT(vha))
		goto out;

	/* Pull the SFF-8472 A0 page into ha->sfp_data. */
	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
	if (rc)
		goto out;

	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
	qla2xxx_print_sfp_info(vha);

	if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
		/* long range */
		ha->flags.detected_lr_sfp = 1;

		/* >5 km (or >5000 m via the 100 m field) => 10K setting. */
		if (a->length_km > 5 || a->length_100m > 50)
			ha->long_range_distance = LR_DISTANCE_10K;
		else
			ha->long_range_distance = LR_DISTANCE_5K;

		if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
			ql_dbg(ql_dbg_async, vha, 0x507b,
			    "Detected Long Range SFP.\n");
	} else {
		/* short range */
		ha->flags.detected_lr_sfp = 0;
		if (ha->flags.using_lr_setting)
			ql_dbg(ql_dbg_async, vha, 0x5084,
			    "Detected Short Range SFP.\n");
	}

	/* During initial bring-up a changed SFP is not reported as "new". */
	if (!vha->flags.init_done)
		rc = QLA_SUCCESS;
out:
	return rc;
}
3037
/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
/*
 * Load the RISC firmware, verify its checksum, start it, and retrieve the
 * firmware version and resource counts.  On the way out, re-enable RAM
 * parity checking on ISP23xx-class parts and probe Flash Access Control
 * (FAC) support.  Returns QLA_SUCCESS or a mailbox/firmware error code.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;

	if (IS_P3P_TYPE(ha)) {
		/* ISP82xx: firmware lifecycle is managed elsewhere; stop
		 * any running firmware and go straight to NPIV setup. */
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		RD_REG_WORD(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				qla24xx_detect_sfp(vha);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
					 (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/* Clamp so vports + physical port
					 * is a MIN_MULTI_ID_FABRIC multiple. */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				/* First firmware start: set up dump buffer
				 * (unless disabled by module param or P3P). */
				if (!fw_major_version && ql2xallocfwdump
				    && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);
			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		RD_REG_WORD(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (IS_QLA27XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			/* Firmware reports sector size in 4-byte words. */
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			/* FAC is optional on 83xx/27xx; continue without. */
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}
3189
/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 */
3199 void
3200 qla2x00_init_response_q_entries(struct rsp_que *rsp)
3201 {
3202 uint16_t cnt;
3203 response_t *pkt;
3204
3205 rsp->ring_ptr = rsp->ring;
3206 rsp->ring_index = 0;
3207 rsp->status_srb = NULL;
3208 pkt = rsp->ring_ptr;
3209 for (cnt = 0; cnt < rsp->length; cnt++) {
3210 pkt->signature = RESPONSE_PROCESSED;
3211 pkt++;
3212 }
3213 }
3214
/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 */
/*
 * Read the current firmware options, fold in the NVRAM serial-link
 * settings (1G and 2G swing/emphasis/sensitivity), FCP2/LED/ISP6312
 * quirks and P2P FLOGI retry, then write the options back to firmware.
 * ISP2100/2200 have none of these knobs and return after the read.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    (uint8_t *)&ha->fw_seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	/* seriallink_options[3] BIT_2 = "apply custom serial-link tuning". */
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		/* fw_options[10]: emphasis in bits 15:14, swing in 10:8,
		 * tx/rx sensitivity packed into the low byte per chip. */
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}
3304
/*
 * FWI-2 variant of the firmware-options update: apply ABTS hold, P2P
 * FLOGI retry, ATIO-queue redirection and exchange-tracking bits, push
 * the options to firmware if any are set, then program the serial-link
 * (serdes) parameters from NVRAM when enabled.  No-op on P3P parts.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
		/* Only meaningful when target or dual mode is active. */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~BIT_4;
	}

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	/* BIT_0 of options24[0] gates the custom serdes parameters. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}
3369
/**
 * qla2x00_config_rings() - Program ring state for ISP2xxx parts.
 * @vha: HA context
 *
 * Records request/response queue 0 geometry and DMA addresses in the
 * initialization control block and zeroes the hardware in/out pointer
 * registers; a final read flushes the PCI posting buffer.
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	/* 64-bit DMA addresses are split into low/high 32-bit halves. */
	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	/* Reset the hardware ring pointers to the start of the rings. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}
3394
/**
 * qla24xx_config_rings() - Program ring state for ISP24xx and later parts.
 * @vha: HA context
 *
 * Fills the 24xx initialization control block with request/response and
 * ATIO queue geometry and DMA addresses, configures multiqueue/MSI-X
 * related firmware option bits when applicable, and zeroes the hardware
 * ring pointer registers before init-firmware is issued.
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));

	/* Enable shadow registers (BIT_30|BIT_29) when supported. */
	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		/* Multiqueue-capable path: program QOS/RID/MSI-X vector. */
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
	} else {
		/* Single-queue register layout. */
		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
	}
	/* Let the target code apply its own ring configuration. */
	qlt_24xx_config_rings(vha);

	/* PCI posting */
	RD_REG_DWORD(&ioreg->hccr);
}
3471
/**
 * qla2x00_init_rings() - Initializes firmware.
 * @vha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/* Shadow out-pointer lives just past the end of the ring. */
		req->out_ptr = (void *)(req->ring + req->length);
		*req->out_ptr = 0;
		/* Slot 0 is left unused; handles start at index 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware. */
		req->ring_ptr  = req->ring;
		req->ring_index    = 0;
		req->cnt      = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		/* Shadow in-pointer lives just past the end of the ring. */
		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	/* Chip-specific register programming (2xxx vs 24xx+). */
	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");

	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		/* D_Port diagnostic support is reported via options_1 BIT_7. */
		ha->flags.dport_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		QLA_FW_STARTED(ha);
	}

	return (rval);
}
3579
/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Polls the firmware state (every 500 ms) until FSTATE_READY, a timeout,
 * or an unrecoverable error. Two deadlines are maintained: @mtime, the
 * minimum loop-down wait, and @wtime, the overall ready wait.
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int		rval;
	unsigned long	wtime, mtime, cs84xx_time;
	uint16_t	min_wait;	/* Minimum wait time if loop is down */
	uint16_t	wait_time;	/* Wait time if loop is coming ready */
	uint16_t	state[6];
	struct qla_hw_data *ha = vha->hw;

	/* ISPFx00 has its own dedicated ready-wait routine. */
	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	rval = QLA_SUCCESS;

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	do {
		/* Pre-fill with -1 so stale words are obvious in the log. */
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				/* 84xx may need a verify IOCB before ready. */
				if ((state[2] & FSTATE_LOGGED_IN) &&
				    (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				/* Refresh login/retry timing from firmware. */
				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
				ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}
3711
/*
 * qla2x00_configure_hba
 *	Setup adapter context by reading the host loop ID, AL_PA/area/domain
 *	address and topology from the firmware, then recording topology,
 *	operating mode and the host port id on the adapter.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int       rval;
	uint16_t      loop_id;
	uint16_t      topo;
	uint16_t      sw_cap;
	uint8_t       al_pa;
	uint8_t       area;
	uint8_t       domain;
	char		connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		/* Transient loop states are expected; only warn otherwise. */
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			/* Loop-id 0x1b on the base port: try a link init
			 * before escalating to a full ISP abort.
			 */
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	/* topo 4 means the firmware could not determine the topology yet. */
	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	/* Decode firmware topology: 0=NL, 1=FL, 2=N (P2P), 3=F (fabric). */
	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	qlt_update_host_map(vha, id);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}
3829
3830 inline void
3831 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
3832 char *def)
3833 {
3834 char *st, *en;
3835 uint16_t index;
3836 struct qla_hw_data *ha = vha->hw;
3837 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
3838 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
3839
3840 if (memcmp(model, BINZERO, len) != 0) {
3841 strncpy(ha->model_number, model, len);
3842 st = en = ha->model_number;
3843 en += len - 1;
3844 while (en > st) {
3845 if (*en != 0x20 && *en != 0x00)
3846 break;
3847 *en-- = '\0';
3848 }
3849
3850 index = (ha->pdev->subsystem_device & 0xff);
3851 if (use_tbl &&
3852 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
3853 index < QLA_MODEL_NAMES)
3854 strncpy(ha->model_desc,
3855 qla2x00_model_name[index * 2 + 1],
3856 sizeof(ha->model_desc) - 1);
3857 } else {
3858 index = (ha->pdev->subsystem_device & 0xff);
3859 if (use_tbl &&
3860 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
3861 index < QLA_MODEL_NAMES) {
3862 strcpy(ha->model_number,
3863 qla2x00_model_name[index * 2]);
3864 strncpy(ha->model_desc,
3865 qla2x00_model_name[index * 2 + 1],
3866 sizeof(ha->model_desc) - 1);
3867 } else {
3868 strcpy(ha->model_number, def);
3869 }
3870 }
3871 if (IS_FWI2_CAPABLE(ha))
3872 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
3873 sizeof(ha->model_desc));
3874 }
3875
3876 /* On sparc systems, obtain port and node WWN from firmware
3877 * properties.
3878 */
3879 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
3880 {
3881 #ifdef CONFIG_SPARC
3882 struct qla_hw_data *ha = vha->hw;
3883 struct pci_dev *pdev = ha->pdev;
3884 struct device_node *dp = pci_device_to_OF_node(pdev);
3885 const u8 *val;
3886 int len;
3887
3888 val = of_get_property(dp, "port-wwn", &len);
3889 if (val && len >= WWN_SIZE)
3890 memcpy(nv->port_name, val, WWN_SIZE);
3891
3892 val = of_get_property(dp, "node-wwn", &len);
3893 if (val && len >= WWN_SIZE)
3894 memcpy(nv->node_name, val, WWN_SIZE);
3895 #endif
3896 }
3897
/*
 * NVRAM configuration for ISP 2xxx
 *
 * Reads and checksums the NVRAM, falls back to safe defaults on a bad
 * checksum/signature, copies the RISC parameter block into the init
 * control block and derives the driver's timing/retry parameters.
 *
 * Input:
 *	ha                = adapter block pointer.
 *
 * Output:
 *	initialization control block in response_ring
 *	host adapters parameters in host adapter block
 *
 * Returns:
 *	0 = success.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int             rval;
	uint8_t         chksum = 0;
	uint16_t        cnt;
	uint8_t         *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t       *icb = ha->init_cb;
	nvram_t         *nv = ha->nvram;
	uint8_t         *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(nvram_t);
	ha->nvram_base = 0;
	/* Function 1 of a dual-function 23xx part uses the upper half. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    (uint8_t *)nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM "
		    "detected: checksum=0x%x id=%c version=0x%x.\n",
		    chksum, nv->id[0], nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		/* Chip-family specific firmware option defaults. */
		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = 2048;
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = 1024;
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = 1024;
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Placeholder (invalid) WWPN bytes; see warning above. */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		/* Non-zero rval flags that defaults were substituted. */
		rval = 1;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
	/*
	 * The SN2 does not provide BIOS emulation which means you can't change
	 * potentially bogus BIOS settings. Force the use of default settings
	 * for link rate and frame size. Hope that the rest of the settings
	 * are valid.
	 */
	if (ia64_platform_is("sn2")) {
		nv->frame_payload_size = 2048;
		if (IS_QLA23XX(ha))
			nv->special_options[1] = BIT_7;
	}
#endif

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			/* FPM revision distinguishes 2310 from 2300. */
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout =	 nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count  = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer: 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			ql_log(ql_log_info, vha, 0x0068,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0069,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
4223
4224 static void
4225 qla2x00_rport_del(void *data)
4226 {
4227 fc_port_t *fcport = data;
4228 struct fc_rport *rport;
4229 unsigned long flags;
4230
4231 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
4232 rport = fcport->drport ? fcport->drport: fcport->rport;
4233 fcport->drport = NULL;
4234 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
4235 if (rport) {
4236 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
4237 "%s %8phN. rport %p roles %x\n",
4238 __func__, fcport->port_name, rport,
4239 rport->roles);
4240
4241 fc_remote_port_delete(rport);
4242 }
4243 }
4244
4245 /**
4246 * qla2x00_alloc_fcport() - Allocate a generic fcport.
4247 * @ha: HA context
4248 * @flags: allocation flags
4249 *
4250 * Returns a pointer to the allocated fcport, or NULL, if none available.
4251 */
4252 fc_port_t *
4253 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
4254 {
4255 fc_port_t *fcport;
4256
4257 fcport = kzalloc(sizeof(fc_port_t), flags);
4258 if (!fcport)
4259 return NULL;
4260
4261 /* Setup fcport template structure. */
4262 fcport->vha = vha;
4263 fcport->port_type = FCT_UNKNOWN;
4264 fcport->loop_id = FC_NO_LOOP_ID;
4265 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
4266 fcport->supported_classes = FC_COS_UNSPECIFIED;
4267
4268 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
4269 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
4270 flags);
4271 fcport->disc_state = DSC_DELETED;
4272 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
4273 fcport->deleted = QLA_SESS_DELETED;
4274 fcport->login_retry = vha->hw->login_retry_count;
4275 fcport->login_retry = 5;
4276 fcport->logout_on_delete = 1;
4277
4278 if (!fcport->ct_desc.ct_sns) {
4279 ql_log(ql_log_warn, vha, 0xd049,
4280 "Failed to allocate ct_sns request.\n");
4281 kfree(fcport);
4282 fcport = NULL;
4283 }
4284 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
4285 INIT_LIST_HEAD(&fcport->gnl_entry);
4286 INIT_LIST_HEAD(&fcport->list);
4287
4288 return fcport;
4289 }
4290
4291 void
4292 qla2x00_free_fcport(fc_port_t *fcport)
4293 {
4294 if (fcport->ct_desc.ct_sns) {
4295 dma_free_coherent(&fcport->vha->hw->pdev->dev,
4296 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
4297 fcport->ct_desc.ct_sns_dma);
4298
4299 fcport->ct_desc.ct_sns = NULL;
4300 }
4301 kfree(fcport);
4302 }
4303
/*
 * qla2x00_configure_loop
 *	Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	1 = error.
 *	2 = database was full and device was not configured.
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int  rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;
	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/* Work on a private snapshot of the dpc flags from here on. */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);

	/* Determine what we need to do */
	if (ha->current_topology == ISP_CFG_FL &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_F &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		/* Pure fabric: local-loop scan is meaningless. */
		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		if (ha->flags.rida_fmt2) {
			/* With Rida Format 2, the login is already triggered.
			 * We know who is on the other side of the wire.
			 * No need to login to do login to find out or drop into
			 * qla2x00_configure_local_loop().
			 */
			clear_bit(LOCAL_LOOP_UPDATE, &flags);
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		} else {
			if (qla_tgt_mode_enabled(vha)) {
				/* allow the other side to start the login */
				clear_bit(LOCAL_LOOP_UPDATE, &flags);
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			}
		}
	} else if (ha->current_topology == ISP_CFG_NL) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* First bring-up or post-abort: do a full rescan. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				/* 83xx/27xx guard the ATIO queue with its
				 * own lock; older parts use hardware_lock.
				 */
				if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
					spin_lock_irqsave(&ha->tgt.atio_lock,
					    flags);
					qlt_24xx_process_atio_queue(vha, 0);
					spin_unlock_irqrestore(
					    &ha->tgt.atio_lock, flags);
				} else {
					spin_lock_irqsave(&ha->hardware_lock,
					    flags);
					qlt_24xx_process_atio_queue(vha, 1);
					spin_unlock_irqrestore(
					    &ha->hardware_lock, flags);
				}
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally.\n", __func__);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}
4456
/*
 * N2N Login
 *	Handle the point-to-point (N2N) login handshake for the single
 *	remote port on the other end of the link.
 *
 * Input:
 *	vha = adapter block pointer.
 *	fcport = port structure pointer for the remote N_Port.
 *
 * Returns:
 *	0 = success.
 */
static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha,
				fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	int	res = QLA_SUCCESS, rval;
	int	greater_wwpn = 0;
	int	logged_in = 0;

	/* Only meaningful in N2N (point-to-point) topology. */
	if (ha->current_topology != ISP_CFG_N)
		return res;

	/*
	 * The side with the greater WWPN initiates the PLOGI; the other
	 * side waits to be logged into.  Record which role we play.
	 */
	if (wwn_to_u64(vha->port_name) >
	    wwn_to_u64(vha->n2n_port_name)) {
		ql_dbg(ql_dbg_disc, vha, 0x2002,
		    "HBA WWPN is greater %llx > target %llx\n",
		    wwn_to_u64(vha->port_name),
		    wwn_to_u64(vha->n2n_port_name));
		greater_wwpn = 1;
		/* Use the previously negotiated N2N port ID. */
		fcport->d_id.b24 = vha->n2n_id;
	}

	fcport->loop_id = vha->loop_id;
	fcport->fc4f_nvme = 0;
	fcport->query = 1;

	ql_dbg(ql_dbg_disc, vha, 0x4001,
	    "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n",
	    fcport->d_id.b24, vha->loop_id);

	/* Fill in member data. */
	if (!greater_wwpn) {
		/*
		 * Remote side drives the login; query the firmware port
		 * database to see how far its login has progressed.
		 */
		rval = qla2x00_get_port_database(vha, fcport, 0);
		ql_dbg(ql_dbg_disc, vha, 0x1051,
		    "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n",
		    fcport->current_login_state, fcport->last_login_state,
		    fcport->d_id.b24, fcport->loop_id, rval);

		/* Login state 0x4 = PLOGI complete, 0x6 = PRLI complete. */
		if (((fcport->current_login_state & 0xf) == 0x4) ||
		    ((fcport->current_login_state & 0xf) == 0x6))
			logged_in = 1;
	}

	if (logged_in || greater_wwpn) {
		if (!vha->nvme_local_port && vha->flags.nvme_enabled)
			qla_nvme_register_hba(vha);

		/* Set connected N_Port d_id */
		if (vha->flags.nvme_enabled)
			fcport->fc4f_nvme = 1;

		fcport->scan_state = QLA_FCPORT_FOUND;
		fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		fcport->disc_state = DSC_GNL;
		fcport->n2n_flag = 1;
		fcport->flags = 3;
		vha->hw->flags.gpsc_supported = 0;

		if (greater_wwpn) {
			ql_dbg(ql_dbg_disc, vha, 0x20e5,
			    "%s %d PLOGI ELS %8phC\n",
			    __func__, __LINE__, fcport->port_name);

			/* We are the initiator: send the PLOGI ELS. */
			res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
			    fcport, fcport->d_id);
		}

		/*
		 * res can only differ from QLA_SUCCESS when the PLOGI
		 * above was issued (greater_wwpn case).
		 */
		if (res != QLA_SUCCESS) {
			ql_log(ql_log_info, vha, 0xd04d,
			    "PLOGI Failed: portid=%06x - retrying\n",
			    fcport->d_id.b24);
			/* Report success so discovery retries later. */
			res = QLA_SUCCESS;
		} else {
			/* State 0x6 means FCP PRLI complete */
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
				    vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post NVMe PRLI\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_prli_work(vha, fcport);
			}
		}
	} else {
		/* Wait for next database change */
		set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
	}

	return res;
}
4560
4561 /*
4562 * qla2x00_configure_local_loop
4563 * Updates Fibre Channel Device Database with local loop devices.
4564 *
4565 * Input:
4566 * ha = adapter block pointer.
4567 *
4568 * Returns:
4569 * 0 = success.
4570 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	int		rval, rval2;
	int		found_devs;
	int		found;
	fc_port_t	*fcport, *new_fcport;

	uint16_t	index;
	uint16_t	entries;
	char		*id_iter;
	uint16_t	loop_id;
	uint8_t		domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices. */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto cleanup_allocation;

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    (uint8_t *)ha->gid_list,
	    entries * sizeof(struct gid_list_info));

	/*
	 * Mark all known ports stale; ports rediscovered below are
	 * flipped back to QLA_FCPORT_FOUND.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;
	}

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto cleanup_allocation;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Initiate N2N login. */
	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
		rval = qla24xx_n2n_handle_login(vha, new_fcport);
		if (rval != QLA_SUCCESS)
			goto cleanup_allocation;
		/*
		 * NOTE(review): new_fcport appears to remain referenced
		 * by the posted N2N login work, so it is intentionally
		 * not freed on this early return — confirm ownership.
		 */
		return QLA_SUCCESS;
	}

	/* Add devices to port list. */
	id_iter = (char *)ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = ((struct gid_list_info *)id_iter)->domain;
		area = ((struct gid_list_info *)id_iter)->area;
		al_pa = ((struct gid_list_info *)id_iter)->al_pa;
		/* ISP2100/2200 report an 8-bit loop ID; later chips 16. */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = (uint16_t)
			    ((struct gid_list_info *)id_iter)->loop_id_2100;
		else
			loop_id = le16_to_cpu(
			    ((struct gid_list_info *)id_iter)->loop_id);
		id_iter += ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter. */
		if (area && domain &&
		    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
			continue;

		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				continue;
			}
		}

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			/* Known WWPN: refresh addressing data in place. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);
			fcport->scan_state = QLA_FCPORT_FOUND;
			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport. */
			fcport = new_fcport;

			/* Drop the lock across the GFP_KERNEL allocation. */
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

			if (new_fcport == NULL) {
				ql_log(ql_log_warn, vha, 0xd031,
				    "Failed to allocate memory for fcport.\n");
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto cleanup_allocation;
			}
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

		found_devs++;
	}

	/*
	 * Act on the scan: log in rediscovered ports, tear down sessions
	 * for vanished ones — unless a resync was scheduled meanwhile.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice, 0);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}

cleanup_allocation:
	kfree(new_fcport);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2098,
		    "Configure local loop error exit: rval=%x.\n", rval);
	}

	return (rval);
}
4765
4766 static void
4767 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
4768 {
4769 int rval;
4770 uint16_t mb[MAILBOX_REGISTER_COUNT];
4771 struct qla_hw_data *ha = vha->hw;
4772
4773 if (!IS_IIDMA_CAPABLE(ha))
4774 return;
4775
4776 if (atomic_read(&fcport->state) != FCS_ONLINE)
4777 return;
4778
4779 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
4780 fcport->fp_speed > ha->link_data_rate ||
4781 !ha->flags.gpsc_supported)
4782 return;
4783
4784 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
4785 mb);
4786 if (rval != QLA_SUCCESS) {
4787 ql_dbg(ql_dbg_disc, vha, 0x2004,
4788 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
4789 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
4790 } else {
4791 ql_dbg(ql_dbg_disc, vha, 0x2005,
4792 "iIDMA adjusted to %s GB/s on %8phN.\n",
4793 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
4794 fcport->port_name);
4795 }
4796 }
4797
4798 /* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
4799 static void
4800 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
4801 {
4802 struct fc_rport_identifiers rport_ids;
4803 struct fc_rport *rport;
4804 unsigned long flags;
4805
4806 rport_ids.node_name = wwn_to_u64(fcport->node_name);
4807 rport_ids.port_name = wwn_to_u64(fcport->port_name);
4808 rport_ids.port_id = fcport->d_id.b.domain << 16 |
4809 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
4810 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4811 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
4812 if (!rport) {
4813 ql_log(ql_log_warn, vha, 0x2006,
4814 "Unable to allocate fc remote port.\n");
4815 return;
4816 }
4817
4818 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
4819 *((fc_port_t **)rport->dd_data) = fcport;
4820 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
4821
4822 rport->supported_classes = fcport->supported_classes;
4823
4824 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4825 if (fcport->port_type == FCT_INITIATOR)
4826 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
4827 if (fcport->port_type == FCT_TARGET)
4828 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
4829
4830 ql_dbg(ql_dbg_disc, vha, 0x20ee,
4831 "%s %8phN. rport %p is %s mode\n",
4832 __func__, fcport->port_name, rport,
4833 (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
4834
4835 fc_remote_port_rolechg(rport, rport_ids.roles);
4836 }
4837
4838 /*
4839 * qla2x00_update_fcport
4840 * Updates device on list.
4841 *
4842 * Input:
4843 * ha = adapter block pointer.
4844 * fcport = port structure pointer.
4845 *
 * Return:
 *	None.
4849 *
4850 * Context:
4851 * Kernel context.
4852 */
4853 void
4854 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
4855 {
4856 fcport->vha = vha;
4857
4858 if (IS_SW_RESV_ADDR(fcport->d_id))
4859 return;
4860
4861 ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
4862 __func__, fcport->port_name);
4863
4864 if (IS_QLAFX00(vha->hw)) {
4865 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
4866 goto reg_port;
4867 }
4868 fcport->login_retry = 0;
4869 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
4870 fcport->disc_state = DSC_LOGIN_COMPLETE;
4871 fcport->deleted = 0;
4872 fcport->logout_on_delete = 1;
4873
4874 if (fcport->fc4f_nvme) {
4875 qla_nvme_register_remote(vha, fcport);
4876 return;
4877 }
4878
4879 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
4880 qla2x00_iidma_fcport(vha, fcport);
4881 qla24xx_update_fcport_fcp_prio(vha, fcport);
4882
4883 reg_port:
4884 switch (vha->host->active_mode) {
4885 case MODE_INITIATOR:
4886 qla2x00_reg_remote_port(vha, fcport);
4887 break;
4888 case MODE_TARGET:
4889 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
4890 !vha->vha_tgt.qla_tgt->tgt_stopped)
4891 qlt_fc_port_added(vha, fcport);
4892 break;
4893 case MODE_DUAL:
4894 qla2x00_reg_remote_port(vha, fcport);
4895 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
4896 !vha->vha_tgt.qla_tgt->tgt_stopped)
4897 qlt_fc_port_added(vha, fcport);
4898 break;
4899 default:
4900 break;
4901 }
4902 }
4903
4904 /*
4905 * qla2x00_configure_fabric
4906 * Setup SNS devices with loop ID's.
4907 *
4908 * Input:
4909 * ha = adapter block pointer.
4910 *
4911 * Returns:
4912 * 0 = success.
4913 * BIT_0 = error
4914 */
4915 static int
4916 qla2x00_configure_fabric(scsi_qla_host_t *vha)
4917 {
4918 int rval;
4919 fc_port_t *fcport;
4920 uint16_t mb[MAILBOX_REGISTER_COUNT];
4921 uint16_t loop_id;
4922 LIST_HEAD(new_fcports);
4923 struct qla_hw_data *ha = vha->hw;
4924 int discovery_gen;
4925
4926 /* If FL port exists, then SNS is present */
4927 if (IS_FWI2_CAPABLE(ha))
4928 loop_id = NPH_F_PORT;
4929 else
4930 loop_id = SNS_FL_PORT;
4931 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
4932 if (rval != QLA_SUCCESS) {
4933 ql_dbg(ql_dbg_disc, vha, 0x20a0,
4934 "MBX_GET_PORT_NAME failed, No FL Port.\n");
4935
4936 vha->device_flags &= ~SWITCH_FOUND;
4937 return (QLA_SUCCESS);
4938 }
4939 vha->device_flags |= SWITCH_FOUND;
4940
4941
4942 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
4943 rval = qla2x00_send_change_request(vha, 0x3, 0);
4944 if (rval != QLA_SUCCESS)
4945 ql_log(ql_log_warn, vha, 0x121,
4946 "Failed to enable receiving of RSCN requests: 0x%x.\n",
4947 rval);
4948 }
4949
4950
4951 do {
4952 qla2x00_mgmt_svr_login(vha);
4953
4954 /* FDMI support. */
4955 if (ql2xfdmienable &&
4956 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
4957 qla2x00_fdmi_register(vha);
4958
4959 /* Ensure we are logged into the SNS. */
4960 loop_id = NPH_SNS_LID(ha);
4961 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
4962 0xfc, mb, BIT_1|BIT_0);
4963 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
4964 ql_dbg(ql_dbg_disc, vha, 0x20a1,
4965 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
4966 loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
4967 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4968 return rval;
4969 }
4970 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
4971 if (qla2x00_rft_id(vha)) {
4972 /* EMPTY */
4973 ql_dbg(ql_dbg_disc, vha, 0x20a2,
4974 "Register FC-4 TYPE failed.\n");
4975 if (test_bit(LOOP_RESYNC_NEEDED,
4976 &vha->dpc_flags))
4977 break;
4978 }
4979 if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
4980 /* EMPTY */
4981 ql_dbg(ql_dbg_disc, vha, 0x209a,
4982 "Register FC-4 Features failed.\n");
4983 if (test_bit(LOOP_RESYNC_NEEDED,
4984 &vha->dpc_flags))
4985 break;
4986 }
4987 if (vha->flags.nvme_enabled) {
4988 if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
4989 ql_dbg(ql_dbg_disc, vha, 0x2049,
4990 "Register NVME FC Type Features failed.\n");
4991 }
4992 }
4993 if (qla2x00_rnn_id(vha)) {
4994 /* EMPTY */
4995 ql_dbg(ql_dbg_disc, vha, 0x2104,
4996 "Register Node Name failed.\n");
4997 if (test_bit(LOOP_RESYNC_NEEDED,
4998 &vha->dpc_flags))
4999 break;
5000 } else if (qla2x00_rsnn_nn(vha)) {
5001 /* EMPTY */
5002 ql_dbg(ql_dbg_disc, vha, 0x209b,
5003 "Register Symbolic Node Name failed.\n");
5004 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5005 break;
5006 }
5007 }
5008
5009 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5010 fcport->scan_state = QLA_FCPORT_SCAN;
5011 }
5012
5013 /* Mark the time right before querying FW for connected ports.
5014 * This process is long, asynchronous and by the time it's done,
5015 * collected information might not be accurate anymore. E.g.
5016 * disconnected port might have re-connected and a brand new
5017 * session has been created. In this case session's generation
5018 * will be newer than discovery_gen. */
5019 qlt_do_generation_tick(vha, &discovery_gen);
5020
5021 rval = qla2x00_find_all_fabric_devs(vha);
5022 if (rval != QLA_SUCCESS)
5023 break;
5024 } while (0);
5025
5026 if (!vha->nvme_local_port && vha->flags.nvme_enabled)
5027 qla_nvme_register_hba(vha);
5028
5029 if (rval)
5030 ql_dbg(ql_dbg_disc, vha, 0x2068,
5031 "Configure fabric error exit rval=%d.\n", rval);
5032
5033 return (rval);
5034 }
5035
5036 /*
5037 * qla2x00_find_all_fabric_devs
5038 *
5039 * Input:
5040 * ha = adapter block pointer.
5041 * dev = database device entry pointer.
5042 *
5043 * Returns:
5044 * 0 = success.
5045 *
5046 * Context:
5047 * Kernel context.
5048 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int		rval;
	uint16_t	loop_id;
	fc_port_t	*fcport, *new_fcport;
	int		found;

	sw_info_t	*swl;
	int		swl_idx;
	int		first_dev, last_dev;
	port_id_t	wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN. */
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		/*
		 * Bulk name-server queries; on any failure, fall back to
		 * the per-port GA_NXT walk by clearing swl.
		 */
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* If other queries succeeded probe for FC-4 type */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
	/* Set start port ID scan at adapter ID. */
	first_dev = 1;
	last_dev = 0;

	/* Starting free loop ID. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		/*
		 * FL topology: abort the scan if the loop went down or is
		 * transitioning, and schedule a full resync instead.
		 */
		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			if (last_dev) {
				/* Force the wrap check below to terminate. */
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				/* Next entry from the cached GID_PT list. */
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				new_fcport->fc4f_nvme = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4f_nvme) {
					new_fcport->fc4f_nvme =
					    swl[swl_idx].fc4f_nvme;
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				/* rsvd_1 set marks the last list entry. */
				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrap on switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass if same domain and area of adapter. */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
			ISP_CFG_FL)
			continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
			continue;

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			    (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 * relogin later.
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
				    "port changed FC ID, %8phC"
				    " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
				    fcport->port_name,
				    fcport->d_id.b.domain,
				    fcport->d_id.b.area,
				    fcport->d_id.b.al_pa,
				    fcport->loop_id,
				    new_fcport->d_id.b.domain,
				    new_fcport->d_id.b.area,
				    new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;
				break;
			}

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;
			break;
		}

		if (found) {
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			continue;
		}
		/* If device was not in our fcports list, then add it. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);


		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		}
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;
	}

	/* Release the spare fcport that was never consumed by the loop. */
	qla2x00_free_fcport(new_fcport);

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
		    (fcport->flags & FCF_LOGIN_NEEDED) == 0)
			continue;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice, 0);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);
					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}
	return (rval);
}
5337
5338 /*
5339 * qla2x00_find_new_loop_id
5340 * Scan through our port list and find a new usable loop ID.
5341 *
5342 * Input:
5343 * ha: adapter state pointer.
5344 * dev: port structure pointer.
5345 *
5346 * Returns:
5347 * qla2x00 local function return status code.
5348 *
5349 * Context:
5350 * Kernel context.
5351 */
5352 int
5353 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
5354 {
5355 int rval;
5356 struct qla_hw_data *ha = vha->hw;
5357 unsigned long flags = 0;
5358
5359 rval = QLA_SUCCESS;
5360
5361 spin_lock_irqsave(&ha->vport_slock, flags);
5362
5363 dev->loop_id = find_first_zero_bit(ha->loop_id_map,
5364 LOOPID_MAP_SIZE);
5365 if (dev->loop_id >= LOOPID_MAP_SIZE ||
5366 qla2x00_is_reserved_id(vha, dev->loop_id)) {
5367 dev->loop_id = FC_NO_LOOP_ID;
5368 rval = QLA_FUNCTION_FAILED;
5369 } else
5370 set_bit(dev->loop_id, ha->loop_id_map);
5371
5372 spin_unlock_irqrestore(&ha->vport_slock, flags);
5373
5374 if (rval == QLA_SUCCESS)
5375 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
5376 "Assigning new loopid=%x, portid=%x.\n",
5377 dev->loop_id, dev->d_id.b24);
5378 else
5379 ql_log(ql_log_warn, dev->vha, 0x2087,
5380 "No loop_id's available, portid=%x.\n",
5381 dev->d_id.b24);
5382
5383 return (rval);
5384 }
5385
5386
5387 /*
5388 * qla2x00_fabric_login
5389 * Issue fabric login command.
5390 *
5391 * Input:
5392 * ha = adapter block pointer.
5393 * device = pointer to FC device type structure.
5394 *
5395 * Returns:
5396 * 0 - Login successfully
5397 * 1 - Login failed
5398 * 2 - Initiator device
5399 * 3 - Fatal error
5400 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/*
	 * Retry the fabric login until the firmware returns a terminal
	 * mailbox status; non-terminal statuses adjust the loop ID and
	 * try again.
	 */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID.  The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again.  The ID we just used is
			 * saved here so we return with an ID that can be tried
			 * by the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/*
			 * mb[1] BIT_0: remote port is an initiator;
			 * otherwise target, with BIT_1 = FCP-2 device.
			 */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* mb[10]: supported classes of service. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1, 0);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}
5531
5532 /*
5533 * qla2x00_local_device_login
5534 * Issue local device login command.
5535 *
5536 * Input:
5537 * ha = adapter block pointer.
5538 * loop_id = loop id of device to login to.
5539 *
5540  * Returns (note: no named #define constants exist for these codes):
5541 * 0 - Login successfully
5542 * 1 - Login failed
5543 * 3 - Fatal error
5544 */
5545 int
5546 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
5547 {
5548 int rval;
5549 uint16_t mb[MAILBOX_REGISTER_COUNT];
5550
5551 memset(mb, 0, sizeof(mb));
5552 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
5553 if (rval == QLA_SUCCESS) {
5554 /* Interrogate mailbox registers for any errors */
5555 if (mb[0] == MBS_COMMAND_ERROR)
5556 rval = 1;
5557 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
5558 /* device not in PCB table */
5559 rval = 3;
5560 }
5561
5562 return (rval);
5563 }
5564
5565 /*
5566 * qla2x00_loop_resync
5567 * Resync with fibre channel devices.
5568 *
5569 * Input:
5570 * ha = adapter block pointer.
5571 *
5572 * Returns:
5573 * 0 = success
5574 */
5575 int
5576 qla2x00_loop_resync(scsi_qla_host_t *vha)
5577 {
5578 int rval = QLA_SUCCESS;
5579 uint32_t wait_time;
5580 struct req_que *req;
5581 struct rsp_que *rsp;
5582
5583 req = vha->req;
5584 rsp = req->rsp;
5585
5586 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5587 if (vha->flags.online) {
5588 if (!(rval = qla2x00_fw_ready(vha))) {
5589 /* Wait at most MAX_TARGET RSCNs for a stable link. */
5590 wait_time = 256;
5591 do {
5592 if (!IS_QLAFX00(vha->hw)) {
5593 /*
5594 * Issue a marker after FW becomes
5595 * ready.
5596 */
5597 qla2x00_marker(vha, req, rsp, 0, 0,
5598 MK_SYNC_ALL);
5599 vha->marker_needed = 0;
5600 }
5601
5602 /* Remap devices on Loop. */
5603 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5604
5605 if (IS_QLAFX00(vha->hw))
5606 qlafx00_configure_devices(vha);
5607 else
5608 qla2x00_configure_loop(vha);
5609
5610 wait_time--;
5611 } while (!atomic_read(&vha->loop_down_timer) &&
5612 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
5613 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
5614 &vha->dpc_flags)));
5615 }
5616 }
5617
5618 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
5619 return (QLA_FUNCTION_FAILED);
5620
5621 if (rval)
5622 ql_dbg(ql_dbg_disc, vha, 0x206c,
5623 "%s *** FAILED ***.\n", __func__);
5624
5625 return (rval);
5626 }
5627
5628 /*
5629 * qla2x00_perform_loop_resync
5630 * Description: This function will set the appropriate flags and call
5631 * qla2x00_loop_resync. If successful loop will be resynced
5632 * Arguments : scsi_qla_host_t pointer
5633  * return : Success or Failure
5634 */
5635
5636 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
5637 {
5638 int32_t rval = 0;
5639
5640 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
5641 /*Configure the flags so that resync happens properly*/
5642 atomic_set(&ha->loop_down_timer, 0);
5643 if (!(ha->device_flags & DFLG_NO_CABLE)) {
5644 atomic_set(&ha->loop_state, LOOP_UP);
5645 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
5646 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
5647 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
5648
5649 rval = qla2x00_loop_resync(ha);
5650 } else
5651 atomic_set(&ha->loop_state, LOOP_DEAD);
5652
5653 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
5654 }
5655
5656 return rval;
5657 }
5658
/*
 * qla2x00_update_fcports
 *	Walk every vport on the HBA and perform the deferred removal of
 *	rport references (fcport->drport) queued earlier.
 */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references. */
	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
		/* Pin the vport so it cannot vanish while the lock is dropped. */
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				/*
				 * Lock is dropped around qla2x00_rport_del();
				 * NOTE(review): presumably because rport
				 * removal can block -- confirm at call site.
				 */
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
5685
5686 /* Assumes idc_lock always held on entry */
5687 void
5688 qla83xx_reset_ownership(scsi_qla_host_t *vha)
5689 {
5690 struct qla_hw_data *ha = vha->hw;
5691 uint32_t drv_presence, drv_presence_mask;
5692 uint32_t dev_part_info1, dev_part_info2, class_type;
5693 uint32_t class_type_mask = 0x3;
5694 uint16_t fcoe_other_function = 0xffff, i;
5695
5696 if (IS_QLA8044(ha)) {
5697 drv_presence = qla8044_rd_direct(vha,
5698 QLA8044_CRB_DRV_ACTIVE_INDEX);
5699 dev_part_info1 = qla8044_rd_direct(vha,
5700 QLA8044_CRB_DEV_PART_INFO_INDEX);
5701 dev_part_info2 = qla8044_rd_direct(vha,
5702 QLA8044_CRB_DEV_PART_INFO2);
5703 } else {
5704 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5705 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
5706 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
5707 }
5708 for (i = 0; i < 8; i++) {
5709 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
5710 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5711 (i != ha->portnum)) {
5712 fcoe_other_function = i;
5713 break;
5714 }
5715 }
5716 if (fcoe_other_function == 0xffff) {
5717 for (i = 0; i < 8; i++) {
5718 class_type = ((dev_part_info2 >> (i * 4)) &
5719 class_type_mask);
5720 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5721 ((i + 8) != ha->portnum)) {
5722 fcoe_other_function = i + 8;
5723 break;
5724 }
5725 }
5726 }
5727 /*
5728 * Prepare drv-presence mask based on fcoe functions present.
5729 * However consider only valid physical fcoe function numbers (0-15).
5730 */
5731 drv_presence_mask = ~((1 << (ha->portnum)) |
5732 ((fcoe_other_function == 0xffff) ?
5733 0 : (1 << (fcoe_other_function))));
5734
5735 /* We are the reset owner iff:
5736 * - No other protocol drivers present.
5737 * - This is the lowest among fcoe functions. */
5738 if (!(drv_presence & drv_presence_mask) &&
5739 (ha->portnum < fcoe_other_function)) {
5740 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
5741 "This host is Reset owner.\n");
5742 ha->flags.nic_core_reset_owner = 1;
5743 }
5744 }
5745
5746 static int
5747 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
5748 {
5749 int rval = QLA_SUCCESS;
5750 struct qla_hw_data *ha = vha->hw;
5751 uint32_t drv_ack;
5752
5753 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5754 if (rval == QLA_SUCCESS) {
5755 drv_ack |= (1 << ha->portnum);
5756 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
5757 }
5758
5759 return rval;
5760 }
5761
5762 static int
5763 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
5764 {
5765 int rval = QLA_SUCCESS;
5766 struct qla_hw_data *ha = vha->hw;
5767 uint32_t drv_ack;
5768
5769 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5770 if (rval == QLA_SUCCESS) {
5771 drv_ack &= ~(1 << ha->portnum);
5772 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
5773 }
5774
5775 return rval;
5776 }
5777
5778 static const char *
5779 qla83xx_dev_state_to_string(uint32_t dev_state)
5780 {
5781 switch (dev_state) {
5782 case QLA8XXX_DEV_COLD:
5783 return "COLD/RE-INIT";
5784 case QLA8XXX_DEV_INITIALIZING:
5785 return "INITIALIZING";
5786 case QLA8XXX_DEV_READY:
5787 return "READY";
5788 case QLA8XXX_DEV_NEED_RESET:
5789 return "NEED RESET";
5790 case QLA8XXX_DEV_NEED_QUIESCENT:
5791 return "NEED QUIESCENT";
5792 case QLA8XXX_DEV_FAILED:
5793 return "FAILED";
5794 case QLA8XXX_DEV_QUIESCENT:
5795 return "QUIESCENT";
5796 default:
5797 return "Unknown";
5798 }
5799 }
5800
5801 /* Assumes idc-lock always held on entry */
5802 void
5803 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
5804 {
5805 struct qla_hw_data *ha = vha->hw;
5806 uint32_t idc_audit_reg = 0, duration_secs = 0;
5807
5808 switch (audit_type) {
5809 case IDC_AUDIT_TIMESTAMP:
5810 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
5811 idc_audit_reg = (ha->portnum) |
5812 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
5813 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
5814 break;
5815
5816 case IDC_AUDIT_COMPLETION:
5817 duration_secs = ((jiffies_to_msecs(jiffies) -
5818 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
5819 idc_audit_reg = (ha->portnum) |
5820 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
5821 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
5822 break;
5823
5824 default:
5825 ql_log(ql_log_warn, vha, 0xb078,
5826 "Invalid audit type specified.\n");
5827 break;
5828 }
5829 }
5830
5831 /* Assumes idc_lock always held on entry */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_control, dev_state;

	/* Bail out if NIC-core reset has been administratively disabled. */
	__qla83xx_get_idc_control(vha, &idc_control);
	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
		ql_log(ql_log_info, vha, 0xb080,
		    "NIC Core reset has been disabled. idc-control=0x%x\n",
		    idc_control);
		return QLA_FUNCTION_FAILED;
	}

	/* Set NEED-RESET iff in READY state and we are the reset-owner */
	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
	} else {
		const char *state = qla83xx_dev_state_to_string(dev_state);
		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);

		/* SV: XXX: Is timeout required here? */
		/* Wait for IDC state change READY -> NEED_RESET */
		while (dev_state == QLA8XXX_DEV_READY) {
			/*
			 * Drop the idc lock while sleeping so the reset
			 * owner can advance the device state.
			 */
			qla83xx_idc_unlock(vha, 0);
			msleep(200);
			qla83xx_idc_lock(vha, 0);
			qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		}
	}

	/* Send IDC ack by writing to drv-ack register */
	__qla83xx_set_drv_ack(vha);

	return QLA_SUCCESS;
}
5872
/* Write the IDC control register.
 * NOTE(review): the "__" prefix suggests the idc lock is expected to be
 * held by the caller -- confirm against call sites.
 */
int
__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
{
	return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
5878
/* Read the IDC control register into *idc_control.
 * NOTE(review): the "__" prefix suggests the idc lock is expected to be
 * held by the caller -- confirm against call sites.
 */
int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
	return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
5884
5885 static int
5886 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
5887 {
5888 uint32_t drv_presence = 0;
5889 struct qla_hw_data *ha = vha->hw;
5890
5891 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5892 if (drv_presence & (1 << ha->portnum))
5893 return QLA_SUCCESS;
5894 else
5895 return QLA_TEST_FAILED;
5896 }
5897
/*
 * qla83xx_nic_core_reset
 *	Coordinate a NIC-core reset through the inter-driver
 *	communication (IDC) registers, under the idc lock.
 *
 * Returns:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla83xx_nic_core_reset(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_p3p, vha, 0xb058,
	    "Entered %s().\n", __func__);

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0xb059,
		    "Device in unrecoverable FAILED state.\n");
		return QLA_FUNCTION_FAILED;
	}

	qla83xx_idc_lock(vha, 0);

	/* We must still be a registered IDC participant. */
	if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xb05a,
		    "Function=0x%x has been removed from IDC participation.\n",
		    ha->portnum);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide whether this function owns the reset. */
	qla83xx_reset_ownership(vha);

	rval = qla83xx_initiating_reset(vha);

	/*
	 * Perform reset if we are the reset-owner,
	 * else wait till IDC state changes to READY/FAILED.
	 */
	if (rval == QLA_SUCCESS) {
		rval = qla83xx_idc_state_handler(vha);

		if (rval == QLA_SUCCESS)
			ha->flags.nic_core_hung = 0;
		__qla83xx_clear_drv_ack(vha);
	}

exit:
	qla83xx_idc_unlock(vha, 0);

	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);

	return rval;
}
5946
/*
 * qla2xxx_mctp_dump
 *	Capture an MCTP diagnostic dump into a lazily-allocated, cached
 *	DMA buffer and, on function 0, restart the NIC firmware afterwards.
 *
 * Returns:
 *	QLA_SUCCESS or the error status of the last operation performed.
 */
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (!IS_MCTP_CAPABLE(ha)) {
		/* This message can be removed from the final version */
		ql_log(ql_log_info, vha, 0x506d,
		    "This board is not MCTP capable\n");
		return rval;
	}

	/* Allocate the dump buffer on first use; it is kept for reuse. */
	if (!ha->mctp_dump) {
		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

		if (!ha->mctp_dump) {
			ql_log(ql_log_warn, vha, 0x506e,
			    "Failed to allocate memory for mctp dump\n");
			return rval;
		}
	}

#define MCTP_DUMP_STR_ADDR 0x00000000
	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x506f,
		    "Failed to capture mctp dump\n");
	} else {
		ql_log(ql_log_info, vha, 0x5070,
		    "Mctp dump capture for host (%ld/%p).\n",
		    vha->host_no, ha->mctp_dump);
		ha->mctp_dumped = 1;
	}

	/*
	 * Only function 0 restarts the NIC firmware, and only when no
	 * other NIC-core reset handler is already active.
	 */
	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
		ha->flags.nic_core_reset_hdlr_active = 1;
		rval = qla83xx_restart_nic_firmware(vha);
		if (rval)
			/* NIC Core reset failed. */
			ql_log(ql_log_warn, vha, 0x5071,
			    "Failed to restart nic firmware\n");
		else
			ql_dbg(ql_dbg_p3p, vha, 0xb084,
			    "Restarted NIC firmware successfully.\n");
		ha->flags.nic_core_reset_hdlr_active = 0;
	}

	return rval;

}
6000
6001 /*
6002 * qla2x00_quiesce_io
6003 * Description: This function will block the new I/Os
6004  * It's not aborting any I/Os as context
6005 * is not destroyed during quiescence
6006 * Arguments: scsi_qla_host_t
6007 * return : void
6008 */
6009 void
6010 qla2x00_quiesce_io(scsi_qla_host_t *vha)
6011 {
6012 struct qla_hw_data *ha = vha->hw;
6013 struct scsi_qla_host *vp;
6014
6015 ql_dbg(ql_dbg_dpc, vha, 0x401d,
6016 "Quiescing I/O - ha=%p.\n", ha);
6017
6018 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
6019 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
6020 atomic_set(&vha->loop_state, LOOP_DOWN);
6021 qla2x00_mark_all_devices_lost(vha, 0);
6022 list_for_each_entry(vp, &ha->vp_list, list)
6023 qla2x00_mark_all_devices_lost(vp, 0);
6024 } else {
6025 if (!atomic_read(&vha->loop_down_timer))
6026 atomic_set(&vha->loop_down_timer,
6027 LOOP_DOWN_TIME);
6028 }
6029 /* Wait for pending cmds to complete */
6030 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
6031 }
6032
/*
 * qla2x00_abort_isp_cleanup
 *	Quiesce the adapter ahead of an ISP abort: take the port offline
 *	(non-P3P), reset the chip, mark all devices lost on every vport,
 *	clear async login state, and fail back outstanding commands.
 */
void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;
	fc_port_t *fcport;
	u16 i;

	/* For ISP82XX, driver waits for completion of the commands.
	 * online flag should be set.
	 */
	if (!(IS_P3P_TYPE(ha)))
		vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x00af,
	    "Performing ISP error recovery - ha=%p.\n", ha);

	/* For ISP82XX, reset_chip is just disabling interrupts.
	 * Driver waits for the completion of the commands.
	 * the interrupts need to be enabled.
	 */
	if (!(IS_P3P_TYPE(ha)))
		ha->isp_ops->reset_chip(vha);

	SAVE_TOPO(ha);
	ha->flags.rida_fmt2 = 0;
	ha->flags.n2n_ae = 0;
	ha->flags.lip_ae = 0;
	ha->current_topology = 0;
	ha->flags.fw_started = 0;
	ha->flags.fw_init_done = 0;
	/* Bump the reset generation and propagate it to all queue pairs. */
	ha->base_qpair->chip_reset++;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i])
			ha->queue_pair_map[i]->chip_reset =
				ha->base_qpair->chip_reset;
	}

	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			/* Pin the vport while the lock is dropped. */
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp, 0);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		/* Same pin/drop-lock pattern as above. */
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (!ha->flags.eeh_busy) {
		/* Make sure for ISP 82XX IO DMA is complete */
		if (IS_P3P_TYPE(ha)) {
			qla82xx_chip_reset_cleanup(vha);
			ql_log(ql_log_info, vha, 0x00b4,
			    "Done chip reset cleanup.\n");

			/* Done waiting for pending commands.
			 * Reset the online flag.
			 */
			vha->flags.online = 0;
		}

		/* Requeue all commands in outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}
	/* memory barrier */
	wmb();
}
6132
6133 /*
6134 * qla2x00_abort_isp
6135 * Resets ISP and aborts all outstanding commands.
6136 *
6137 * Input:
6138 * ha = adapter block pointer.
6139 *
6140 * Returns:
6141 * 0 = success
6142 */
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t status = 0;	/* 0 = success, 1 = retry scheduled */
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	struct req_que *req = ha->req_q_map[0];
	unsigned long flags;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
			    "Clearing fcoe driver presence.\n");
			if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb073,
				    "Error while clearing DRV-Presence.\n");
		}

		/* Permanent PCI failure: nothing more can be done. */
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			status = 0;
			return status;
		}

		ha->isp_ops->get_flash_version(vha, req->ring);

		ha->isp_ops->nvram_config(vha);

		if (!qla2x00_restart_isp(vha)) {
			/* Restart succeeded: bring the port back online. */
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			if (!atomic_read(&vha->loop_down_timer)) {
				/*
				 * Issue marker command only when we are going
				 * to start the I/O .
				 */
				vha->marker_needed = 1;
			}

			vha->flags.online = 1;

			ha->isp_ops->enable_intrs(ha);

			ha->isp_abort_cnt = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
				qla2x00_get_fw_version(vha);
			/* Re-arm the FCE trace buffer if it was in use. */
			if (ha->fce) {
				ha->flags.fce_enabled = 1;
				memset(ha->fce, 0,
				    fce_calc_size(ha->fce_bufs));
				rval = qla2x00_enable_fce_trace(vha,
				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
				    &ha->fce_bufs);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8033,
					    "Unable to reinitialize FCE "
					    "(%d).\n", rval);
					ha->flags.fce_enabled = 0;
				}
			}

			/* Re-arm the EFT trace buffer if it was in use. */
			if (ha->eft) {
				memset(ha->eft, 0, EFT_SIZE);
				rval = qla2x00_enable_eft_trace(vha,
				    ha->eft_dma, EFT_NUM_BUFFERS);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8034,
					    "Unable to reinitialize EFT "
					    "(%d).\n", rval);
				}
			}
		} else {	/* failed the ISP abort */
			vha->flags.online = 1;
			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (ha->isp_abort_cnt == 0) {
					ql_log(ql_log_fatal, vha, 0x8035,
					    "ISP error recover failed - "
					    "board disabled.\n");
					/*
					 * The next call disables the board
					 * completely.
					 */
					ha->isp_ops->reset_adapter(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else {	/* schedule another ISP abort */
					ha->isp_abort_cnt--;
					ql_dbg(ql_dbg_taskm, vha, 0x8020,
					    "ISP abort - retry remaining %d.\n",
					    ha->isp_abort_cnt);
					status = 1;
				}
			} else {
				/* First failure: start the retry budget. */
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				ql_dbg(ql_dbg_taskm, vha, 0x8021,
				    "ISP error recovery - retrying (%d) "
				    "more times.\n", ha->isp_abort_cnt);
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (!status) {
		/* Recovery succeeded: refresh HBA info and recover vports. */
		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
		qla2x00_configure_hba(vha);
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				/* Pin the vport while the lock is dropped. */
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
			    "Setting back fcoe driver presence.\n");
			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb074,
				    "Error while setting DRV-Presence.\n");
		}
	} else {
		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
		    __func__);
	}

	return(status);
}
6286
6287 /*
6288 * qla2x00_restart_isp
6289 * restarts the ISP after a reset
6290 *
6291 * Input:
6292 * ha = adapter block pointer.
6293 *
6294 * Returns:
6295 * 0 = success
6296 */
6297 static int
6298 qla2x00_restart_isp(scsi_qla_host_t *vha)
6299 {
6300 int status = 0;
6301 struct qla_hw_data *ha = vha->hw;
6302 struct req_que *req = ha->req_q_map[0];
6303 struct rsp_que *rsp = ha->rsp_q_map[0];
6304
6305 /* If firmware needs to be loaded */
6306 if (qla2x00_isp_firmware(vha)) {
6307 vha->flags.online = 0;
6308 status = ha->isp_ops->chip_diag(vha);
6309 if (!status)
6310 status = qla2x00_setup_chip(vha);
6311 }
6312
6313 if (!status && !(status = qla2x00_init_rings(vha))) {
6314 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
6315 ha->flags.chip_reset_done = 1;
6316
6317 /* Initialize the queues in use */
6318 qla25xx_init_queues(ha);
6319
6320 status = qla2x00_fw_ready(vha);
6321 if (!status) {
6322 /* Issue a marker after FW becomes ready. */
6323 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
6324 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6325 }
6326
6327 /* if no cable then assume it's good */
6328 if ((vha->device_flags & DFLG_NO_CABLE))
6329 status = 0;
6330 }
6331 return (status);
6332 }
6333
6334 static int
6335 qla25xx_init_queues(struct qla_hw_data *ha)
6336 {
6337 struct rsp_que *rsp = NULL;
6338 struct req_que *req = NULL;
6339 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6340 int ret = -1;
6341 int i;
6342
6343 for (i = 1; i < ha->max_rsp_queues; i++) {
6344 rsp = ha->rsp_q_map[i];
6345 if (rsp && test_bit(i, ha->rsp_qid_map)) {
6346 rsp->options &= ~BIT_0;
6347 ret = qla25xx_init_rsp_que(base_vha, rsp);
6348 if (ret != QLA_SUCCESS)
6349 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
6350 "%s Rsp que: %d init failed.\n",
6351 __func__, rsp->id);
6352 else
6353 ql_dbg(ql_dbg_init, base_vha, 0x0100,
6354 "%s Rsp que: %d inited.\n",
6355 __func__, rsp->id);
6356 }
6357 }
6358 for (i = 1; i < ha->max_req_queues; i++) {
6359 req = ha->req_q_map[i];
6360 if (req && test_bit(i, ha->req_qid_map)) {
6361 /* Clear outstanding commands array. */
6362 req->options &= ~BIT_0;
6363 ret = qla25xx_init_req_que(base_vha, req);
6364 if (ret != QLA_SUCCESS)
6365 ql_dbg(ql_dbg_init, base_vha, 0x0101,
6366 "%s Req que: %d init failed.\n",
6367 __func__, req->id);
6368 else
6369 ql_dbg(ql_dbg_init, base_vha, 0x0102,
6370 "%s Req que: %d inited.\n",
6371 __func__, req->id);
6372 }
6373 }
6374 return ret;
6375 }
6376
6377 /*
6378 * qla2x00_reset_adapter
6379 * Reset adapter.
6380 *
6381 * Input:
6382 * ha = adapter block pointer.
6383 */
void
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Take the port offline and mask interrupts before touching HCCR. */
	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Pulse RISC reset then release, reading back to flush each write. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
6401
void
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* P3P parts are reset through a different (IDC-based) path. */
	if (IS_P3P_TYPE(ha))
		return;

	/* Take the port offline and mask interrupts before touching HCCR. */
	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Assert RISC reset then release/pause, reading back to flush. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);
	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);
}
6425
6426 /* On sparc systems, obtain port and node WWN from firmware
6427 * properties.
6428 */
6429 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
6430 struct nvram_24xx *nv)
6431 {
6432 #ifdef CONFIG_SPARC
6433 struct qla_hw_data *ha = vha->hw;
6434 struct pci_dev *pdev = ha->pdev;
6435 struct device_node *dp = pci_device_to_OF_node(pdev);
6436 const u8 *val;
6437 int len;
6438
6439 val = of_get_property(dp, "port-wwn", &len);
6440 if (val && len >= WWN_SIZE)
6441 memcpy(nv->port_name, val, WWN_SIZE);
6442
6443 val = of_get_property(dp, "node-wwn", &len);
6444 if (val && len >= WWN_SIZE)
6445 memcpy(nv->node_name, val, WWN_SIZE);
6446 #endif
6447 }
6448
6449 int
6450 qla24xx_nvram_config(scsi_qla_host_t *vha)
6451 {
6452 int rval;
6453 struct init_cb_24xx *icb;
6454 struct nvram_24xx *nv;
6455 uint32_t *dptr;
6456 uint8_t *dptr1, *dptr2;
6457 uint32_t chksum;
6458 uint16_t cnt;
6459 struct qla_hw_data *ha = vha->hw;
6460
6461 rval = QLA_SUCCESS;
6462 icb = (struct init_cb_24xx *)ha->init_cb;
6463 nv = ha->nvram;
6464
6465 /* Determine NVRAM starting address. */
6466 if (ha->port_no == 0) {
6467 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
6468 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
6469 } else {
6470 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
6471 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
6472 }
6473
6474 ha->nvram_size = sizeof(struct nvram_24xx);
6475 ha->vpd_size = FA_NVRAM_VPD_SIZE;
6476
6477 /* Get VPD data into cache */
6478 ha->vpd = ha->nvram + VPD_OFFSET;
6479 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
6480 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
6481
6482 /* Get NVRAM data into cache and calculate checksum. */
6483 dptr = (uint32_t *)nv;
6484 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
6485 ha->nvram_size);
6486 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
6487 chksum += le32_to_cpu(*dptr);
6488
6489 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
6490 "Contents of NVRAM\n");
6491 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
6492 (uint8_t *)nv, ha->nvram_size);
6493
6494 /* Bad NVRAM data, set defaults parameters. */
6495 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
6496 || nv->id[3] != ' ' ||
6497 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
6498 /* Reset NVRAM data. */
6499 ql_log(ql_log_warn, vha, 0x006b,
6500 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
6501 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
6502 ql_log(ql_log_warn, vha, 0x006c,
6503 "Falling back to functioning (yet invalid -- WWPN) "
6504 "defaults.\n");
6505
6506 /*
6507 * Set default initialization control block.
6508 */
6509 memset(nv, 0, ha->nvram_size);
6510 nv->nvram_version = cpu_to_le16(ICB_VERSION);
6511 nv->version = cpu_to_le16(ICB_VERSION);
6512 nv->frame_payload_size = 2048;
6513 nv->execution_throttle = cpu_to_le16(0xFFFF);
6514 nv->exchange_count = cpu_to_le16(0);
6515 nv->hard_address = cpu_to_le16(124);
6516 nv->port_name[0] = 0x21;
6517 nv->port_name[1] = 0x00 + ha->port_no + 1;
6518 nv->port_name[2] = 0x00;
6519 nv->port_name[3] = 0xe0;
6520 nv->port_name[4] = 0x8b;
6521 nv->port_name[5] = 0x1c;
6522 nv->port_name[6] = 0x55;
6523 nv->port_name[7] = 0x86;
6524 nv->node_name[0] = 0x20;
6525 nv->node_name[1] = 0x00;
6526 nv->node_name[2] = 0x00;
6527 nv->node_name[3] = 0xe0;
6528 nv->node_name[4] = 0x8b;
6529 nv->node_name[5] = 0x1c;
6530 nv->node_name[6] = 0x55;
6531 nv->node_name[7] = 0x86;
6532 qla24xx_nvram_wwn_from_ofw(vha, nv);
6533 nv->login_retry_count = cpu_to_le16(8);
6534 nv->interrupt_delay_timer = cpu_to_le16(0);
6535 nv->login_timeout = cpu_to_le16(0);
6536 nv->firmware_options_1 =
6537 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
6538 nv->firmware_options_2 = cpu_to_le32(2 << 4);
6539 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6540 nv->firmware_options_3 = cpu_to_le32(2 << 13);
6541 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
6542 nv->efi_parameters = cpu_to_le32(0);
6543 nv->reset_delay = 5;
6544 nv->max_luns_per_target = cpu_to_le16(128);
6545 nv->port_down_retry_count = cpu_to_le16(30);
6546 nv->link_down_timeout = cpu_to_le16(30);
6547
6548 rval = 1;
6549 }
6550
6551 if (qla_tgt_mode_enabled(vha)) {
6552 /* Don't enable full login after initial LIP */
6553 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6554 /* Don't enable LIP full login for initiator */
6555 nv->host_p &= cpu_to_le32(~BIT_10);
6556 }
6557
6558 qlt_24xx_config_nvram_stage1(vha, nv);
6559
6560 /* Reset Initialization control block */
6561 memset(icb, 0, ha->init_cb_size);
6562
6563 /* Copy 1st segment. */
6564 dptr1 = (uint8_t *)icb;
6565 dptr2 = (uint8_t *)&nv->version;
6566 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
6567 while (cnt--)
6568 *dptr1++ = *dptr2++;
6569
6570 icb->login_retry_count = nv->login_retry_count;
6571 icb->link_down_on_nos = nv->link_down_on_nos;
6572
6573 /* Copy 2nd segment. */
6574 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
6575 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
6576 cnt = (uint8_t *)&icb->reserved_3 -
6577 (uint8_t *)&icb->interrupt_delay_timer;
6578 while (cnt--)
6579 *dptr1++ = *dptr2++;
6580
6581 /*
6582 * Setup driver NVRAM options.
6583 */
6584 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
6585 "QLA2462");
6586
6587 qlt_24xx_config_nvram_stage2(vha, icb);
6588
6589 if (nv->host_p & cpu_to_le32(BIT_15)) {
6590 /* Use alternate WWN? */
6591 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
6592 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
6593 }
6594
6595 /* Prepare nodename */
6596 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
6597 /*
6598 * Firmware will apply the following mask if the nodename was
6599 * not provided.
6600 */
6601 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
6602 icb->node_name[0] &= 0xF0;
6603 }
6604
6605 /* Set host adapter parameters. */
6606 ha->flags.disable_risc_code_load = 0;
6607 ha->flags.enable_lip_reset = 0;
6608 ha->flags.enable_lip_full_login =
6609 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
6610 ha->flags.enable_target_reset =
6611 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
6612 ha->flags.enable_led_scheme = 0;
6613 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
6614
6615 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
6616 (BIT_6 | BIT_5 | BIT_4)) >> 4;
6617
6618 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
6619 sizeof(ha->fw_seriallink_options24));
6620
6621 /* save HBA serial number */
6622 ha->serial0 = icb->port_name[5];
6623 ha->serial1 = icb->port_name[6];
6624 ha->serial2 = icb->port_name[7];
6625 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
6626 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
6627
6628 icb->execution_throttle = cpu_to_le16(0xFFFF);
6629
6630 ha->retry_count = le16_to_cpu(nv->login_retry_count);
6631
6632 /* Set minimum login_timeout to 4 seconds. */
6633 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
6634 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
6635 if (le16_to_cpu(nv->login_timeout) < 4)
6636 nv->login_timeout = cpu_to_le16(4);
6637 ha->login_timeout = le16_to_cpu(nv->login_timeout);
6638
6639 /* Set minimum RATOV to 100 tenths of a second. */
6640 ha->r_a_tov = 100;
6641
6642 ha->loop_reset_delay = nv->reset_delay;
6643
6644 /* Link Down Timeout = 0:
6645 *
6646 * When Port Down timer expires we will start returning
6647 * I/O's to OS with "DID_NO_CONNECT".
6648 *
6649 * Link Down Timeout != 0:
6650 *
6651 * The driver waits for the link to come up after link down
6652 * before returning I/Os to OS with "DID_NO_CONNECT".
6653 */
6654 if (le16_to_cpu(nv->link_down_timeout) == 0) {
6655 ha->loop_down_abort_time =
6656 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
6657 } else {
6658 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
6659 ha->loop_down_abort_time =
6660 (LOOP_DOWN_TIME - ha->link_down_timeout);
6661 }
6662
6663 /* Need enough time to try and get the port back. */
6664 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
6665 if (qlport_down_retry)
6666 ha->port_down_retry_count = qlport_down_retry;
6667
6668 /* Set login_retry_count */
6669 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
6670 if (ha->port_down_retry_count ==
6671 le16_to_cpu(nv->port_down_retry_count) &&
6672 ha->port_down_retry_count > 3)
6673 ha->login_retry_count = ha->port_down_retry_count;
6674 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
6675 ha->login_retry_count = ha->port_down_retry_count;
6676 if (ql2xloginretrycount)
6677 ha->login_retry_count = ql2xloginretrycount;
6678
6679 /* Enable ZIO. */
6680 if (!vha->flags.init_done) {
6681 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
6682 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
6683 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
6684 le16_to_cpu(icb->interrupt_delay_timer): 2;
6685 }
6686 icb->firmware_options_2 &= cpu_to_le32(
6687 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
6688 vha->flags.process_response_queue = 0;
6689 if (ha->zio_mode != QLA_ZIO_DISABLED) {
6690 ha->zio_mode = QLA_ZIO_MODE_6;
6691
6692 ql_log(ql_log_info, vha, 0x006f,
6693 "ZIO mode %d enabled; timer delay (%d us).\n",
6694 ha->zio_mode, ha->zio_timer * 100);
6695
6696 icb->firmware_options_2 |= cpu_to_le32(
6697 (uint32_t)ha->zio_mode);
6698 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
6699 vha->flags.process_response_queue = 1;
6700 }
6701
6702 if (rval) {
6703 ql_log(ql_log_warn, vha, 0x0070,
6704 "NVRAM configuration failed.\n");
6705 }
6706 return (rval);
6707 }
6708
/*
 * qla27xx_find_valid_image() - select which flash firmware image to boot.
 * @vha: host adapter
 *
 * Reads the primary and secondary image-status regions from flash,
 * validates each by signature and by a 32-bit additive checksum (a valid
 * block sums to zero), and records the selection in ha->active_image
 * (0 = default boot-loader/firmware, QLA27XX_PRIMARY_IMAGE, or
 * QLA27XX_SECONDARY_IMAGE).  When both images are valid and flagged
 * bootable, the one with the newer generation number wins.
 *
 * Returns the selected image id (same value stored in ha->active_image).
 */
uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
{
	struct qla27xx_image_status pri_image_status, sec_image_status;
	uint8_t valid_pri_image, valid_sec_image;
	uint32_t *wptr;
	uint32_t cnt, chksum, size;
	struct qla_hw_data *ha = vha->hw;

	/* Assume both images are valid until a check fails. */
	valid_pri_image = valid_sec_image = 1;
	ha->active_image = 0;
	/* Status-block size in 32-bit words; flash reads are dword based. */
	size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);

	if (!ha->flt_region_img_status_pri) {
		/* Flash layout has no primary image-status region. */
		valid_pri_image = 0;
		goto check_sec_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
	    ha->flt_region_img_status_pri, size);

	if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Primary image signature (0x%x) not valid\n",
		    pri_image_status.signature);
		valid_pri_image = 0;
		goto check_sec_image;
	}

	/* Checksum every dword of the status block, including itself. */
	wptr = (uint32_t *)(&pri_image_status);
	cnt = size;

	for (chksum = 0; cnt--; wptr++)
		chksum += le32_to_cpu(*wptr);

	if (chksum) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Checksum validation failed for primary image (0x%x)\n",
		    chksum);
		valid_pri_image = 0;
	}

check_sec_image:
	if (!ha->flt_region_img_status_sec) {
		/* Flash layout has no secondary image-status region. */
		valid_sec_image = 0;
		goto check_valid_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
	    ha->flt_region_img_status_sec, size);

	if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Secondary image signature(0x%x) not valid\n",
		    sec_image_status.signature);
		valid_sec_image = 0;
		goto check_valid_image;
	}

	/* Same zero-sum checksum rule as the primary block. */
	wptr = (uint32_t *)(&sec_image_status);
	cnt = size;
	for (chksum = 0; cnt--; wptr++)
		chksum += le32_to_cpu(*wptr);
	if (chksum) {
		ql_dbg(ql_dbg_init, vha, 0x018e,
		    "Checksum validation failed for secondary image (0x%x)\n",
		    chksum);
		valid_sec_image = 0;
	}

check_valid_image:
	/* Bit 0 of image_status_mask marks the image as bootable. */
	if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
		ha->active_image = QLA27XX_PRIMARY_IMAGE;
	if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
		/* Secondary wins when no primary or primary is older. */
		if (!ha->active_image ||
		    pri_image_status.generation_number <
		    sec_image_status.generation_number)
			ha->active_image = QLA27XX_SECONDARY_IMAGE;
	}

	ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
	    ha->active_image == 0 ? "default bootld and fw" :
	    ha->active_image == 1 ? "primary" :
	    ha->active_image == 2 ? "secondary" :
	    "Invalid");

	return ha->active_image;
}
6796
/*
 * qla24xx_load_risc_flash() - load RISC firmware from the flash part.
 * @vha: host adapter
 * @srisc_addr: out - RISC start address of the first loaded segment
 * @faddr: flash address (dword offset) of the firmware image
 *
 * Stages dwords through the request ring (a DMA-able buffer) and pushes
 * each firmware segment to RISC memory with load-ram mailbox commands.
 * On ISP27xx the secondary flash image may be selected instead, and the
 * firmware-dump template that follows the firmware image is also loaded
 * (falling back to the built-in default template on any failure).
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
    uint32_t faddr)
{
	int rval = QLA_SUCCESS;
	int segments, fragment;
	uint32_t *dcode, dlen;
	uint32_t risc_addr;
	uint32_t risc_size;
	uint32_t i;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "FW: Loading firmware from flash (%x).\n", faddr);

	rval = QLA_SUCCESS;

	segments = FA_RISC_CODE_SEGMENTS;
	/* Use the request ring as a scratch/DMA staging buffer. */
	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;

	/* ISP27xx may boot from the secondary flash image. */
	if (IS_QLA27XX(ha) &&
	    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
		faddr = ha->flt_region_fw_sec;

	/* Validate firmware image by checking version. */
	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
	for (i = 0; i < 4; i++)
		dcode[i] = be32_to_cpu(dcode[i]);
	/* All-ones or all-zero version dwords mean erased/corrupt flash. */
	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
	    dcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x008c,
		    "Unable to verify the integrity of flash firmware "
		    "image.\n");
		ql_log(ql_log_fatal, vha, 0x008d,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);

		return QLA_FUNCTION_FAILED;
	}

	while (segments && rval == QLA_SUCCESS) {
		/* Read segment's load information. */
		qla24xx_read_flash_data(vha, dcode, faddr, 4);

		risc_addr = be32_to_cpu(dcode[2]);
		/* Remember the load address of the very first segment. */
		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
		risc_size = be32_to_cpu(dcode[3]);

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			/* Cap each transfer at fw_transfer_size bytes. */
			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x008e,
			    "Loading risc segment@ risc addr %x "
			    "number of dwords 0x%x offset 0x%x.\n",
			    risc_addr, dlen, faddr);

			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
			/* Byte-swap into the order load-ram expects. */
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(dcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008f,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			faddr += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
			fragment++;
		}

		/* Next segment. */
		segments--;
	}

	/* Only ISP27xx carries a fwdump template after the firmware. */
	if (!IS_QLA27XX(ha))
		return rval;

	/* Discard any previously loaded template before replacing it. */
	if (ha->fw_dump_template)
		vfree(ha->fw_dump_template);
	ha->fw_dump_template = NULL;
	ha->fw_dump_template_len = 0;

	ql_dbg(ql_dbg_init, vha, 0x0161,
	    "Loading fwdump template from %x\n", faddr);
	qla24xx_read_flash_data(vha, dcode, faddr, 7);
	risc_size = be32_to_cpu(dcode[2]);
	ql_dbg(ql_dbg_init, vha, 0x0162,
	    "-> array size %x dwords\n", risc_size);
	/* 0 or ~0 dwords: no template present in flash. */
	if (risc_size == 0 || risc_size == ~0)
		goto default_template;

	dlen = (risc_size - 8) * sizeof(*dcode);
	ql_dbg(ql_dbg_init, vha, 0x0163,
	    "-> template allocating %x bytes...\n", dlen);
	ha->fw_dump_template = vmalloc(dlen);
	if (!ha->fw_dump_template) {
		ql_log(ql_log_warn, vha, 0x0164,
		    "Failed fwdump template allocate %x bytes.\n", risc_size);
		goto default_template;
	}

	/* Skip the 7-dword header; 8 dwords of the array are metadata. */
	faddr += 7;
	risc_size -= 8;
	dcode = ha->fw_dump_template;
	qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
	for (i = 0; i < risc_size; i++)
		dcode[i] = le32_to_cpu(dcode[i]);

	if (!qla27xx_fwdt_template_valid(dcode)) {
		ql_log(ql_log_warn, vha, 0x0165,
		    "Failed fwdump template validate\n");
		goto default_template;
	}

	dlen = qla27xx_fwdt_template_size(dcode);
	ql_dbg(ql_dbg_init, vha, 0x0166,
	    "-> template size %x bytes\n", dlen);
	/* The self-described size must fit in what was read. */
	if (dlen > risc_size * sizeof(*dcode)) {
		ql_log(ql_log_warn, vha, 0x0167,
		    "Failed fwdump template exceeds array by %zx bytes\n",
		    (size_t)(dlen - risc_size * sizeof(*dcode)));
		goto default_template;
	}
	ha->fw_dump_template_len = dlen;
	return rval;

default_template:
	/* Fall back to the template compiled into the driver. */
	ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
	if (ha->fw_dump_template)
		vfree(ha->fw_dump_template);
	ha->fw_dump_template = NULL;
	ha->fw_dump_template_len = 0;

	dlen = qla27xx_fwdt_template_default_size();
	ql_dbg(ql_dbg_init, vha, 0x0169,
	    "-> template allocating %x bytes...\n", dlen);
	ha->fw_dump_template = vmalloc(dlen);
	if (!ha->fw_dump_template) {
		ql_log(ql_log_warn, vha, 0x016a,
		    "Failed fwdump template allocate %x bytes.\n", risc_size);
		goto failed_template;
	}

	dcode = ha->fw_dump_template;
	risc_size = dlen / sizeof(*dcode);
	memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
	for (i = 0; i < risc_size; i++)
		dcode[i] = be32_to_cpu(dcode[i]);

	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
		ql_log(ql_log_warn, vha, 0x016b,
		    "Failed fwdump template validate\n");
		goto failed_template;
	}

	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
	ql_dbg(ql_dbg_init, vha, 0x016c,
	    "-> template size %x bytes\n", dlen);
	ha->fw_dump_template_len = dlen;
	return rval;

failed_template:
	/* No usable template at all; firmware dumps will be unavailable. */
	ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
	if (ha->fw_dump_template)
		vfree(ha->fw_dump_template);
	ha->fw_dump_template = NULL;
	ha->fw_dump_template_len = 0;
	return rval;
}
6978
/* Public download location printed when a firmware image is missing. */
#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
6980
/*
 * qla2x00_load_risc() - load RISC firmware via request_firmware() on
 * pre-FWI2 (ISP2x00-family) adapters.
 * @vha: host adapter
 * @srisc_addr: out - RISC start address of the first loaded segment
 *
 * Validates the 16-bit-word firmware blob, then transfers each segment
 * to RISC memory through the request ring buffer using load-ram mailbox
 * commands.  Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int i, fragment;
	uint16_t *wcode, *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	/* Stage words through the (DMA-able) request ring. */
	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (uint16_t *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	/* All-ones or all-zero version words mean a blank/corrupt image. */
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
	    wcode[2] == 0 && wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	/* blob->segs is a zero-terminated list of segment load addresses. */
	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		/* Remember the load address of the very first segment. */
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			/* Cap each transfer at fw_transfer_size bytes. */
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			/* Byte-swap into the order load-ram expects. */
			for (i = 0; i < wlen; i++)
				wcode[i] = swab16(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	/* Blob remains cached by the request-firmware layer; no free here. */
	return QLA_FUNCTION_FAILED;
}
7079
/*
 * qla24xx_load_risc_blob() - load RISC firmware via request_firmware()
 * on FWI2 (ISP24xx+) adapters.
 * @vha: host adapter
 * @srisc_addr: out - RISC start address of the first loaded segment
 *
 * Validates the 32-bit-dword firmware blob, transfers each of the
 * FA_RISC_CODE_SEGMENTS segments to RISC memory through the request
 * ring using load-ram mailbox commands, then (ISP27xx only) loads the
 * firmware-dump template that follows, falling back to the built-in
 * default template on any failure.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int segments, fragment;
	uint32_t *dcode, dlen;
	uint32_t risc_addr;
	uint32_t risc_size;
	uint32_t i;
	struct fw_blob *blob;
	const uint32_t *fwcode;
	uint32_t fwclen;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_warn, vha, 0x0090,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_warn, vha, 0x0091,
		    "Firmware images can be retrieved from: "
		    QLA_FW_URL ".\n");

		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0092,
	    "FW: Loading via request-firmware.\n");

	rval = QLA_SUCCESS;

	segments = FA_RISC_CODE_SEGMENTS;
	/* Stage dwords through the (DMA-able) request ring. */
	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (uint32_t *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint32_t)) {
		ql_log(ql_log_fatal, vha, 0x0093,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		return QLA_FUNCTION_FAILED;
	}
	for (i = 0; i < 4; i++)
		dcode[i] = be32_to_cpu(fwcode[i + 4]);
	/* All-ones or all-zero version dwords mean a blank/corrupt image. */
	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
	    dcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0094,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		ql_log(ql_log_fatal, vha, 0x0095,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);
		return QLA_FUNCTION_FAILED;
	}

	while (segments && rval == QLA_SUCCESS) {
		/* Segment header: fwcode[2]=load addr, fwcode[3]=length. */
		risc_addr = be32_to_cpu(fwcode[2]);
		/* Remember the load address of the very first segment. */
		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
		risc_size = be32_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint32_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0096,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			return QLA_FUNCTION_FAILED;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			/* Cap each transfer at fw_transfer_size bytes. */
			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x0097,
			    "Loading risc segment@ risc addr %x "
			    "number of dwords 0x%x.\n", risc_addr, dlen);

			/* Byte-swap into the order load-ram expects. */
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x0098,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			fwcode += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
			fragment++;
		}

		/* Next segment. */
		segments--;
	}

	/* Only ISP27xx carries a fwdump template after the firmware. */
	if (!IS_QLA27XX(ha))
		return rval;

	/* Discard any previously loaded template before replacing it. */
	if (ha->fw_dump_template)
		vfree(ha->fw_dump_template);
	ha->fw_dump_template = NULL;
	ha->fw_dump_template_len = 0;

	ql_dbg(ql_dbg_init, vha, 0x171,
	    "Loading fwdump template from %x\n",
	    (uint32_t)((void *)fwcode - (void *)blob->fw->data));
	risc_size = be32_to_cpu(fwcode[2]);
	ql_dbg(ql_dbg_init, vha, 0x172,
	    "-> array size %x dwords\n", risc_size);
	/* 0 or ~0 dwords: no template present in the blob. */
	if (risc_size == 0 || risc_size == ~0)
		goto default_template;

	dlen = (risc_size - 8) * sizeof(*fwcode);
	ql_dbg(ql_dbg_init, vha, 0x0173,
	    "-> template allocating %x bytes...\n", dlen);
	ha->fw_dump_template = vmalloc(dlen);
	if (!ha->fw_dump_template) {
		ql_log(ql_log_warn, vha, 0x0174,
		    "Failed fwdump template allocate %x bytes.\n", risc_size);
		goto default_template;
	}

	/* Skip the 7-dword header; 8 dwords of the array are metadata. */
	fwcode += 7;
	risc_size -= 8;
	dcode = ha->fw_dump_template;
	for (i = 0; i < risc_size; i++)
		dcode[i] = le32_to_cpu(fwcode[i]);

	if (!qla27xx_fwdt_template_valid(dcode)) {
		ql_log(ql_log_warn, vha, 0x0175,
		    "Failed fwdump template validate\n");
		goto default_template;
	}

	dlen = qla27xx_fwdt_template_size(dcode);
	ql_dbg(ql_dbg_init, vha, 0x0176,
	    "-> template size %x bytes\n", dlen);
	/* The self-described size must fit in what was copied. */
	if (dlen > risc_size * sizeof(*fwcode)) {
		ql_log(ql_log_warn, vha, 0x0177,
		    "Failed fwdump template exceeds array by %zx bytes\n",
		    (size_t)(dlen - risc_size * sizeof(*fwcode)));
		goto default_template;
	}
	ha->fw_dump_template_len = dlen;
	return rval;

default_template:
	/* Fall back to the template compiled into the driver. */
	ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
	if (ha->fw_dump_template)
		vfree(ha->fw_dump_template);
	ha->fw_dump_template = NULL;
	ha->fw_dump_template_len = 0;

	dlen = qla27xx_fwdt_template_default_size();
	ql_dbg(ql_dbg_init, vha, 0x0179,
	    "-> template allocating %x bytes...\n", dlen);
	ha->fw_dump_template = vmalloc(dlen);
	if (!ha->fw_dump_template) {
		ql_log(ql_log_warn, vha, 0x017a,
		    "Failed fwdump template allocate %x bytes.\n", risc_size);
		goto failed_template;
	}

	dcode = ha->fw_dump_template;
	risc_size = dlen / sizeof(*fwcode);
	fwcode = qla27xx_fwdt_template_default();
	for (i = 0; i < risc_size; i++)
		dcode[i] = be32_to_cpu(fwcode[i]);

	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
		ql_log(ql_log_warn, vha, 0x017b,
		    "Failed fwdump template validate\n");
		goto failed_template;
	}

	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
	ql_dbg(ql_dbg_init, vha, 0x017c,
	    "-> template size %x bytes\n", dlen);
	ha->fw_dump_template_len = dlen;
	return rval;

failed_template:
	/* No usable template at all; firmware dumps will be unavailable. */
	ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
	if (ha->fw_dump_template)
		vfree(ha->fw_dump_template);
	ha->fw_dump_template = NULL;
	ha->fw_dump_template_len = 0;
	return rval;
}
7280
7281 int
7282 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7283 {
7284 int rval;
7285
7286 if (ql2xfwloadbin == 1)
7287 return qla81xx_load_risc(vha, srisc_addr);
7288
7289 /*
7290 * FW Load priority:
7291 * 1) Firmware via request-firmware interface (.bin file).
7292 * 2) Firmware residing in flash.
7293 */
7294 rval = qla24xx_load_risc_blob(vha, srisc_addr);
7295 if (rval == QLA_SUCCESS)
7296 return rval;
7297
7298 return qla24xx_load_risc_flash(vha, srisc_addr,
7299 vha->hw->flt_region_fw);
7300 }
7301
7302 int
7303 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7304 {
7305 int rval;
7306 struct qla_hw_data *ha = vha->hw;
7307
7308 if (ql2xfwloadbin == 2)
7309 goto try_blob_fw;
7310
7311 /*
7312 * FW Load priority:
7313 * 1) Firmware residing in flash.
7314 * 2) Firmware via request-firmware interface (.bin file).
7315 * 3) Golden-Firmware residing in flash -- limited operation.
7316 */
7317 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
7318 if (rval == QLA_SUCCESS)
7319 return rval;
7320
7321 try_blob_fw:
7322 rval = qla24xx_load_risc_blob(vha, srisc_addr);
7323 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
7324 return rval;
7325
7326 ql_log(ql_log_info, vha, 0x0099,
7327 "Attempting to fallback to golden firmware.\n");
7328 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
7329 if (rval != QLA_SUCCESS)
7330 return rval;
7331
7332 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
7333 ha->flags.running_gold_fw = 1;
7334 return rval;
7335 }
7336
/*
 * qla2x00_try_to_stop_firmware() - best-effort stop of the running ISP
 * firmware.
 * @vha: host adapter
 *
 * Only acts on FWI2-capable adapters that actually have firmware loaded
 * and started, and whose PCI channel is still usable.  If the
 * stop-firmware mailbox command fails, the chip is reset and
 * re-initialized and the command retried, up to five times; timeout and
 * invalid-command results end the retries immediately.  Always marks
 * firmware as stopped/uninitialized on exit.
 */
void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
	int ret, retries;
	struct qla_hw_data *ha = vha->hw;

	/* Nothing to do if the PCI channel is permanently gone. */
	if (ha->flags.pci_channel_io_perm_failure)
		return;
	if (!IS_FWI2_CAPABLE(ha))
		return;
	/* No firmware was ever loaded. */
	if (!ha->fw_major_version)
		return;
	/* Firmware is not running. */
	if (!ha->flags.fw_started)
		return;

	ret = qla2x00_stop_firmware(vha);
	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
		/* Reset and re-setup the chip before retrying the stop. */
		ha->isp_ops->reset_chip(vha);
		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
			continue;
		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
			continue;
		ql_log(ql_log_info, vha, 0x8015,
		    "Attempting retry of stop-firmware command.\n");
		ret = qla2x00_stop_firmware(vha);
	}

	/* Record firmware as stopped regardless of mailbox outcome. */
	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
}
7368
/*
 * qla24xx_configure_vhba() - bring an NPIV virtual port online.
 * @vha: virtual host adapter (vp_idx must be non-zero)
 *
 * Waits for firmware readiness on the physical (base) port, issues a
 * sync marker, logs the vport into the fabric name server (SNS) and
 * then triggers a loop resync on the base port.
 *
 * Returns -EINVAL when called on the physical port, QLA_FUNCTION_FAILED
 * when the SNS login fails, otherwise the loop-resync result.
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct req_que *req;
	struct rsp_que *rsp;

	/* Only valid for NPIV virtual ports, never the physical port. */
	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);
	/* Prefer the vport's dedicated queue pair when one exists. */
	if (vha->qpair)
		req = vha->qpair->req;
	else
		req = ha->req_q_map[0];
	rsp = req->rsp;

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	/* Mark the loop up and schedule a resync on the base port. */
	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}
7421
7422 /* 84XX Support **************************************************************/
7423
/* Global list of shared 84xx chip-state objects, protected by the mutex. */
static LIST_HEAD(qla_cs84xx_list);
static DEFINE_MUTEX(qla_cs84xx_mutex);
7426
/*
 * qla84xx_get_chip() - get (or create) the shared 84xx chip state for
 * this adapter's PCI bus.
 * @vha: host adapter
 *
 * Multiple PCI functions of an ISP84xx share one chip, so the state
 * object is keyed by pdev->bus and reference-counted.  Returns the
 * state with a reference held, or NULL on allocation failure.
 */
static struct qla_chip_state_84xx *
qla84xx_get_chip(struct scsi_qla_host *vha)
{
	struct qla_chip_state_84xx *cs84xx;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&qla_cs84xx_mutex);

	/* Find any shared 84xx chip. */
	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
		if (cs84xx->bus == ha->pdev->bus) {
			/* Found an existing entry; take a reference. */
			kref_get(&cs84xx->kref);
			goto done;
		}
	}

	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
	if (!cs84xx)
		goto done;

	/* New state starts with one reference via kref_init(). */
	kref_init(&cs84xx->kref);
	spin_lock_init(&cs84xx->access_lock);
	mutex_init(&cs84xx->fw_update_mutex);
	cs84xx->bus = ha->pdev->bus;

	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
done:
	mutex_unlock(&qla_cs84xx_mutex);
	return cs84xx;
}
7457
/*
 * kref release callback: unlink the 84xx chip state from the global
 * list and free it once the last holder drops its reference.
 */
static void
__qla84xx_chip_release(struct kref *kref)
{
	struct qla_chip_state_84xx *cs84xx =
	    container_of(kref, struct qla_chip_state_84xx, kref);

	mutex_lock(&qla_cs84xx_mutex);
	list_del(&cs84xx->list);
	mutex_unlock(&qla_cs84xx_mutex);
	kfree(cs84xx);
}
7469
7470 void
7471 qla84xx_put_chip(struct scsi_qla_host *vha)
7472 {
7473 struct qla_hw_data *ha = vha->hw;
7474 if (ha->cs84xx)
7475 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
7476 }
7477
7478 static int
7479 qla84xx_init_chip(scsi_qla_host_t *vha)
7480 {
7481 int rval;
7482 uint16_t status[2];
7483 struct qla_hw_data *ha = vha->hw;
7484
7485 mutex_lock(&ha->cs84xx->fw_update_mutex);
7486
7487 rval = qla84xx_verify_chip(vha, status);
7488
7489 mutex_unlock(&ha->cs84xx->fw_update_mutex);
7490
7491 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
7492 QLA_SUCCESS;
7493 }
7494
7495 /* 81XX Support **************************************************************/
7496
7497 int
7498 qla81xx_nvram_config(scsi_qla_host_t *vha)
7499 {
7500 int rval;
7501 struct init_cb_81xx *icb;
7502 struct nvram_81xx *nv;
7503 uint32_t *dptr;
7504 uint8_t *dptr1, *dptr2;
7505 uint32_t chksum;
7506 uint16_t cnt;
7507 struct qla_hw_data *ha = vha->hw;
7508
7509 rval = QLA_SUCCESS;
7510 icb = (struct init_cb_81xx *)ha->init_cb;
7511 nv = ha->nvram;
7512
7513 /* Determine NVRAM starting address. */
7514 ha->nvram_size = sizeof(struct nvram_81xx);
7515 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7516 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
7517 ha->vpd_size = FA_VPD_SIZE_82XX;
7518
7519 /* Get VPD data into cache */
7520 ha->vpd = ha->nvram + VPD_OFFSET;
7521 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
7522 ha->vpd_size);
7523
7524 /* Get NVRAM data into cache and calculate checksum. */
7525 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
7526 ha->nvram_size);
7527 dptr = (uint32_t *)nv;
7528 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7529 chksum += le32_to_cpu(*dptr);
7530
7531 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
7532 "Contents of NVRAM:\n");
7533 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
7534 (uint8_t *)nv, ha->nvram_size);
7535
7536 /* Bad NVRAM data, set defaults parameters. */
7537 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
7538 || nv->id[3] != ' ' ||
7539 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
7540 /* Reset NVRAM data. */
7541 ql_log(ql_log_info, vha, 0x0073,
7542 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7543 "version=0x%x.\n", chksum, nv->id[0],
7544 le16_to_cpu(nv->nvram_version));
7545 ql_log(ql_log_info, vha, 0x0074,
7546 "Falling back to functioning (yet invalid -- WWPN) "
7547 "defaults.\n");
7548
7549 /*
7550 * Set default initialization control block.
7551 */
7552 memset(nv, 0, ha->nvram_size);
7553 nv->nvram_version = cpu_to_le16(ICB_VERSION);
7554 nv->version = cpu_to_le16(ICB_VERSION);
7555 nv->frame_payload_size = 2048;
7556 nv->execution_throttle = cpu_to_le16(0xFFFF);
7557 nv->exchange_count = cpu_to_le16(0);
7558 nv->port_name[0] = 0x21;
7559 nv->port_name[1] = 0x00 + ha->port_no + 1;
7560 nv->port_name[2] = 0x00;
7561 nv->port_name[3] = 0xe0;
7562 nv->port_name[4] = 0x8b;
7563 nv->port_name[5] = 0x1c;
7564 nv->port_name[6] = 0x55;
7565 nv->port_name[7] = 0x86;
7566 nv->node_name[0] = 0x20;
7567 nv->node_name[1] = 0x00;
7568 nv->node_name[2] = 0x00;
7569 nv->node_name[3] = 0xe0;
7570 nv->node_name[4] = 0x8b;
7571 nv->node_name[5] = 0x1c;
7572 nv->node_name[6] = 0x55;
7573 nv->node_name[7] = 0x86;
7574 nv->login_retry_count = cpu_to_le16(8);
7575 nv->interrupt_delay_timer = cpu_to_le16(0);
7576 nv->login_timeout = cpu_to_le16(0);
7577 nv->firmware_options_1 =
7578 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7579 nv->firmware_options_2 = cpu_to_le32(2 << 4);
7580 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7581 nv->firmware_options_3 = cpu_to_le32(2 << 13);
7582 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7583 nv->efi_parameters = cpu_to_le32(0);
7584 nv->reset_delay = 5;
7585 nv->max_luns_per_target = cpu_to_le16(128);
7586 nv->port_down_retry_count = cpu_to_le16(30);
7587 nv->link_down_timeout = cpu_to_le16(180);
7588 nv->enode_mac[0] = 0x00;
7589 nv->enode_mac[1] = 0xC0;
7590 nv->enode_mac[2] = 0xDD;
7591 nv->enode_mac[3] = 0x04;
7592 nv->enode_mac[4] = 0x05;
7593 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
7594
7595 rval = 1;
7596 }
7597
7598 if (IS_T10_PI_CAPABLE(ha))
7599 nv->frame_payload_size &= ~7;
7600
7601 qlt_81xx_config_nvram_stage1(vha, nv);
7602
7603 /* Reset Initialization control block */
7604 memset(icb, 0, ha->init_cb_size);
7605
7606 /* Copy 1st segment. */
7607 dptr1 = (uint8_t *)icb;
7608 dptr2 = (uint8_t *)&nv->version;
7609 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7610 while (cnt--)
7611 *dptr1++ = *dptr2++;
7612
7613 icb->login_retry_count = nv->login_retry_count;
7614
7615 /* Copy 2nd segment. */
7616 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7617 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7618 cnt = (uint8_t *)&icb->reserved_5 -
7619 (uint8_t *)&icb->interrupt_delay_timer;
7620 while (cnt--)
7621 *dptr1++ = *dptr2++;
7622
7623 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
7624 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
7625 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
7626 icb->enode_mac[0] = 0x00;
7627 icb->enode_mac[1] = 0xC0;
7628 icb->enode_mac[2] = 0xDD;
7629 icb->enode_mac[3] = 0x04;
7630 icb->enode_mac[4] = 0x05;
7631 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
7632 }
7633
7634 /* Use extended-initialization control block. */
7635 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
7636
7637 /*
7638 * Setup driver NVRAM options.
7639 */
7640 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
7641 "QLE8XXX");
7642
7643 qlt_81xx_config_nvram_stage2(vha, icb);
7644
7645 /* Use alternate WWN? */
7646 if (nv->host_p & cpu_to_le32(BIT_15)) {
7647 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7648 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7649 }
7650
7651 /* Prepare nodename */
7652 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
7653 /*
7654 * Firmware will apply the following mask if the nodename was
7655 * not provided.
7656 */
7657 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7658 icb->node_name[0] &= 0xF0;
7659 }
7660
7661 /* Set host adapter parameters. */
7662 ha->flags.disable_risc_code_load = 0;
7663 ha->flags.enable_lip_reset = 0;
7664 ha->flags.enable_lip_full_login =
7665 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
7666 ha->flags.enable_target_reset =
7667 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
7668 ha->flags.enable_led_scheme = 0;
7669 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
7670
7671 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7672 (BIT_6 | BIT_5 | BIT_4)) >> 4;
7673
7674 /* save HBA serial number */
7675 ha->serial0 = icb->port_name[5];
7676 ha->serial1 = icb->port_name[6];
7677 ha->serial2 = icb->port_name[7];
7678 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7679 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7680
7681 icb->execution_throttle = cpu_to_le16(0xFFFF);
7682
7683 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7684
7685 /* Set minimum login_timeout to 4 seconds. */
7686 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7687 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7688 if (le16_to_cpu(nv->login_timeout) < 4)
7689 nv->login_timeout = cpu_to_le16(4);
7690 ha->login_timeout = le16_to_cpu(nv->login_timeout);
7691
7692 /* Set minimum RATOV to 100 tenths of a second. */
7693 ha->r_a_tov = 100;
7694
7695 ha->loop_reset_delay = nv->reset_delay;
7696
7697 /* Link Down Timeout = 0:
7698 *
7699 * When Port Down timer expires we will start returning
7700 * I/O's to OS with "DID_NO_CONNECT".
7701 *
7702 * Link Down Timeout != 0:
7703 *
7704 * The driver waits for the link to come up after link down
7705 * before returning I/Os to OS with "DID_NO_CONNECT".
7706 */
7707 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7708 ha->loop_down_abort_time =
7709 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7710 } else {
7711 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7712 ha->loop_down_abort_time =
7713 (LOOP_DOWN_TIME - ha->link_down_timeout);
7714 }
7715
7716 /* Need enough time to try and get the port back. */
7717 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7718 if (qlport_down_retry)
7719 ha->port_down_retry_count = qlport_down_retry;
7720
7721 /* Set login_retry_count */
7722 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
7723 if (ha->port_down_retry_count ==
7724 le16_to_cpu(nv->port_down_retry_count) &&
7725 ha->port_down_retry_count > 3)
7726 ha->login_retry_count = ha->port_down_retry_count;
7727 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7728 ha->login_retry_count = ha->port_down_retry_count;
7729 if (ql2xloginretrycount)
7730 ha->login_retry_count = ql2xloginretrycount;
7731
7732 /* if not running MSI-X we need handshaking on interrupts */
7733 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
7734 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
7735
7736 /* Enable ZIO. */
7737 if (!vha->flags.init_done) {
7738 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7739 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7740 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
7741 le16_to_cpu(icb->interrupt_delay_timer): 2;
7742 }
7743 icb->firmware_options_2 &= cpu_to_le32(
7744 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
7745 vha->flags.process_response_queue = 0;
7746 if (ha->zio_mode != QLA_ZIO_DISABLED) {
7747 ha->zio_mode = QLA_ZIO_MODE_6;
7748
7749 ql_log(ql_log_info, vha, 0x0075,
7750 "ZIO mode %d enabled; timer delay (%d us).\n",
7751 ha->zio_mode,
7752 ha->zio_timer * 100);
7753
7754 icb->firmware_options_2 |= cpu_to_le32(
7755 (uint32_t)ha->zio_mode);
7756 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
7757 vha->flags.process_response_queue = 1;
7758 }
7759
7760 /* enable RIDA Format2 */
7761 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7762 icb->firmware_options_3 |= BIT_0;
7763
7764 if (IS_QLA27XX(ha)) {
7765 icb->firmware_options_3 |= BIT_8;
7766 ql_dbg(ql_log_info, vha, 0x0075,
7767 "Enabling direct connection.\n");
7768 }
7769
7770 if (rval) {
7771 ql_log(ql_log_warn, vha, 0x0076,
7772 "NVRAM configuration failed.\n");
7773 }
7774 return (rval);
7775 }
7776
/*
 * qla82xx_restart_isp
 *	Restarts the ISP82xx after a chip reset: re-initializes the rings,
 *	waits for firmware readiness, re-enables interrupts, re-arms the
 *	FCE/EFT trace buffers, then propagates the abort/restart to every
 *	virtual port.
 *
 * Input:
 *	vha = scsi host structure pointer (base physical port).
 *
 * Returns:
 *	0 on success, non-zero failure status otherwise.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct scsi_qla_host *vp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		if (ha->fce) {
			/*
			 * Re-arm the Fibre Channel Event trace; a trace
			 * failure is logged but does not fail the restart.
			 */
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		if (ha->eft) {
			/* Re-arm the Extended Firmware Trace (best effort). */
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				/*
				 * Hold a vport reference while the lock is
				 * dropped so the vport cannot be torn down
				 * during qla2x00_vp_abort_isp().
				 */
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}
7876
7877 void
7878 qla81xx_update_fw_options(scsi_qla_host_t *vha)
7879 {
7880 struct qla_hw_data *ha = vha->hw;
7881
7882 /* Hold status IOCBs until ABTS response received. */
7883 if (ql2xfwholdabts)
7884 ha->fw_options[3] |= BIT_12;
7885
7886 /* Set Retry FLOGI in case of P2P connection */
7887 if (ha->operating_mode == P2P) {
7888 ha->fw_options[2] |= BIT_3;
7889 ql_dbg(ql_dbg_disc, vha, 0x2103,
7890 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
7891 __func__, ha->fw_options[2]);
7892 }
7893
7894 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
7895 if (ql2xmvasynctoatio) {
7896 if (qla_tgt_mode_enabled(vha) ||
7897 qla_dual_mode_enabled(vha))
7898 ha->fw_options[2] |= BIT_11;
7899 else
7900 ha->fw_options[2] &= ~BIT_11;
7901 }
7902
7903 if (qla_tgt_mode_enabled(vha) ||
7904 qla_dual_mode_enabled(vha)) {
7905 /* FW auto send SCSI status during */
7906 ha->fw_options[1] |= BIT_8;
7907 ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
7908
7909 /* FW perform Exchange validation */
7910 ha->fw_options[2] |= BIT_4;
7911 } else {
7912 ha->fw_options[1] &= ~BIT_8;
7913 ha->fw_options[10] &= 0x00ff;
7914
7915 ha->fw_options[2] &= ~BIT_4;
7916 }
7917
7918 if (ql2xetsenable) {
7919 /* Enable ETS Burst. */
7920 memset(ha->fw_options, 0, sizeof(ha->fw_options));
7921 ha->fw_options[2] |= BIT_9;
7922 }
7923
7924 ql_dbg(ql_dbg_init, vha, 0x00e9,
7925 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
7926 __func__, ha->fw_options[1], ha->fw_options[2],
7927 ha->fw_options[3], vha->host->active_mode);
7928
7929 qla2x00_set_fw_options(vha, ha->fw_options);
7930 }
7931
7932 /*
7933 * qla24xx_get_fcp_prio
7934 * Gets the fcp cmd priority value for the logged in port.
7935 * Looks for a match of the port descriptors within
7936 * each of the fcp prio config entries. If a match is found,
7937 * the tag (priority) value is returned.
7938 *
7939 * Input:
7940 * vha = scsi host structure pointer.
7941 * fcport = port structure pointer.
7942 *
7943 * Return:
7944 * non-zero (if found)
7945 * -1 (if not found)
7946 *
7947 * Context:
7948 * Kernel context
7949 */
7950 static int
7951 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
7952 {
7953 int i, entries;
7954 uint8_t pid_match, wwn_match;
7955 int priority;
7956 uint32_t pid1, pid2;
7957 uint64_t wwn1, wwn2;
7958 struct qla_fcp_prio_entry *pri_entry;
7959 struct qla_hw_data *ha = vha->hw;
7960
7961 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
7962 return -1;
7963
7964 priority = -1;
7965 entries = ha->fcp_prio_cfg->num_entries;
7966 pri_entry = &ha->fcp_prio_cfg->entry[0];
7967
7968 for (i = 0; i < entries; i++) {
7969 pid_match = wwn_match = 0;
7970
7971 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
7972 pri_entry++;
7973 continue;
7974 }
7975
7976 /* check source pid for a match */
7977 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
7978 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
7979 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
7980 if (pid1 == INVALID_PORT_ID)
7981 pid_match++;
7982 else if (pid1 == pid2)
7983 pid_match++;
7984 }
7985
7986 /* check destination pid for a match */
7987 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
7988 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
7989 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
7990 if (pid1 == INVALID_PORT_ID)
7991 pid_match++;
7992 else if (pid1 == pid2)
7993 pid_match++;
7994 }
7995
7996 /* check source WWN for a match */
7997 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
7998 wwn1 = wwn_to_u64(vha->port_name);
7999 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
8000 if (wwn2 == (uint64_t)-1)
8001 wwn_match++;
8002 else if (wwn1 == wwn2)
8003 wwn_match++;
8004 }
8005
8006 /* check destination WWN for a match */
8007 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
8008 wwn1 = wwn_to_u64(fcport->port_name);
8009 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
8010 if (wwn2 == (uint64_t)-1)
8011 wwn_match++;
8012 else if (wwn1 == wwn2)
8013 wwn_match++;
8014 }
8015
8016 if (pid_match == 2 || wwn_match == 2) {
8017 /* Found a matching entry */
8018 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
8019 priority = pri_entry->tag;
8020 break;
8021 }
8022
8023 pri_entry++;
8024 }
8025
8026 return priority;
8027 }
8028
8029 /*
8030 * qla24xx_update_fcport_fcp_prio
8031 * Activates fcp priority for the logged in fc port
8032 *
8033 * Input:
8034 * vha = scsi host structure pointer.
8035 * fcp = port structure pointer.
8036 *
8037 * Return:
8038 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8039 *
8040 * Context:
8041 * Kernel context.
8042 */
8043 int
8044 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
8045 {
8046 int ret;
8047 int priority;
8048 uint16_t mb[5];
8049
8050 if (fcport->port_type != FCT_TARGET ||
8051 fcport->loop_id == FC_NO_LOOP_ID)
8052 return QLA_FUNCTION_FAILED;
8053
8054 priority = qla24xx_get_fcp_prio(vha, fcport);
8055 if (priority < 0)
8056 return QLA_FUNCTION_FAILED;
8057
8058 if (IS_P3P_TYPE(vha->hw)) {
8059 fcport->fcp_prio = priority & 0xf;
8060 return QLA_SUCCESS;
8061 }
8062
8063 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
8064 if (ret == QLA_SUCCESS) {
8065 if (fcport->fcp_prio != priority)
8066 ql_dbg(ql_dbg_user, vha, 0x709e,
8067 "Updated FCP_CMND priority - value=%d loop_id=%d "
8068 "port_id=%02x%02x%02x.\n", priority,
8069 fcport->loop_id, fcport->d_id.b.domain,
8070 fcport->d_id.b.area, fcport->d_id.b.al_pa);
8071 fcport->fcp_prio = priority & 0xf;
8072 } else
8073 ql_dbg(ql_dbg_user, vha, 0x704f,
8074 "Unable to update FCP_CMND priority - ret=0x%x for "
8075 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
8076 fcport->d_id.b.domain, fcport->d_id.b.area,
8077 fcport->d_id.b.al_pa);
8078 return ret;
8079 }
8080
8081 /*
8082 * qla24xx_update_all_fcp_prio
8083 * Activates fcp priority for all the logged in ports
8084 *
8085 * Input:
8086 * ha = adapter block pointer.
8087 *
8088 * Return:
8089 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8090 *
8091 * Context:
8092 * Kernel context.
8093 */
8094 int
8095 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
8096 {
8097 int ret;
8098 fc_port_t *fcport;
8099
8100 ret = QLA_FUNCTION_FAILED;
8101 /* We need to set priority for all logged in ports */
8102 list_for_each_entry(fcport, &vha->vp_fcports, list)
8103 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
8104
8105 return ret;
8106 }
8107
8108 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
8109 int vp_idx, bool startqp)
8110 {
8111 int rsp_id = 0;
8112 int req_id = 0;
8113 int i;
8114 struct qla_hw_data *ha = vha->hw;
8115 uint16_t qpair_id = 0;
8116 struct qla_qpair *qpair = NULL;
8117 struct qla_msix_entry *msix;
8118
8119 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
8120 ql_log(ql_log_warn, vha, 0x00181,
8121 "FW/Driver is not multi-queue capable.\n");
8122 return NULL;
8123 }
8124
8125 if (ql2xmqsupport || ql2xnvmeenable) {
8126 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
8127 if (qpair == NULL) {
8128 ql_log(ql_log_warn, vha, 0x0182,
8129 "Failed to allocate memory for queue pair.\n");
8130 return NULL;
8131 }
8132 memset(qpair, 0, sizeof(struct qla_qpair));
8133
8134 qpair->hw = vha->hw;
8135 qpair->vha = vha;
8136 qpair->qp_lock_ptr = &qpair->qp_lock;
8137 spin_lock_init(&qpair->qp_lock);
8138 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
8139
8140 /* Assign available que pair id */
8141 mutex_lock(&ha->mq_lock);
8142 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
8143 if (ha->num_qpairs >= ha->max_qpairs) {
8144 mutex_unlock(&ha->mq_lock);
8145 ql_log(ql_log_warn, vha, 0x0183,
8146 "No resources to create additional q pair.\n");
8147 goto fail_qid_map;
8148 }
8149 ha->num_qpairs++;
8150 set_bit(qpair_id, ha->qpair_qid_map);
8151 ha->queue_pair_map[qpair_id] = qpair;
8152 qpair->id = qpair_id;
8153 qpair->vp_idx = vp_idx;
8154 qpair->fw_started = ha->flags.fw_started;
8155 INIT_LIST_HEAD(&qpair->hints_list);
8156 INIT_LIST_HEAD(&qpair->nvme_done_list);
8157 qpair->chip_reset = ha->base_qpair->chip_reset;
8158 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
8159 qpair->enable_explicit_conf =
8160 ha->base_qpair->enable_explicit_conf;
8161
8162 for (i = 0; i < ha->msix_count; i++) {
8163 msix = &ha->msix_entries[i];
8164 if (msix->in_use)
8165 continue;
8166 qpair->msix = msix;
8167 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
8168 "Vector %x selected for qpair\n", msix->vector);
8169 break;
8170 }
8171 if (!qpair->msix) {
8172 ql_log(ql_log_warn, vha, 0x0184,
8173 "Out of MSI-X vectors!.\n");
8174 goto fail_msix;
8175 }
8176
8177 qpair->msix->in_use = 1;
8178 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
8179 qpair->pdev = ha->pdev;
8180 if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
8181 qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
8182
8183 mutex_unlock(&ha->mq_lock);
8184
8185 /* Create response queue first */
8186 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
8187 if (!rsp_id) {
8188 ql_log(ql_log_warn, vha, 0x0185,
8189 "Failed to create response queue.\n");
8190 goto fail_rsp;
8191 }
8192
8193 qpair->rsp = ha->rsp_q_map[rsp_id];
8194
8195 /* Create request queue */
8196 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
8197 startqp);
8198 if (!req_id) {
8199 ql_log(ql_log_warn, vha, 0x0186,
8200 "Failed to create request queue.\n");
8201 goto fail_req;
8202 }
8203
8204 qpair->req = ha->req_q_map[req_id];
8205 qpair->rsp->req = qpair->req;
8206 qpair->rsp->qpair = qpair;
8207 /* init qpair to this cpu. Will adjust at run time. */
8208 qla_cpu_update(qpair, smp_processor_id());
8209
8210 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
8211 if (ha->fw_attributes & BIT_4)
8212 qpair->difdix_supported = 1;
8213 }
8214
8215 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
8216 if (!qpair->srb_mempool) {
8217 ql_log(ql_log_warn, vha, 0xd036,
8218 "Failed to create srb mempool for qpair %d\n",
8219 qpair->id);
8220 goto fail_mempool;
8221 }
8222
8223 /* Mark as online */
8224 qpair->online = 1;
8225
8226 if (!vha->flags.qpairs_available)
8227 vha->flags.qpairs_available = 1;
8228
8229 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
8230 "Request/Response queue pair created, id %d\n",
8231 qpair->id);
8232 ql_dbg(ql_dbg_init, vha, 0x0187,
8233 "Request/Response queue pair created, id %d\n",
8234 qpair->id);
8235 }
8236 return qpair;
8237
8238 fail_mempool:
8239 fail_req:
8240 qla25xx_delete_rsp_que(vha, qpair->rsp);
8241 fail_rsp:
8242 mutex_lock(&ha->mq_lock);
8243 qpair->msix->in_use = 0;
8244 list_del(&qpair->qp_list_elem);
8245 if (list_empty(&vha->qp_list))
8246 vha->flags.qpairs_available = 0;
8247 fail_msix:
8248 ha->queue_pair_map[qpair_id] = NULL;
8249 clear_bit(qpair_id, ha->qpair_qid_map);
8250 ha->num_qpairs--;
8251 mutex_unlock(&ha->mq_lock);
8252 fail_qid_map:
8253 kfree(qpair);
8254 return NULL;
8255 }
8256
8257 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
8258 {
8259 int ret = QLA_FUNCTION_FAILED;
8260 struct qla_hw_data *ha = qpair->hw;
8261
8262 qpair->delete_in_progress = 1;
8263 while (atomic_read(&qpair->ref_count))
8264 msleep(500);
8265
8266 ret = qla25xx_delete_req_que(vha, qpair->req);
8267 if (ret != QLA_SUCCESS)
8268 goto fail;
8269
8270 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
8271 if (ret != QLA_SUCCESS)
8272 goto fail;
8273
8274 mutex_lock(&ha->mq_lock);
8275 ha->queue_pair_map[qpair->id] = NULL;
8276 clear_bit(qpair->id, ha->qpair_qid_map);
8277 ha->num_qpairs--;
8278 list_del(&qpair->qp_list_elem);
8279 if (list_empty(&vha->qp_list)) {
8280 vha->flags.qpairs_available = 0;
8281 vha->flags.qpairs_req_created = 0;
8282 vha->flags.qpairs_rsp_created = 0;
8283 }
8284 mempool_destroy(qpair->srb_mempool);
8285 kfree(qpair);
8286 mutex_unlock(&ha->mq_lock);
8287
8288 return QLA_SUCCESS;
8289 fail:
8290 return ret;
8291 }