/*
 * drivers/scsi/qla2xxx/qla_init.c
 * (mirror of the Ubuntu Hirsute kernel tree)
 */
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_gbl.h"
9
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13
14 #include "qla_devtbl.h"
15
16 #ifdef CONFIG_SPARC
17 #include <asm/prom.h>
18 #endif
19
20 #include <target/target_core_base.h>
21 #include "qla_target.h"
22
23 /*
24 * QLogic ISP2x00 Hardware Support Function Prototypes.
25 */
26 static int qla2x00_isp_firmware(scsi_qla_host_t *);
27 static int qla2x00_setup_chip(scsi_qla_host_t *);
28 static int qla2x00_fw_ready(scsi_qla_host_t *);
29 static int qla2x00_configure_hba(scsi_qla_host_t *);
30 static int qla2x00_configure_loop(scsi_qla_host_t *);
31 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
32 static int qla2x00_configure_fabric(scsi_qla_host_t *);
33 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
34 static int qla2x00_restart_isp(scsi_qla_host_t *);
35
36 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
37 static int qla84xx_init_chip(scsi_qla_host_t *);
38 static int qla25xx_init_queues(struct qla_hw_data *);
39 static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
40 static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
41 struct event_arg *);
42 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
43 struct event_arg *);
44 static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
45
46 /* SRB Extensions ---------------------------------------------------------- */
47
/*
 * qla2x00_sp_timeout - generic SRB command-timer expiry handler.
 * @t: expired timer embedded at sp->u.iocb_cmd.timer.
 *
 * Runs in timer context.  Under hardware_lock, removes the SRB's handle
 * from the base request queue's outstanding array (so a late interrupt
 * completion cannot find it) and then invokes the type-specific iocb
 * timeout callback.
 *
 * NOTE(review): only req_q_map[0] is scrubbed here; this assumes the SRB
 * was issued on the base queue -- confirm for multi-queue configurations.
 */
void
qla2x00_sp_timeout(struct timer_list *t)
{
        srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
        struct srb_iocb *iocb;
        scsi_qla_host_t *vha = sp->vha;
        struct req_que *req;
        unsigned long flags;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        /* Detach the handle before dispatching the timeout action. */
        req = vha->hw->req_q_map[0];
        req->outstanding_cmds[sp->handle] = NULL;
        iocb = &sp->u.iocb_cmd;
        iocb->timeout(sp);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
64
65 void
66 qla2x00_sp_free(void *ptr)
67 {
68 srb_t *sp = ptr;
69 struct srb_iocb *iocb = &sp->u.iocb_cmd;
70
71 del_timer(&iocb->timer);
72 qla2x00_rel_sp(sp);
73 }
74
75 /* Asynchronous Login/Logout Routines -------------------------------------- */
76
77 unsigned long
78 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
79 {
80 unsigned long tmo;
81 struct qla_hw_data *ha = vha->hw;
82
83 /* Firmware should use switch negotiated r_a_tov for timeout. */
84 tmo = ha->r_a_tov / 10 * 2;
85 if (IS_QLAFX00(ha)) {
86 tmo = FX00_DEF_RATOV * 2;
87 } else if (!IS_FWI2_CAPABLE(ha)) {
88 /*
89 * Except for earlier ISPs where the timeout is seeded from the
90 * initialization control block.
91 */
92 tmo = ha->login_timeout;
93 }
94 return tmo;
95 }
96
/*
 * qla2x00_async_iocb_timeout - shared timeout action for async IOCB SRBs.
 * @data: the timed-out srb_t.
 *
 * Clears the async-in-flight flags on the associated fcport (when there
 * is one) and completes the SRB with QLA_FUNCTION_TIMEOUT for the SRB
 * types handled below.  A login timeout additionally records a retryable
 * mailbox error in the logio payload so the completion path can relogin.
 * SRB types not listed in the switch are left to their own timeout
 * handling elsewhere.
 */
void
qla2x00_async_iocb_timeout(void *data)
{
        srb_t *sp = data;
        fc_port_t *fcport = sp->fcport;
        struct srb_iocb *lio = &sp->u.iocb_cmd;

        if (fcport) {
                ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
                    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
                    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

                fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
        } else {
                /* No port attached (e.g. controller-level IOCB). */
                pr_info("Async-%s timeout - hdl=%x.\n",
                    sp->name, sp->handle);
        }

        switch (sp->type) {
        case SRB_LOGIN_CMD:
                /* Retry as needed. */
                lio->u.logio.data[0] = MBS_COMMAND_ERROR;
                lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
                        QLA_LOGIO_LOGIN_RETRIED : 0;
                sp->done(sp, QLA_FUNCTION_TIMEOUT);
                break;
        case SRB_LOGOUT_CMD:
        case SRB_CT_PTHRU_CMD:
        case SRB_MB_IOCB:
        case SRB_NACK_PLOGI:
        case SRB_NACK_PRLI:
        case SRB_NACK_LOGO:
        case SRB_CTRL_VP:
                sp->done(sp, QLA_FUNCTION_TIMEOUT);
                break;
        }
}
134
/*
 * qla2x00_async_login_sp_done - completion callback for an async PLOGI SRB.
 * @ptr: the completed srb_t.
 * @res: driver completion status.
 *
 * Clears the async flags on the port and, unless the host is unloading,
 * forwards the logio mailbox results to the fcport event handler as an
 * FCME_PLOGI_DONE event.  Always frees the SRB.
 */
static void
qla2x00_async_login_sp_done(void *ptr, int res)
{
        srb_t *sp = ptr;
        struct scsi_qla_host *vha = sp->vha;
        struct srb_iocb *lio = &sp->u.iocb_cmd;
        struct event_arg ea;

        ql_dbg(ql_dbg_disc, vha, 0x20dd,
            "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

        sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

        if (!test_bit(UNLOADING, &vha->dpc_flags)) {
                memset(&ea, 0, sizeof(ea));
                ea.event = FCME_PLOGI_DONE;
                ea.fcport = sp->fcport;
                ea.data[0] = lio->u.logio.data[0];
                ea.data[1] = lio->u.logio.data[1];
                ea.iop[0] = lio->u.logio.iop[0];
                ea.iop[1] = lio->u.logio.iop[1];
                ea.sp = sp;
                qla2x00_fcport_event_handler(vha, &ea);
        }

        sp->free(sp);
}
162
/*
 * qla2x00_async_login - issue an asynchronous PLOGI to a remote port.
 * @vha:    host to issue the login on.
 * @fcport: remote port to log into.
 * @data:   logio data; data[1] may carry QLA_LOGIO_LOGIN_RETRIED.
 *
 * Allocates an SRB, marks the port login-pending and starts the login
 * IOCB.  On submit failure the port is flagged for relogin via the DPC
 * thread.  Returns QLA_SUCCESS when the IOCB was queued.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
        srb_t *sp;
        struct srb_iocb *lio;
        int rval = QLA_FUNCTION_FAILED;

        if (!vha->flags.online)
                goto done;

        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
                goto done;

        fcport->flags |= FCF_ASYNC_SENT;
        fcport->logout_completed = 0;

        fcport->disc_state = DSC_LOGIN_PEND;
        sp->type = SRB_LOGIN_CMD;
        sp->name = "login";
        /* Snapshot generations so completion can detect RSCN/login races. */
        sp->gen1 = fcport->rscn_gen;
        sp->gen2 = fcport->login_gen;
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

        lio = &sp->u.iocb_cmd;
        lio->timeout = qla2x00_async_iocb_timeout;
        sp->done = qla2x00_async_login_sp_done;
        lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

        /* For FC-NVMe ports, PRLI is driven separately after PLOGI. */
        if (fcport->fc4f_nvme)
                lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

        if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
                lio->u.logio.flags |= SRB_LOGIN_RETRIED;
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                fcport->flags |= FCF_LOGIN_NEEDED;
                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                goto done_free_sp;
        }

        ql_dbg(ql_dbg_disc, vha, 0x2072,
            "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
            "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
            fcport->login_retry);
        return rval;

done_free_sp:
        sp->free(sp);
        fcport->flags &= ~FCF_ASYNC_SENT;
done:
        fcport->flags &= ~FCF_ASYNC_ACTIVE;
        return rval;
}
219
220 static void
221 qla2x00_async_logout_sp_done(void *ptr, int res)
222 {
223 srb_t *sp = ptr;
224
225 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
226 sp->fcport->login_gen++;
227 qlt_logo_completion_handler(sp->fcport, res);
228 sp->free(sp);
229 }
230
/*
 * qla2x00_async_logout - issue an asynchronous LOGO to a remote port.
 * @vha:    host to issue the logout on.
 * @fcport: remote port to log out of.
 *
 * Refuses to start when the host is offline or another async operation
 * is already in flight for this port.  Returns QLA_SUCCESS when the IOCB
 * was queued; otherwise clears the async flags and returns failure.
 */
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
        srb_t *sp;
        struct srb_iocb *lio;
        int rval = QLA_FUNCTION_FAILED;

        if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
                return rval;

        fcport->flags |= FCF_ASYNC_SENT;
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
                goto done;

        sp->type = SRB_LOGOUT_CMD;
        sp->name = "logout";
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

        lio = &sp->u.iocb_cmd;
        lio->timeout = qla2x00_async_iocb_timeout;
        sp->done = qla2x00_async_logout_sp_done;
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS)
                goto done_free_sp;

        ql_dbg(ql_dbg_disc, vha, 0x2070,
            "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
            sp->handle, fcport->loop_id, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            fcport->port_name);
        return rval;

done_free_sp:
        sp->free(sp);
done:
        fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
        return rval;
}
270
271 void
272 qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
273 uint16_t *data)
274 {
275 fcport->flags &= ~FCF_ASYNC_ACTIVE;
276 /* Don't re-login in target mode */
277 if (!fcport->tgt_session)
278 qla2x00_mark_device_lost(vha, fcport, 1, 0);
279 qlt_logo_completion_handler(fcport, data[0]);
280 }
281
282 static void
283 qla2x00_async_prlo_sp_done(void *s, int res)
284 {
285 srb_t *sp = (srb_t *)s;
286 struct srb_iocb *lio = &sp->u.iocb_cmd;
287 struct scsi_qla_host *vha = sp->vha;
288
289 sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
290 if (!test_bit(UNLOADING, &vha->dpc_flags))
291 qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
292 lio->u.logio.data);
293 sp->free(sp);
294 }
295
/*
 * qla2x00_async_prlo - issue an asynchronous PRLO to a remote port.
 * @vha:    host to issue the PRLO on.
 * @fcport: remote port whose process login is being terminated.
 *
 * Returns QLA_SUCCESS when the IOCB was queued; otherwise frees the SRB
 * (if allocated), clears FCF_ASYNC_ACTIVE and returns failure.
 */
int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
        srb_t *sp;
        struct srb_iocb *lio;
        int rval;

        rval = QLA_FUNCTION_FAILED;
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
                goto done;

        sp->type = SRB_PRLO_CMD;
        sp->name = "prlo";
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

        lio = &sp->u.iocb_cmd;
        lio->timeout = qla2x00_async_iocb_timeout;
        sp->done = qla2x00_async_prlo_sp_done;
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS)
                goto done_free_sp;

        ql_dbg(ql_dbg_disc, vha, 0x2070,
            "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
            sp->handle, fcport->loop_id, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa);
        return rval;

done_free_sp:
        sp->free(sp);
done:
        fcport->flags &= ~FCF_ASYNC_ACTIVE;
        return rval;
}
331
/*
 * qla24xx_handle_adisc_event - process an FCME_ADISC_DONE event.
 * @vha: host the event belongs to.
 * @ea:  event argument; ea->data[0] carries the mailbox status and
 *       ea->sp->gen1/gen2 the generations snapshotted at submit time.
 *
 * A failed ADISC schedules the session for deletion.  A stale login
 * generation means the remote side changed state underneath us, so the
 * result is dropped; a stale RSCN generation re-queries the port id via
 * GIDPN.  Otherwise the cached port data is accepted.
 */
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
        struct fc_port *fcport = ea->fcport;

        ql_dbg(ql_dbg_disc, vha, 0x20d2,
            "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
            __func__, fcport->port_name, fcport->disc_state,
            fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
            fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

        if (ea->data[0] != MBS_COMMAND_COMPLETE) {
                ql_dbg(ql_dbg_disc, vha, 0x2066,
                    "%s %8phC: adisc fail: post delete\n",
                    __func__, ea->fcport->port_name);
                qlt_schedule_sess_for_deletion(ea->fcport);
                return;
        }

        if (ea->fcport->disc_state == DSC_DELETE_PEND)
                return;

        if (ea->sp->gen2 != ea->fcport->login_gen) {
                /* target side must have changed it. */
                ql_dbg(ql_dbg_disc, vha, 0x20d3,
                    "%s %8phC generation changed\n",
                    __func__, ea->fcport->port_name);
                return;
        } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
                ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
                    __func__, __LINE__, ea->fcport->port_name);
                qla24xx_post_gidpn_work(vha, ea->fcport);
                return;
        }

        __qla24xx_handle_gpdb_event(vha, ea);
}
369
/*
 * qla2x00_async_adisc_sp_done - completion callback for an async ADISC SRB.
 * @ptr: the completed srb_t.
 * @res: driver completion status.
 *
 * Packages the logio results into an FCME_ADISC_DONE event for the
 * fcport event handler, then frees the SRB.
 */
static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
        srb_t *sp = ptr;
        struct scsi_qla_host *vha = sp->vha;
        struct event_arg ea;
        struct srb_iocb *lio = &sp->u.iocb_cmd;

        ql_dbg(ql_dbg_disc, vha, 0x2066,
            "Async done-%s res %x %8phC\n",
            sp->name, res, sp->fcport->port_name);

        sp->fcport->flags &= ~FCF_ASYNC_SENT;

        memset(&ea, 0, sizeof(ea));
        ea.event = FCME_ADISC_DONE;
        ea.rc = res;
        ea.data[0] = lio->u.logio.data[0];
        ea.data[1] = lio->u.logio.data[1];
        ea.iop[0] = lio->u.logio.iop[0];
        ea.iop[1] = lio->u.logio.iop[1];
        ea.fcport = sp->fcport;
        ea.sp = sp;

        qla2x00_fcport_event_handler(vha, &ea);

        sp->free(sp);
}
398
/*
 * qla2x00_async_adisc - issue an asynchronous ADISC to a remote port.
 * @vha:    host to issue the ADISC on.
 * @fcport: remote port to address-discover.
 * @data:   logio data; data[1] may carry QLA_LOGIO_LOGIN_RETRIED.
 *
 * On any failure path the async flags are cleared and the ADISC is
 * re-posted as deferred work so discovery still makes progress.
 */
int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
        srb_t *sp;
        struct srb_iocb *lio;
        int rval;

        rval = QLA_FUNCTION_FAILED;
        fcport->flags |= FCF_ASYNC_SENT;
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
                goto done;

        sp->type = SRB_ADISC_CMD;
        sp->name = "adisc";
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

        lio = &sp->u.iocb_cmd;
        lio->timeout = qla2x00_async_iocb_timeout;
        sp->done = qla2x00_async_adisc_sp_done;
        if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
                lio->u.logio.flags |= SRB_LOGIN_RETRIED;
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS)
                goto done_free_sp;

        ql_dbg(ql_dbg_disc, vha, 0x206f,
            "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
            sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
        return rval;

done_free_sp:
        sp->free(sp);
done:
        fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
        /* Fall back to the deferred-work path so discovery continues. */
        qla2x00_post_async_adisc_work(vha, fcport, data);
        return rval;
}
438
439 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
440 struct event_arg *ea)
441 {
442 fc_port_t *fcport, *conflict_fcport;
443 struct get_name_list_extended *e;
444 u16 i, n, found = 0, loop_id;
445 port_id_t id;
446 u64 wwn;
447 u16 data[2];
448 u8 current_login_state;
449
450 fcport = ea->fcport;
451 ql_dbg(ql_dbg_disc, vha, 0xffff,
452 "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
453 __func__, fcport->port_name, fcport->disc_state,
454 fcport->fw_login_state, ea->rc,
455 fcport->login_gen, fcport->last_login_gen,
456 fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
457
458 if (fcport->disc_state == DSC_DELETE_PEND)
459 return;
460
461 if (ea->rc) { /* rval */
462 if (fcport->login_retry == 0) {
463 fcport->login_retry = vha->hw->login_retry_count;
464 ql_dbg(ql_dbg_disc, vha, 0x20de,
465 "GNL failed Port login retry %8phN, retry cnt=%d.\n",
466 fcport->port_name, fcport->login_retry);
467 }
468 return;
469 }
470
471 if (fcport->last_rscn_gen != fcport->rscn_gen) {
472 ql_dbg(ql_dbg_disc, vha, 0x20df,
473 "%s %8phC rscn gen changed rscn %d|%d \n",
474 __func__, fcport->port_name,
475 fcport->last_rscn_gen, fcport->rscn_gen);
476 qla24xx_post_gidpn_work(vha, fcport);
477 return;
478 } else if (fcport->last_login_gen != fcport->login_gen) {
479 ql_dbg(ql_dbg_disc, vha, 0x20e0,
480 "%s %8phC login gen changed\n",
481 __func__, fcport->port_name);
482 return;
483 }
484
485 n = ea->data[0] / sizeof(struct get_name_list_extended);
486
487 ql_dbg(ql_dbg_disc, vha, 0x20e1,
488 "%s %d %8phC n %d %02x%02x%02x lid %d \n",
489 __func__, __LINE__, fcport->port_name, n,
490 fcport->d_id.b.domain, fcport->d_id.b.area,
491 fcport->d_id.b.al_pa, fcport->loop_id);
492
493 for (i = 0; i < n; i++) {
494 e = &vha->gnl.l[i];
495 wwn = wwn_to_u64(e->port_name);
496
497 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
498 continue;
499
500 found = 1;
501 id.b.domain = e->port_id[2];
502 id.b.area = e->port_id[1];
503 id.b.al_pa = e->port_id[0];
504 id.b.rsvd_1 = 0;
505
506 loop_id = le16_to_cpu(e->nport_handle);
507 loop_id = (loop_id & 0x7fff);
508
509 ql_dbg(ql_dbg_disc, vha, 0x20e2,
510 "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
511 __func__, fcport->port_name,
512 e->current_login_state, fcport->fw_login_state,
513 id.b.domain, id.b.area, id.b.al_pa,
514 fcport->d_id.b.domain, fcport->d_id.b.area,
515 fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
516
517 if ((id.b24 != fcport->d_id.b24) ||
518 ((fcport->loop_id != FC_NO_LOOP_ID) &&
519 (fcport->loop_id != loop_id))) {
520 ql_dbg(ql_dbg_disc, vha, 0x20e3,
521 "%s %d %8phC post del sess\n",
522 __func__, __LINE__, fcport->port_name);
523 qlt_schedule_sess_for_deletion(fcport);
524 return;
525 }
526
527 fcport->loop_id = loop_id;
528
529 wwn = wwn_to_u64(fcport->port_name);
530 qlt_find_sess_invalidate_other(vha, wwn,
531 id, loop_id, &conflict_fcport);
532
533 if (conflict_fcport) {
534 /*
535 * Another share fcport share the same loop_id &
536 * nport id. Conflict fcport needs to finish
537 * cleanup before this fcport can proceed to login.
538 */
539 conflict_fcport->conflict = fcport;
540 fcport->login_pause = 1;
541 }
542
543 if (fcport->fc4f_nvme)
544 current_login_state = e->current_login_state >> 4;
545 else
546 current_login_state = e->current_login_state & 0xf;
547
548 switch (current_login_state) {
549 case DSC_LS_PRLI_COMP:
550 ql_dbg(ql_dbg_disc, vha, 0x20e4,
551 "%s %d %8phC post gpdb\n",
552 __func__, __LINE__, fcport->port_name);
553
554 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
555 fcport->port_type = FCT_INITIATOR;
556 else
557 fcport->port_type = FCT_TARGET;
558
559 data[0] = data[1] = 0;
560 qla2x00_post_async_adisc_work(vha, fcport, data);
561 break;
562 case DSC_LS_PORT_UNAVAIL:
563 default:
564 if (fcport->loop_id == FC_NO_LOOP_ID) {
565 qla2x00_find_new_loop_id(vha, fcport);
566 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
567 }
568 ql_dbg(ql_dbg_disc, vha, 0x20e5,
569 "%s %d %8phC\n",
570 __func__, __LINE__, fcport->port_name);
571 qla24xx_fcport_handle_login(vha, fcport);
572 break;
573 }
574 }
575
576 if (!found) {
577 /* fw has no record of this port */
578 for (i = 0; i < n; i++) {
579 e = &vha->gnl.l[i];
580 id.b.domain = e->port_id[0];
581 id.b.area = e->port_id[1];
582 id.b.al_pa = e->port_id[2];
583 id.b.rsvd_1 = 0;
584 loop_id = le16_to_cpu(e->nport_handle);
585
586 if (fcport->d_id.b24 == id.b24) {
587 conflict_fcport =
588 qla2x00_find_fcport_by_wwpn(vha,
589 e->port_name, 0);
590 ql_dbg(ql_dbg_disc, vha, 0x20e6,
591 "%s %d %8phC post del sess\n",
592 __func__, __LINE__,
593 conflict_fcport->port_name);
594 qlt_schedule_sess_for_deletion
595 (conflict_fcport);
596 }
597
598 /* FW already picked this loop id for another fcport */
599 if (fcport->loop_id == loop_id)
600 fcport->loop_id = FC_NO_LOOP_ID;
601 }
602 qla24xx_fcport_handle_login(vha, fcport);
603 }
604 } /* gnl_event */
605
/*
 * qla24xx_async_gnl_sp_done - completion callback for the GNL mailbox SRB.
 * @s:   the completed srb_t.
 * @res: driver completion status.
 *
 * One GNL round trip serves every fcport queued on vha->gnl.fcports:
 * the waiter list is spliced off under gnl.fcports_lock and each waiter
 * receives an FCME_GNL_DONE event.  The firmware's name-list snapshot is
 * also scanned to (a) mark every reported loop id as in use and (b)
 * create sessions for WWPNs the driver has not seen before.
 */
static void
qla24xx_async_gnl_sp_done(void *s, int res)
{
        struct srb *sp = s;
        struct scsi_qla_host *vha = sp->vha;
        unsigned long flags;
        struct fc_port *fcport = NULL, *tf;
        u16 i, n = 0, loop_id;
        struct event_arg ea;
        struct get_name_list_extended *e;
        u64 wwn;
        struct list_head h;
        bool found = false;

        ql_dbg(ql_dbg_disc, vha, 0x20e7,
            "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
            sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
            sp->u.iocb_cmd.u.mbx.in_mb[2]);

        memset(&ea, 0, sizeof(ea));
        ea.sp = sp;
        ea.rc = res;
        ea.event = FCME_GNL_DONE;

        /* mb[1] reports the number of bytes the firmware transferred. */
        if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
            sizeof(struct get_name_list_extended)) {
                n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
                    sizeof(struct get_name_list_extended);
                ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
        }

        /* Reserve every loop id firmware reports as currently assigned. */
        for (i = 0; i < n; i++) {
                e = &vha->gnl.l[i];
                loop_id = le16_to_cpu(e->nport_handle);
                /* mask out reserve bit */
                loop_id = (loop_id & 0x7fff);
                set_bit(loop_id, vha->hw->loop_id_map);
                wwn = wwn_to_u64(e->port_name);

                ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
                    "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
                    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
                    e->port_id[0], e->current_login_state, e->last_login_state,
                    (loop_id & 0x7fff));
        }

        spin_lock_irqsave(&vha->gnl.fcports_lock, flags);

        /* Detach the waiter list so new GNL requests can queue up. */
        INIT_LIST_HEAD(&h);
        fcport = tf = NULL;
        if (!list_empty(&vha->gnl.fcports))
                list_splice_init(&vha->gnl.fcports, &h);

        list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
                list_del_init(&fcport->gnl_entry);
                spin_lock(&vha->hw->tgt.sess_lock);
                fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
                spin_unlock(&vha->hw->tgt.sess_lock);
                ea.fcport = fcport;

                qla2x00_fcport_event_handler(vha, &ea);
        }
        spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);

        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
        /* create new fcport if fw has knowledge of new sessions */
        for (i = 0; i < n; i++) {
                port_id_t id;
                u64 wwnn;

                e = &vha->gnl.l[i];
                wwn = wwn_to_u64(e->port_name);

                found = false;
                list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
                        if (!memcmp((u8 *)&wwn, fcport->port_name,
                            WWN_SIZE)) {
                                found = true;
                                break;
                        }
                }

                id.b.domain = e->port_id[2];
                id.b.area = e->port_id[1];
                id.b.al_pa = e->port_id[0];
                id.b.rsvd_1 = 0;

                if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
                        ql_dbg(ql_dbg_disc, vha, 0x2065,
                            "%s %d %8phC %06x post new sess\n",
                            __func__, __LINE__, (u8 *)&wwn, id.b24);
                        wwnn = wwn_to_u64(e->node_name);
                        qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
                            (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
                }
        }

        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

        sp->free(sp);
}
707
/*
 * qla24xx_async_gnl - issue a Get Name List mailbox command for a port.
 * @vha:    host to issue the GNL on.
 * @fcport: port waiting on the name-list result.
 *
 * Ports piggy-back on an in-flight GNL: if @fcport is already queued on
 * vha->gnl.fcports, no new command is issued and QLA_SUCCESS is
 * returned.  Otherwise the port is queued and an MBC_PORT_NODE_NAME_LIST
 * mailbox IOCB is started against the shared gnl DMA buffer.
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
        srb_t *sp;
        struct srb_iocb *mbx;
        int rval = QLA_FUNCTION_FAILED;
        unsigned long flags;
        u16 *mb;

        if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
                return rval;

        ql_dbg(ql_dbg_disc, vha, 0x20d9,
            "Async-gnlist WWPN %8phC \n", fcport->port_name);

        spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
        /* Already waiting on a GNL in flight -- ride along with it. */
        if (!list_empty(&fcport->gnl_entry)) {
                spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
                rval = QLA_SUCCESS;
                goto done;
        }

        spin_lock(&vha->hw->tgt.sess_lock);
        fcport->disc_state = DSC_GNL;
        /* Snapshot generations so completion can detect races. */
        fcport->last_rscn_gen = fcport->rscn_gen;
        fcport->last_login_gen = fcport->login_gen;
        spin_unlock(&vha->hw->tgt.sess_lock);

        list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
        spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);

        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
                goto done;

        fcport->flags |= FCF_ASYNC_SENT;
        sp->type = SRB_MB_IOCB;
        sp->name = "gnlist";
        sp->gen1 = fcport->rscn_gen;
        sp->gen2 = fcport->login_gen;

        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

        /* MBC_PORT_NODE_NAME_LIST into the shared vha->gnl DMA buffer. */
        mb = sp->u.iocb_cmd.u.mbx.out_mb;
        mb[0] = MBC_PORT_NODE_NAME_LIST;
        mb[1] = BIT_2 | BIT_3;
        mb[2] = MSW(vha->gnl.ldma);
        mb[3] = LSW(vha->gnl.ldma);
        mb[6] = MSW(MSD(vha->gnl.ldma));
        mb[7] = LSW(MSD(vha->gnl.ldma));
        mb[8] = vha->gnl.size;
        mb[9] = vha->vp_idx;

        mbx = &sp->u.iocb_cmd;
        mbx->timeout = qla2x00_async_iocb_timeout;

        sp->done = qla24xx_async_gnl_sp_done;

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS)
                goto done_free_sp;

        ql_dbg(ql_dbg_disc, vha, 0x20da,
            "Async-%s - OUT WWPN %8phC hndl %x\n",
            sp->name, fcport->port_name, sp->handle);

        return rval;

done_free_sp:
        sp->free(sp);
        fcport->flags &= ~FCF_ASYNC_SENT;
done:
        return rval;
}
781
782 int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
783 {
784 struct qla_work_evt *e;
785
786 e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
787 if (!e)
788 return QLA_FUNCTION_FAILED;
789
790 e->u.fcport.fcport = fcport;
791 fcport->flags |= FCF_ASYNC_ACTIVE;
792 return qla2x00_post_work(vha, e);
793 }
794
/*
 * qla24xx_async_gpdb_sp_done - completion callback for a Get Port
 * Database mailbox SRB.
 * @s:   the completed srb_t.
 * @res: driver completion status.
 *
 * Forwards an FCME_GPDB_DONE event to the fcport event handler, then
 * returns the port-database DMA buffer to the pool and frees the SRB.
 */
static
void qla24xx_async_gpdb_sp_done(void *s, int res)
{
        struct srb *sp = s;
        struct scsi_qla_host *vha = sp->vha;
        struct qla_hw_data *ha = vha->hw;
        fc_port_t *fcport = sp->fcport;
        u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
        struct event_arg ea;

        ql_dbg(ql_dbg_disc, vha, 0x20db,
            "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
            sp->name, res, fcport->port_name, mb[1], mb[2]);

        fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

        memset(&ea, 0, sizeof(ea));
        ea.event = FCME_GPDB_DONE;
        ea.fcport = fcport;
        ea.sp = sp;

        qla2x00_fcport_event_handler(vha, &ea);

        /* Buffer was allocated in qla24xx_async_gpdb(). */
        dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
                sp->u.iocb_cmd.u.mbx.in_dma);

        sp->free(sp);
}
823
824 static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
825 {
826 struct qla_work_evt *e;
827
828 e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
829 if (!e)
830 return QLA_FUNCTION_FAILED;
831
832 e->u.fcport.fcport = fcport;
833
834 return qla2x00_post_work(vha, e);
835 }
836
/*
 * qla2x00_async_prli_sp_done - completion callback for an async PRLI SRB.
 * @ptr: the completed srb_t.
 * @res: driver completion status.
 *
 * Unless the host is unloading, forwards the logio mailbox results to
 * the fcport event handler as an FCME_PRLI_DONE event.  Always frees
 * the SRB.
 */
static void
qla2x00_async_prli_sp_done(void *ptr, int res)
{
        srb_t *sp = ptr;
        struct scsi_qla_host *vha = sp->vha;
        struct srb_iocb *lio = &sp->u.iocb_cmd;
        struct event_arg ea;

        ql_dbg(ql_dbg_disc, vha, 0x2129,
            "%s %8phC res %d \n", __func__,
            sp->fcport->port_name, res);

        sp->fcport->flags &= ~FCF_ASYNC_SENT;

        if (!test_bit(UNLOADING, &vha->dpc_flags)) {
                memset(&ea, 0, sizeof(ea));
                ea.event = FCME_PRLI_DONE;
                ea.fcport = sp->fcport;
                ea.data[0] = lio->u.logio.data[0];
                ea.data[1] = lio->u.logio.data[1];
                ea.iop[0] = lio->u.logio.iop[0];
                ea.iop[1] = lio->u.logio.iop[1];
                ea.sp = sp;

                qla2x00_fcport_event_handler(vha, &ea);
        }

        sp->free(sp);
}
866
/*
 * qla24xx_async_prli - issue an asynchronous PRLI to a remote port.
 * @vha:    host to issue the PRLI on.
 * @fcport: remote port to process-login.
 *
 * Declines to start while a firmware PLOGI/PRLI is still pending or a
 * PLOGI just completed (firmware owns the login at that point).  On
 * submit failure the port is flagged for relogin via the DPC thread.
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
        srb_t *sp;
        struct srb_iocb *lio;
        int rval = QLA_FUNCTION_FAILED;

        if (!vha->flags.online)
                return rval;

        if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
            fcport->fw_login_state == DSC_LS_PLOGI_COMP ||
            fcport->fw_login_state == DSC_LS_PRLI_PEND)
                return rval;

        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
                return rval;

        fcport->flags |= FCF_ASYNC_SENT;
        fcport->logout_completed = 0;

        sp->type = SRB_PRLI_CMD;
        sp->name = "prli";
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

        lio = &sp->u.iocb_cmd;
        lio->timeout = qla2x00_async_iocb_timeout;
        sp->done = qla2x00_async_prli_sp_done;
        lio->u.logio.flags = 0;

        /* Request the NVMe PRLI payload for FC-NVMe capable ports. */
        if (fcport->fc4f_nvme)
                lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                fcport->flags |= FCF_LOGIN_NEEDED;
                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                goto done_free_sp;
        }

        ql_dbg(ql_dbg_disc, vha, 0x211b,
            "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
            fcport->port_name, sp->handle, fcport->loop_id,
            fcport->d_id.b24, fcport->login_retry);

        return rval;

done_free_sp:
        sp->free(sp);
        fcport->flags &= ~FCF_ASYNC_SENT;
        return rval;
}
920
921 int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
922 {
923 struct qla_work_evt *e;
924
925 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
926 if (!e)
927 return QLA_FUNCTION_FAILED;
928
929 e->u.fcport.fcport = fcport;
930 e->u.fcport.opt = opt;
931 fcport->flags |= FCF_ASYNC_ACTIVE;
932 return qla2x00_post_work(vha, e);
933 }
934
/*
 * qla24xx_async_gpdb - issue an async Get Port Database mailbox command.
 * @vha:    host to issue the command on.
 * @fcport: port whose firmware database entry is requested.
 * @opt:    option bits for mb[10].
 *
 * Allocates a port_database_24xx DMA buffer whose ownership passes to
 * the completion callback (qla24xx_async_gpdb_sp_done frees it).  On any
 * failure path the request is re-posted as deferred work so the state
 * machine still advances.
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
        srb_t *sp;
        struct srb_iocb *mbx;
        int rval = QLA_FUNCTION_FAILED;
        u16 *mb;
        dma_addr_t pd_dma;
        struct port_database_24xx *pd;
        struct qla_hw_data *ha = vha->hw;

        if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
                return rval;

        fcport->disc_state = DSC_GPDB;

        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
                goto done;

        fcport->flags |= FCF_ASYNC_SENT;
        sp->type = SRB_MB_IOCB;
        sp->name = "gpdb";
        sp->gen1 = fcport->rscn_gen;
        sp->gen2 = fcport->login_gen;
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

        pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
        if (pd == NULL) {
                ql_log(ql_log_warn, vha, 0xd043,
                    "Failed to allocate port database structure.\n");
                goto done_free_sp;
        }

        mb = sp->u.iocb_cmd.u.mbx.out_mb;
        mb[0] = MBC_GET_PORT_DATABASE;
        mb[1] = fcport->loop_id;
        mb[2] = MSW(pd_dma);
        mb[3] = LSW(pd_dma);
        mb[6] = MSW(MSD(pd_dma));
        mb[7] = LSW(MSD(pd_dma));
        mb[9] = vha->vp_idx;
        mb[10] = opt;

        mbx = &sp->u.iocb_cmd;
        mbx->timeout = qla2x00_async_iocb_timeout;
        /* Hand the pd buffer to the completion path for parsing/freeing. */
        mbx->u.mbx.in = (void *)pd;
        mbx->u.mbx.in_dma = pd_dma;

        sp->done = qla24xx_async_gpdb_sp_done;

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS)
                goto done_free_sp;

        ql_dbg(ql_dbg_disc, vha, 0x20dc,
            "Async-%s %8phC hndl %x opt %x\n",
            sp->name, fcport->port_name, sp->handle, opt);

        return rval;

done_free_sp:
        if (pd)
                dma_pool_free(ha->s_dma_pool, pd, pd_dma);

        sp->free(sp);
        fcport->flags &= ~FCF_ASYNC_SENT;
done:
        /* Retry through the deferred-work path. */
        qla24xx_post_gpdb_work(vha, fcport, opt);
        return rval;
}
1005
/*
 * __qla24xx_handle_gpdb_event - finalize a successful port-database read.
 * @vha: host the event belongs to.
 * @ea:  event argument carrying the fcport.
 *
 * Under tgt.sess_lock, marks the session live (deleted=0, bump
 * login_gen).  A first-time login bumps the host's fcport count and then
 * continues discovery: straight to fcport update when iIDMA/GPSC is not
 * available, otherwise via GFPNID (if the port id changed) or GPSC.  A
 * repeat completion just revalidates the existing session.
 */
static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
        unsigned long flags;

        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
        ea->fcport->login_gen++;
        ea->fcport->deleted = 0;
        ea->fcport->logout_on_delete = 1;

        if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
                vha->fcport_count++;
                ea->fcport->login_succ = 1;

                if (!IS_IIDMA_CAPABLE(vha->hw) ||
                    !vha->hw->flags.gpsc_supported) {
                        ql_dbg(ql_dbg_disc, vha, 0x20d6,
                            "%s %d %8phC post upd_fcport fcp_cnt %d\n",
                            __func__, __LINE__, ea->fcport->port_name,
                            vha->fcport_count);

                        qla24xx_post_upd_fcport_work(vha, ea->fcport);
                } else {
                        if (ea->fcport->id_changed) {
                                ea->fcport->id_changed = 0;
                                ql_dbg(ql_dbg_disc, vha, 0x20d7,
                                    "%s %d %8phC post gfpnid fcp_cnt %d\n",
                                    __func__, __LINE__, ea->fcport->port_name,
                                    vha->fcport_count);
                                qla24xx_post_gfpnid_work(vha, ea->fcport);
                        } else {
                                ql_dbg(ql_dbg_disc, vha, 0x20d7,
                                    "%s %d %8phC post gpsc fcp_cnt %d\n",
                                    __func__, __LINE__, ea->fcport->port_name,
                                    vha->fcport_count);
                                qla24xx_post_gpsc_work(vha, ea->fcport);
                        }
                }
        } else if (ea->fcport->login_succ) {
                /*
                 * We have an existing session. A late RSCN delivery
                 * must have triggered the session to be re-validate.
                 * Session is still valid.
                 */
                ql_dbg(ql_dbg_disc, vha, 0x20d6,
                    "%s %d %8phC session revalidate success\n",
                    __func__, __LINE__, ea->fcport->port_name);
                ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
        }
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
1057
/*
 * qla24xx_handle_gpdb_event - process an FCME_GPDB_DONE event.
 * @vha: host the event belongs to.
 * @ea:  event argument; ea->sp carries the fetched port database.
 *
 * Dispatches on the firmware's current login state for the port: a
 * completed PRLI parses the database and finalizes the session; a
 * still-pending PLOGI/PRLI asks the DPC thread to relogin later; any
 * other state tears the session down.
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
        fc_port_t *fcport = ea->fcport;
        struct port_database_24xx *pd;
        struct srb *sp = ea->sp;
        uint8_t ls;

        pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

        fcport->flags &= ~FCF_ASYNC_SENT;

        ql_dbg(ql_dbg_disc, vha, 0x20d2,
            "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name,
            fcport->disc_state, pd->current_login_state, ea->rc);

        if (fcport->disc_state == DSC_DELETE_PEND)
                return;

        /* NVMe state lives in the high nibble, FCP in the low. */
        if (fcport->fc4f_nvme)
                ls = pd->current_login_state >> 4;
        else
                ls = pd->current_login_state & 0xf;

        switch (ls) {
        case PDS_PRLI_COMPLETE:
                __qla24xx_parse_gpdb(vha, fcport, pd);
                break;
        case PDS_PLOGI_PENDING:
        case PDS_PLOGI_COMPLETE:
        case PDS_PRLI_PENDING:
        case PDS_PRLI2_PENDING:
                ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
                    __func__, __LINE__, fcport->port_name);
                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                return;
        case PDS_LOGO_PENDING:
        case PDS_PORT_UNAVAILABLE:
        default:
                ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
                    __func__, __LINE__, fcport->port_name);
                qlt_schedule_sess_for_deletion(fcport);
                return;
        }
        __qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */
1104
/*
 * qla_chk_n2n_b4_login - decide whether to initiate login on an N2N link.
 * @vha:    host evaluating the login.
 * @fcport: candidate remote port.
 *
 * Pure target mode never initiates.  In dual mode on a point-to-point
 * (N2N) topology, only the side with the higher WWPN initiates -- unless
 * the peer already completed PLOGI and its NACK grace period expired.
 * Initiator mode (and dual mode off N2N) always initiates.  When login
 * proceeds, a free loop id is claimed first; running out of loop ids
 * tears the session down instead.
 */
static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
        u8 login = 0;
        int rc;

        if (qla_tgt_mode_enabled(vha))
                return;

        if (qla_dual_mode_enabled(vha)) {
                if (N2N_TOPO(vha->hw)) {
                        u64 mywwn, wwn;

                        /* Higher WWPN wins the right to initiate. */
                        mywwn = wwn_to_u64(vha->port_name);
                        wwn = wwn_to_u64(fcport->port_name);
                        if (mywwn > wwn)
                                login = 1;
                        else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
                            && time_after_eq(jiffies,
                                    fcport->plogi_nack_done_deadline))
                                login = 1;
                } else {
                        login = 1;
                }
        } else {
                /* initiator mode */
                login = 1;
        }

        if (login) {
                if (fcport->loop_id == FC_NO_LOOP_ID) {
                        fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
                        rc = qla2x00_find_new_loop_id(vha, fcport);
                        if (rc) {
                                ql_dbg(ql_dbg_disc, vha, 0x20e6,
                                    "%s %d %8phC post del sess - out of loopid\n",
                                    __func__, __LINE__, fcport->port_name);
                                fcport->scan_state = 0;
                                qlt_schedule_sess_for_deletion(fcport);
                                return;
                        }
                }
                ql_dbg(ql_dbg_disc, vha, 0x20bf,
                    "%s %d %8phC post login\n",
                    __func__, __LINE__, fcport->port_name);
                qla2x00_post_async_login_work(vha, fcport, NULL);
        }
}
1152
/*
 * qla24xx_fcport_handle_login - advance the discovery/login state machine
 * for one remote port.
 *
 * Based on fcport->disc_state and the firmware's login state, posts the
 * next async step (GNN_ID, GNL, ADISC, or a direct login attempt) or
 * defers by setting RELOGIN_NEEDED.  Always returns 0; progress happens
 * through the posted work items, not the return value.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u64 wwn;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->login_retry,
	    fcport->loop_id, fcport->scan_state);

	/* Retry budget exhausted - do nothing until something re-arms it. */
	if (fcport->login_retry == 0)
		return 0;

	/* Only ports seen by the most recent scan are login candidates. */
	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	/* Firmware already has a PLOGI/PRLI in flight on this handle. */
	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
		return 0;

	/*
	 * Remote side completed PLOGI into us: wait out the NACK grace
	 * period before originating our own login.
	 */
	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* for pure Target Mode. Login will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return 0;

	/* An async op is already outstanding; let DPC retry later. */
	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	fcport->login_retry--;

	switch (fcport->disc_state) {
	case DSC_DELETED:
		wwn = wwn_to_u64(fcport->node_name);
		if (wwn == 0) {
			/* Node name unknown - query it first via GNN_ID. */
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC post GNNID\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_post_gnnid_work(vha, fcport);
		} else if (fcport->loop_id == FC_NO_LOOP_ID) {
			/* No nport handle yet - fetch firmware login list. */
			ql_dbg(ql_dbg_disc, vha, 0x20bd,
			    "%s %d %8phC post gnl\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_post_gnl_work(vha, fcport);
		} else {
			qla_chk_n2n_b4_login(vha, fcport);
		}
		break;

	case DSC_GNL:
		if (fcport->login_pause) {
			/* A conflicting session must clean up first. */
			fcport->last_rscn_gen = fcport->rscn_gen;
			fcport->last_login_gen = fcport->login_gen;
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			break;
		}

		qla_chk_n2n_b4_login(vha, fcport);
		break;

	case DSC_LOGIN_FAILED:
		ql_dbg(ql_dbg_disc, vha, 0x20d0,
		    "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qla24xx_post_gidpn_work(vha, fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		ql_dbg(ql_dbg_disc, vha, 0x20d1,
		    "%s %d %8phC post adisc\n",
		    __func__, __LINE__, fcport->port_name);
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	default:
		break;
	}

	return 0;
}
1249
1250 static
1251 void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
1252 {
1253 fcport->rscn_gen++;
1254
1255 ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
1256 "%s %8phC DS %d LS %d\n",
1257 __func__, fcport->port_name, fcport->disc_state,
1258 fcport->fw_login_state);
1259
1260 if (fcport->flags & FCF_ASYNC_SENT)
1261 return;
1262
1263 switch (fcport->disc_state) {
1264 case DSC_DELETED:
1265 case DSC_LOGIN_COMPLETE:
1266 qla24xx_post_gpnid_work(fcport->vha, &ea->id);
1267 break;
1268 default:
1269 break;
1270 }
1271 }
1272
1273 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1274 u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
1275 {
1276 struct qla_work_evt *e;
1277 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1278 if (!e)
1279 return QLA_FUNCTION_FAILED;
1280
1281 e->u.new_sess.id = *id;
1282 e->u.new_sess.pla = pla;
1283 e->u.new_sess.fc4_type = fc4_type;
1284 memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
1285 if (node_name)
1286 memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
1287
1288 return qla2x00_post_work(vha, e);
1289 }
1290
/*
 * qla24xx_handle_relogin_event - FCME_RELOGIN handler: decide whether a
 * relogin may proceed now, must be deferred, or needs a fresh GID_PN
 * lookup because an RSCN arrived since the last attempt.
 */
static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	/* Firmware is mid-login on this handle - do not interfere. */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return;

	/* Honor the PLOGI NACK grace period before re-initiating. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return;
		}
	}

	/* Async op outstanding - refund the retry credit and defer. */
	if (fcport->flags & FCF_ASYNC_SENT) {
		fcport->login_retry++;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	/* Port is being torn down - refund the credit, no relogin. */
	if (fcport->disc_state == DSC_DELETE_PEND) {
		fcport->login_retry++;
		return;
	}

	/* An RSCN arrived since last attempt: re-validate the port id. */
	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_post_gidpn_work(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}
1338
/*
 * qla2x00_fcport_event_handler - central dispatcher for fabric discovery
 * events (FCME_*).  Name-server query completions, RSCNs, and
 * login/PRLI/port-database completions are all funneled through here.
 */
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *f, *tf;
	uint32_t id = 0, mask, rid;
	unsigned long flags;

	/*
	 * Drop discovery-refresh events while a loop resync is pending or
	 * active; the resync rescans everything anyway.
	 */
	switch (ea->event) {
	case FCME_RSCN:
	case FCME_GIDPN_DONE:
	case FCME_GPSC_DONE:
	case FCME_GPNID_DONE:
	case FCME_GNNID_DONE:
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
		    test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
			return;
		break;
	default:
		break;
	}

	switch (ea->event) {
	case FCME_RELOGIN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;

		qla24xx_handle_relogin_event(vha, ea);
		break;
	case FCME_RSCN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;
		/* The RSCN address format selects the rescan scope. */
		switch (ea->id.b.rsvd_1) {
		case RSCN_PORT_ADDR:
			/*
			 * Single-port RSCN: kick the (debounced) fabric
			 * scan worker unless one is already queued.
			 */
			spin_lock_irqsave(&vha->work_lock, flags);
			if (vha->scan.scan_flags == 0) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s: schedule\n", __func__);
				vha->scan.scan_flags |= SF_QUEUED;
				schedule_delayed_work(&vha->scan.scan_work, 5);
			}
			spin_unlock_irqrestore(&vha->work_lock, flags);

			break;
		case RSCN_AREA_ADDR:
		case RSCN_DOM_ADDR:
			/* Re-handle every known port in the area/domain. */
			if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
				mask = 0xffff00;
				ql_dbg(ql_dbg_async, vha, 0x5044,
				    "RSCN: Area 0x%06x was affected\n",
				    ea->id.b24);
			} else {
				mask = 0xff0000;
				ql_dbg(ql_dbg_async, vha, 0x507a,
				    "RSCN: Domain 0x%06x was affected\n",
				    ea->id.b24);
			}

			rid = ea->id.b24 & mask;
			list_for_each_entry_safe(f, tf, &vha->vp_fcports,
			    list) {
				id = f->d_id.b24 & mask;
				if (rid == id) {
					ea->fcport = f;
					qla24xx_handle_rscn_event(f, ea);
				}
			}
			break;
		case RSCN_FAB_ADDR:
		default:
			/* Whole-fabric change: force a full loop resync. */
			ql_log(ql_log_warn, vha, 0xd045,
			    "RSCN: Fabric was affected. Addr format %d\n",
			    ea->id.b.rsvd_1);
			qla2x00_mark_all_devices_lost(vha, 1);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		}
		break;
	case FCME_GIDPN_DONE:
		qla24xx_handle_gidpn_event(vha, ea);
		break;
	case FCME_GNL_DONE:
		qla24xx_handle_gnl_done_event(vha, ea);
		break;
	case FCME_GPSC_DONE:
		qla24xx_handle_gpsc_event(vha, ea);
		break;
	case FCME_PLOGI_DONE:	/* Initiator side sent LLIOCB */
		qla24xx_handle_plogi_done_event(vha, ea);
		break;
	case FCME_PRLI_DONE:
		qla24xx_handle_prli_done_event(vha, ea);
		break;
	case FCME_GPDB_DONE:
		qla24xx_handle_gpdb_event(vha, ea);
		break;
	case FCME_GPNID_DONE:
		qla24xx_handle_gpnid_event(vha, ea);
		break;
	case FCME_GFFID_DONE:
		qla24xx_handle_gffid_event(vha, ea);
		break;
	case FCME_ADISC_DONE:
		qla24xx_handle_adisc_event(vha, ea);
		break;
	case FCME_GNNID_DONE:
		qla24xx_handle_gnnid_event(vha, ea);
		break;
	case FCME_GFPNID_DONE:
		qla24xx_handle_gfpnid_event(vha, ea);
		break;
	default:
		/* Unknown event is a driver bug, not a recoverable state. */
		BUG_ON(1);
		break;
	}
}
1453
1454 static void
1455 qla2x00_tmf_iocb_timeout(void *data)
1456 {
1457 srb_t *sp = data;
1458 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1459
1460 tmf->u.tmf.comp_status = CS_TIMEOUT;
1461 complete(&tmf->u.tmf.comp);
1462 }
1463
1464 static void
1465 qla2x00_tmf_sp_done(void *ptr, int res)
1466 {
1467 srb_t *sp = ptr;
1468 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1469
1470 complete(&tmf->u.tmf.comp);
1471 }
1472
1473 int
1474 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
1475 uint32_t tag)
1476 {
1477 struct scsi_qla_host *vha = fcport->vha;
1478 struct srb_iocb *tm_iocb;
1479 srb_t *sp;
1480 int rval = QLA_FUNCTION_FAILED;
1481
1482 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1483 if (!sp)
1484 goto done;
1485
1486 tm_iocb = &sp->u.iocb_cmd;
1487 sp->type = SRB_TM_CMD;
1488 sp->name = "tmf";
1489 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
1490 tm_iocb->u.tmf.flags = flags;
1491 tm_iocb->u.tmf.lun = lun;
1492 tm_iocb->u.tmf.data = tag;
1493 sp->done = qla2x00_tmf_sp_done;
1494 tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
1495 init_completion(&tm_iocb->u.tmf.comp);
1496
1497 rval = qla2x00_start_sp(sp);
1498 if (rval != QLA_SUCCESS)
1499 goto done_free_sp;
1500
1501 ql_dbg(ql_dbg_taskm, vha, 0x802f,
1502 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
1503 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
1504 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1505
1506 wait_for_completion(&tm_iocb->u.tmf.comp);
1507
1508 rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
1509 QLA_SUCCESS : QLA_FUNCTION_FAILED;
1510
1511 if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
1512 ql_dbg(ql_dbg_taskm, vha, 0x8030,
1513 "TM IOCB failed (%x).\n", rval);
1514 }
1515
1516 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
1517 flags = tm_iocb->u.tmf.flags;
1518 lun = (uint16_t)tm_iocb->u.tmf.lun;
1519
1520 /* Issue Marker IOCB */
1521 qla2x00_marker(vha, vha->hw->req_q_map[0],
1522 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
1523 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
1524 }
1525
1526 done_free_sp:
1527 sp->free(sp);
1528 sp->fcport->flags &= ~FCF_ASYNC_SENT;
1529 done:
1530 return rval;
1531 }
1532
1533 static void
1534 qla24xx_abort_iocb_timeout(void *data)
1535 {
1536 srb_t *sp = data;
1537 struct srb_iocb *abt = &sp->u.iocb_cmd;
1538
1539 abt->u.abt.comp_status = CS_TIMEOUT;
1540 complete(&abt->u.abt.comp);
1541 }
1542
1543 static void
1544 qla24xx_abort_sp_done(void *ptr, int res)
1545 {
1546 srb_t *sp = ptr;
1547 struct srb_iocb *abt = &sp->u.iocb_cmd;
1548
1549 del_timer(&sp->u.iocb_cmd.timer);
1550 complete(&abt->u.abt.comp);
1551 }
1552
/*
 * qla24xx_async_abort_cmd - issue an ABORT IOCB for @cmd_sp and wait
 * synchronously for its completion (or the SRB timer's timeout).
 *
 * A second SRB is allocated to carry the abort itself and is freed before
 * returning.  Returns QLA_SUCCESS only when firmware reported CS_COMPLETE
 * for the abort request.
 */
int
qla24xx_async_abort_cmd(srb_t *cmd_sp)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	fc_port_t *fcport = cmd_sp->fcport;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;

	/* Queue the abort on the same request queue as the victim command. */
	if (vha->flags.qpairs_available && cmd_sp->qpair)
		abt_iocb->u.abt.req_que_no =
		    cpu_to_le16(cmd_sp->qpair->req->id);
	else
		abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);

	sp->done = qla24xx_abort_sp_done;
	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, target_id=%x\n",
	    cmd_sp->handle, fcport->tgt_id);

	/* Woken by qla24xx_abort_sp_done() or the timeout handler. */
	wait_for_completion(&abt_iocb->u.abt.comp);

	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(sp);
done:
	return rval;
}
1600
/*
 * qla24xx_async_abort_command - find @sp's outstanding-command handle on
 * its request queue and issue an abort for it.
 *
 * Returns QLA_FUNCTION_FAILED when the command is no longer outstanding
 * (e.g. it already completed).  FX00 ioctl SRBs are aborted through the
 * FXDISC path instead of an ABORT IOCB.
 */
int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long flags = 0;

	uint32_t	handle;
	fc_port_t	*fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;

	/* Multi-queue: the command lives on its qpair's request queue. */
	if (vha->flags.qpairs_available && sp->qpair)
		req = sp->qpair->req;

	/* Scan the outstanding array for this SRB (handle 0 is unused). */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/*
	 * NOTE(review): the lock is dropped before the abort is issued, so
	 * the command could complete in this window - confirm firmware
	 * tolerates an abort for an already-completed handle.
	 */
	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp);
}
1631
/*
 * qla24xx_handle_prli_done_event - FCME_PRLI_DONE handler.
 *
 * On success, fetch the port database to confirm login state.  On any
 * failure of an NVMe PRLI in N2N topology, retry once as an FCP (fc4)
 * PRLI; the "unhandle event" message is logged in the failure path
 * regardless (intentional fallthrough after posting the retry).
 */
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		if (ea->fcport->n2n_flag) {
			/* NVMe PRLI failed - fall back to FCP PRLI. */
			ql_dbg(ql_dbg_disc, vha, 0x2118,
				"%s %d %8phC post fc4 prli\n",
				__func__, __LINE__, ea->fcport->port_name);
			ea->fcport->fc4f_nvme = 0;
			ea->fcport->n2n_flag = 0;
			qla24xx_post_prli_work(vha, ea->fcport);
		}
		ql_dbg(ql_dbg_disc, vha, 0x2119,
		    "%s %d %8phC unhandle event of %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
		break;
	}
}
1660
/*
 * qla24xx_handle_plogi_done_event - FCME_PLOGI_DONE handler: act on the
 * mailbox completion status of an async PLOGI.
 *
 * Success continues the login sequence (PRLI for NVMe ports, GPDB
 * otherwise).  MBS_LOOP_ID_USED / MBS_PORT_ID_USED indicate handle or
 * nport-id collisions and trigger reallocation / conflict resolution.
 * Stale completions (generation mismatch vs. login_gen/rscn_gen) are
 * discarded.
 */
static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;
	unsigned long flags;
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
	    ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1,
	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);

	/* Remote side is logging into us - let its login win. */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
		ql_dbg(ql_dbg_disc, vha, 0x20ea,
		    "%s %d %8phC Remote is trying to login\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* RSCN arrived while PLOGI was in flight - re-query id. */
		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gidpn_work(vha, fcport);
		return;
	}

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (ea->fcport->fc4f_nvme) {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
				"%s %d %8phC post prli\n",
				__func__, __LINE__, ea->fcport->port_name);
			qla24xx_post_prli_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ea,
			    "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->loop_id, ea->fcport->d_id.b24);

			/* Claim the handle; update state under sess_lock. */
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		ea->fcport->disc_state = DSC_LOGIN_FAILED;
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
		cid.b.al_pa  = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC LoopID 0x%x in use post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id);

		/*
		 * Reserved addresses keep the handle marked in-use; any
		 * other conflict frees it for reallocation.
		 */
		if (IS_SW_RESV_ADDR(cid)) {
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			ea->fcport->loop_id = FC_NO_LOOP_ID;
		} else {
			qla2x00_clear_loop_id(ea->fcport);
		}
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		ql_dbg(ql_dbg_disc, vha, 0x20ed,
		    "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
		    ea->fcport->d_id.b.al_pa);

		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport share the same loop_id/nport id.
			 * Conflict fcport needs to finish cleanup before this
			 * fcport can proceed to login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
			qla2x00_clear_loop_id(ea->fcport);
			qla24xx_post_gidpn_work(vha, ea->fcport);
		} else {
			/* No driver-side owner: adopt the handle and let the
			 * delete/relogin cycle re-establish the session. */
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}
1805
1806 void
1807 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1808 uint16_t *data)
1809 {
1810 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1811 qlt_logo_completion_handler(fcport, data[0]);
1812 fcport->login_gen++;
1813 fcport->flags &= ~FCF_ASYNC_ACTIVE;
1814 return;
1815 }
1816
1817 void
1818 qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1819 uint16_t *data)
1820 {
1821 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1822 if (data[0] == MBS_COMMAND_COMPLETE) {
1823 qla2x00_update_fcport(vha, fcport);
1824
1825 return;
1826 }
1827
1828 /* Retry login. */
1829 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
1830 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1831 else
1832 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1833
1834 return;
1835 }
1836
1837 /****************************************************************************/
1838 /* QLogic ISP2x00 Hardware Support Functions. */
1839 /****************************************************************************/
1840
/*
 * qla83xx_nic_core_fw_load - participate in the ISP83xx Inter-Driver
 * Communication (IDC) protocol to get the NIC core firmware loaded.
 *
 * Under the IDC lock: announce this function's presence, elect the reset
 * owner, negotiate IDC major/minor versions, and - when we are the
 * owner - move the device to READY before running the IDC state handler.
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED on presence/version
 * negotiation failure.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		/* Owner: once the port config reads back, declare READY. */
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}
1913
1914 /*
1915 * qla2x00_initialize_adapter
1916 * Initialize board.
1917 *
1918 * Input:
1919 * ha = adapter block pointer.
1920 *
1921 * Returns:
1922 * 0 = success
1923 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * should honor the reset. */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* Load and start the RISC firmware unless it is already running. */
	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	/*
	 * NOTE(review): rval is only refreshed by qla2x00_init_rings() in
	 * initiator/dual mode; in pure target mode the value from the
	 * earlier setup path carries into the checks below - confirm this
	 * is intended.
	 */
	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}
2049
2050 /**
2051 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
2052 * @ha: HA context
2053 *
2054 * Returns 0 on success.
2055 */
2056 int
2057 qla2100_pci_config(scsi_qla_host_t *vha)
2058 {
2059 uint16_t w;
2060 unsigned long flags;
2061 struct qla_hw_data *ha = vha->hw;
2062 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2063
2064 pci_set_master(ha->pdev);
2065 pci_try_set_mwi(ha->pdev);
2066
2067 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2068 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2069 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2070
2071 pci_disable_rom(ha->pdev);
2072
2073 /* Get PCI bus information. */
2074 spin_lock_irqsave(&ha->hardware_lock, flags);
2075 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
2076 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2077
2078 return QLA_SUCCESS;
2079 }
2080
2081 /**
2082 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
2083 * @ha: HA context
2084 *
2085 * Returns 0 on success.
2086 */
/*
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @vha: adapter context
 *
 * Besides the common PCI command-register setup, this pauses the RISC on
 * true ISP2300 parts to read the FB revision level (the 2310 also reports
 * itself as 2300) and disables MWI on FPM rev 6 silicon.
 *
 * Returns 0 on success.
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t	w;
	unsigned long   flags = 0;
	uint32_t	cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x0);
		RD_REG_WORD(&reg->ctrl_status);

		/* Release RISC module. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
2162
2163 /**
2164 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
2165 * @ha: HA context
2166 *
2167 * Returns 0 on success.
2168 */
/*
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @vha: adapter context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
2206
/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @vha: adapter block pointer
 *
 * Like qla24xx_pci_config() but for PCIe-only ISP25xx parts: no PCI-X
 * MMRBC tuning and no ctrl_status snapshot is taken here.
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting and make sure INTx is not masked. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}
2237
2238 /**
2239 * qla2x00_isp_firmware() - Choose firmware image.
2240 * @ha: HA context
2241 *
2242 * Returns 0 on success.
2243 */
2244 static int
2245 qla2x00_isp_firmware(scsi_qla_host_t *vha)
2246 {
2247 int rval;
2248 uint16_t loop_id, topo, sw_cap;
2249 uint8_t domain, area, al_pa;
2250 struct qla_hw_data *ha = vha->hw;
2251
2252 /* Assume loading risc code */
2253 rval = QLA_FUNCTION_FAILED;
2254
2255 if (ha->flags.disable_risc_code_load) {
2256 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
2257
2258 /* Verify checksum of loaded RISC code. */
2259 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
2260 if (rval == QLA_SUCCESS) {
2261 /* And, verify we are not in ROM code. */
2262 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2263 &area, &domain, &topo, &sw_cap);
2264 }
2265 }
2266
2267 if (rval)
2268 ql_dbg(ql_dbg_init, vha, 0x007a,
2269 "**** Load RISC code ****.\n");
2270
2271 return (rval);
2272 }
2273
/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: adapter block pointer
 *
 * Pauses the RISC, soft-resets the FPM and frame-buffer blocks and the
 * ISP itself, then releases the RISC.  Bus mastering is turned off for
 * the duration of the reset and restored afterwards.  The whole
 * sequence runs under hardware_lock with interrupts disabled; the
 * register-access order below is mandated by the hardware, do not
 * reorder.
 */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;

	/* Nothing to do if the PCI channel has gone away. */
	if (unlikely(pci_channel_offline(ha->pdev)))
		return;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			/* Poll up to ~3s for the pause to take effect. */
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to for a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait up to ~3s for mailbox 0 to leave the BUSY state. */
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
2417
2418 /**
2419 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
2420 *
2421 * Returns 0 on success.
2422 */
2423 static int
2424 qla81xx_reset_mpi(scsi_qla_host_t *vha)
2425 {
2426 uint16_t mb[4] = {0x1010, 0, 1, 0};
2427
2428 if (!IS_QLA81XX(vha->hw))
2429 return QLA_SUCCESS;
2430
2431 return qla81xx_write_mpi_register(vha, mb);
2432 }
2433
/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @vha: adapter block pointer
 *
 * Shuts down DMA, issues an ISP soft reset, waits for firmware to
 * finish NVRAM accesses and for the reset to complete, optionally
 * resets the MPI firmware, then releases the RISC.  Progress is
 * recorded in ha->fw_dump_cap_flags so a later firmware dump can tell
 * how far the reset got.  Runs under hardware_lock.
 *
 * Returns 0 on success, QLA_FUNCTION_TIMEOUT if mailbox 0 never
 * cleared.
 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Wait up to ~300ms for pending DMA to drain. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status),
	    (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Config-space read flushes the posted soft-reset write. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	RD_REG_DWORD(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			/* abts_cnt is static: retries persist across calls. */
			if (++abts_cnt < 5) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	/* Wait for RISC firmware to come up after the release. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_WORD(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}
2568
/*
 * Read the RISC semaphore register: select the RISC register bank via
 * iobase_addr, then read through the offset iobase window.
 */
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);

}
2578
/*
 * Write the RISC semaphore register: select the RISC register bank via
 * iobase_addr, then write through the offset iobase window.
 */
static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
}
2587
/*
 * Acquire the RISC semaphore before resetting the chip.  Only applies
 * to two specific subsystem IDs (0x0175/0x0240 — presumably boards
 * sharing the flash with another function; confirm against hardware
 * docs).  Pauses the RISC, then polls for the semaphore; if another
 * owner holds it (FORCE bit set) we wait for them to release, and after
 * TIMEOUT_TOTAL_ELAPSED we force-acquire it unconditionally.
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	/* Only these subsystem IDs need the semaphore dance. */
	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	/* Try to set the semaphore; success once the SET sticks. */
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	/* Semaphore set and nobody is forcing it: we own it. */
	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* Another owner forced it: drop ours, wait for FORCE to clear. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	/* Overall timeout expired: take the semaphore unconditionally. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}
2648
2649 /**
2650 * qla24xx_reset_chip() - Reset ISP24xx chip.
2651 * @ha: HA context
2652 *
2653 * Returns 0 on success.
2654 */
2655 void
2656 qla24xx_reset_chip(scsi_qla_host_t *vha)
2657 {
2658 struct qla_hw_data *ha = vha->hw;
2659
2660 if (pci_channel_offline(ha->pdev) &&
2661 ha->flags.pci_channel_io_perm_failure) {
2662 return;
2663 }
2664
2665 ha->isp_ops->disable_intrs(ha);
2666
2667 qla25xx_manipulate_risc_semaphore(vha);
2668
2669 /* Perform RISC reset. */
2670 qla24xx_reset_risc(vha);
2671 }
2672
/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: adapter block pointer
 *
 * Soft-resets the ISP, verifies the product ID mailboxes, sizes the
 * firmware transfer buffer, and runs the mailbox wrap test.  Runs under
 * hardware_lock except for the mailbox register test.
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags = 0;
	uint16_t data;
	uint32_t cnt;
	uint16_t mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b,
	    "Testing device at %lx.\n", (u_long)&reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	/* Poll up to ~30s for the soft-reset bit to clear. */
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	/* Re-acquire: chip_diag_failed expects the lock held. */
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}
2796
2797 /**
2798 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
2799 * @ha: HA context
2800 *
2801 * Returns 0 on success.
2802 */
2803 int
2804 qla24xx_chip_diag(scsi_qla_host_t *vha)
2805 {
2806 int rval;
2807 struct qla_hw_data *ha = vha->hw;
2808 struct req_que *req = ha->req_q_map[0];
2809
2810 if (IS_P3P_TYPE(ha))
2811 return QLA_SUCCESS;
2812
2813 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
2814
2815 rval = qla2x00_mbx_reg_test(vha);
2816 if (rval) {
2817 ql_log(ql_log_warn, vha, 0x0082,
2818 "Failed mailbox send register test.\n");
2819 } else {
2820 /* Flag a successful rval */
2821 rval = QLA_SUCCESS;
2822 }
2823
2824 return rval;
2825 }
2826
/*
 * Allocate DMA-coherent buffers for firmware trace offload: the Fibre
 * Channel Event (FCE) buffer on parts that support it, and the Extended
 * Firmware Trace (EFT) buffer on all FWI2-capable parts.  Best-effort:
 * each allocation/enable failure falls through to the next stage
 * instead of failing the caller.  No-op if the EFT buffer already
 * exists.
 */
static void
qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
	int rval;
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;

	if (ha->eft) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "%s: Offload Mem is already allocated.\n",
		    __func__);
		return;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		/* FCE is only supported on 25xx/81xx/83xx/27xx parts. */
		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
		    !IS_QLA27XX(ha))
			goto try_eft;

		/* Release any stale FCE buffer before reallocating. */
		if (ha->fce)
			dma_free_coherent(&ha->pdev->dev,
			    FCE_SIZE, ha->fce, ha->fce_dma);

		/* Allocate memory for Fibre Channel Event Buffer. */
		tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			ql_log(ql_log_warn, vha, 0x00be,
			    "Unable to allocate (%d KB) for FCE.\n",
			    FCE_SIZE / 1024);
			goto try_eft;
		}

		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
		    ha->fce_mb, &ha->fce_bufs);
		if (rval) {
			ql_log(ql_log_warn, vha, 0x00bf,
			    "Unable to initialize FCE (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
			    tc_dma);
			ha->flags.fce_enabled = 0;
			goto try_eft;
		}
		ql_dbg(ql_dbg_init, vha, 0x00c0,
		    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);

		ha->flags.fce_enabled = 1;
		ha->fce_dma = tc_dma;
		ha->fce = tc;

try_eft:
		/* Release any stale EFT buffer before reallocating. */
		if (ha->eft)
			dma_free_coherent(&ha->pdev->dev,
			    EFT_SIZE, ha->eft, ha->eft_dma);

		/* Allocate memory for Extended Trace Buffer. */
		tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			ql_log(ql_log_warn, vha, 0x00c1,
			    "Unable to allocate (%d KB) for EFT.\n",
			    EFT_SIZE / 1024);
			goto eft_err;
		}

		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
		if (rval) {
			ql_log(ql_log_warn, vha, 0x00c2,
			    "Unable to initialize EFT (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
			    tc_dma);
			goto eft_err;
		}
		ql_dbg(ql_dbg_init, vha, 0x00c3,
		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);

		ha->eft_dma = tc_dma;
		ha->eft = tc;
	}

eft_err:
	return;
}
2912
/*
 * Size and (re)allocate the firmware dump buffer.  The required size is
 * the sum of a chip-family-specific fixed region, firmware RAM, the
 * request/response queues, trace buffers and optional chains; ISP27xx
 * instead computes the size from its firmware dump template.  The
 * buffer is vmalloc'd and its header pre-filled for pre-27xx parts.
 * Reallocation only happens when the computed size changed.
 */
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
	    eft_size, fce_size, mq_size;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct qla2xxx_fw_dump *fw_dump;

	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
	req_q_size = rsp_q_size = 0;

	/* Fixed region and firmware RAM size depend on the chip family. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		fixed_size = sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
		else if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);

		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable) {
			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
				mq_size = sizeof(struct qla2xxx_mq_chain);
			/*
			 * Allocate maximum buffer size for all queues.
			 * Resizing must be done at end-of-dump processing.
			 */
			mq_size += ha->max_req_queues *
			    (req->length * sizeof(request_t));
			mq_size += ha->max_rsp_queues *
			    (rsp->length * sizeof(response_t));
		}
		if (ha->tgt.atio_ring)
			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
		/* Allocate memory for Fibre Channel Event Buffer. */
		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
		    !IS_QLA27XX(ha))
			goto try_eft;

		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
try_eft:
		ql_dbg(ql_dbg_init, vha, 0x00c3,
		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
		eft_size = EFT_SIZE;
	}

	/* ISP27xx sizes the dump from its firmware-provided template. */
	if (IS_QLA27XX(ha)) {
		if (!ha->fw_dump_template) {
			ql_log(ql_log_warn, vha, 0x00ba,
			    "Failed missing fwdump template\n");
			return;
		}
		dump_size = qla27xx_fwdt_calculate_dump_size(vha);
		ql_dbg(ql_dbg_init, vha, 0x00fa,
		    "-> allocating fwdump (%x bytes)...\n", dump_size);
		goto allocate;
	}

	req_q_size = req->length * sizeof(request_t);
	rsp_q_size = rsp->length * sizeof(response_t);
	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
	/* Chained entries (MQ/FCE/offload) start at this offset. */
	ha->chain_offset = dump_size;
	dump_size += mq_size + fce_size;

	if (ha->exchoffld_buf)
		dump_size += sizeof(struct qla2xxx_offld_chain) +
			ha->exchoffld_size;
	if (ha->exlogin_buf)
		dump_size += sizeof(struct qla2xxx_offld_chain) +
			ha->exlogin_size;

allocate:
	/* Only reallocate when the size actually changed. */
	if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
		fw_dump = vmalloc(dump_size);
		if (!fw_dump) {
			ql_log(ql_log_warn, vha, 0x00c4,
			    "Unable to allocate (%d KB) for firmware dump.\n",
			    dump_size / 1024);
		} else {
			if (ha->fw_dump)
				vfree(ha->fw_dump);
			ha->fw_dump = fw_dump;

			ha->fw_dump_len = dump_size;
			ql_dbg(ql_dbg_init, vha, 0x00c5,
			    "Allocated (%d KB) for firmware dump.\n",
			    dump_size / 1024);

			/* 27xx dumps are template-driven; no header here. */
			if (IS_QLA27XX(ha))
				return;

			ha->fw_dump->signature[0] = 'Q';
			ha->fw_dump->signature[1] = 'L';
			ha->fw_dump->signature[2] = 'G';
			ha->fw_dump->signature[3] = 'C';
			ha->fw_dump->version = htonl(1);

			ha->fw_dump->fixed_size = htonl(fixed_size);
			ha->fw_dump->mem_size = htonl(mem_size);
			ha->fw_dump->req_q_size = htonl(req_q_size);
			ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

			ha->fw_dump->eft_size = htonl(eft_size);
			ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
			ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));

			ha->fw_dump->header_size =
				htonl(offsetof(struct qla2xxx_fw_dump, isp));
		}
	}
}
3036
/*
 * Synchronize the MPS bits (mask 0xe0) between PCI config space offset
 * 0x54 and firmware RAM word 0x7a15 on ISP81xx.  The RAM access is
 * bracketed by a firmware semaphore at word 0x7c00 (1 = acquire,
 * 0 = release).  No-op on other chip families.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK 0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Acquire the firmware semaphore before touching RAM. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	/* Already in sync: nothing to write back. */
	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		goto done_release;

	/* Splice the config-space MPS bits into the RAM word. */
	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	/* Always release the semaphore, even on failure above. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
}
3083
3084 int
3085 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3086 {
3087 /* Don't try to reallocate the array */
3088 if (req->outstanding_cmds)
3089 return QLA_SUCCESS;
3090
3091 if (!IS_FWI2_CAPABLE(ha))
3092 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
3093 else {
3094 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
3095 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
3096 else
3097 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
3098 }
3099
3100 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
3101 req->num_outstanding_cmds, GFP_KERNEL);
3102
3103 if (!req->outstanding_cmds) {
3104 /*
3105 * Try to allocate a minimal size just so we can get through
3106 * initialization.
3107 */
3108 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
3109 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
3110 req->num_outstanding_cmds, GFP_KERNEL);
3111
3112 if (!req->outstanding_cmds) {
3113 ql_log(ql_log_fatal, NULL, 0x0126,
3114 "Failed to allocate memory for "
3115 "outstanding_cmds for req_que %p.\n", req);
3116 req->num_outstanding_cmds = 0;
3117 return QLA_FUNCTION_FAILED;
3118 }
3119 }
3120
3121 return QLA_SUCCESS;
3122 }
3123
/*
 * Append _str (pipe-separated after the first entry) to the cursor
 * "ptr" when flag _flag is set in a0->_field.  Expects a0, ptr,
 * leftover, len and p in the caller's scope.  snprintf() returns the
 * would-be length on truncation, so clamp len to what was actually
 * stored before advancing ptr / shrinking leftover — otherwise a
 * truncated entry pushes the cursor past the end of the buffer and the
 * next strcat() writes out of bounds.
 */
#define PRINT_FIELD(_field, _flag, _str) { \
	if (a0->_field & _flag) {\
		if (p) {\
			strcat(ptr, "|");\
			ptr++;\
			leftover--;\
		} \
		len = snprintf(ptr, leftover, "%s", _str); \
		if (len >= leftover) \
			len = leftover > 0 ? leftover - 1 : 0; \
		p = 1;\
		leftover -= len;\
		ptr += len; \
	} \
}
3137
3138 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
3139 {
3140 #define STR_LEN 64
3141 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
3142 u8 str[STR_LEN], *ptr, p;
3143 int leftover, len;
3144
3145 memset(str, 0, STR_LEN);
3146 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
3147 ql_dbg(ql_dbg_init, vha, 0x015a,
3148 "SFP MFG Name: %s\n", str);
3149
3150 memset(str, 0, STR_LEN);
3151 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
3152 ql_dbg(ql_dbg_init, vha, 0x015c,
3153 "SFP Part Name: %s\n", str);
3154
3155 /* media */
3156 memset(str, 0, STR_LEN);
3157 ptr = str;
3158 leftover = STR_LEN;
3159 p = len = 0;
3160 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
3161 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
3162 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
3163 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
3164 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
3165 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
3166 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
3167 ql_dbg(ql_dbg_init, vha, 0x0160,
3168 "SFP Media: %s\n", str);
3169
3170 /* link length */
3171 memset(str, 0, STR_LEN);
3172 ptr = str;
3173 leftover = STR_LEN;
3174 p = len = 0;
3175 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
3176 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
3177 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
3178 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
3179 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
3180 ql_dbg(ql_dbg_init, vha, 0x0196,
3181 "SFP Link Length: %s\n", str);
3182
3183 memset(str, 0, STR_LEN);
3184 ptr = str;
3185 leftover = STR_LEN;
3186 p = len = 0;
3187 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
3188 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
3189 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
3190 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
3191 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
3192 ql_dbg(ql_dbg_init, vha, 0x016e,
3193 "SFP FC Link Tech: %s\n", str);
3194
3195 if (a0->length_km)
3196 ql_dbg(ql_dbg_init, vha, 0x016f,
3197 "SFP Distant: %d km\n", a0->length_km);
3198 if (a0->length_100m)
3199 ql_dbg(ql_dbg_init, vha, 0x0170,
3200 "SFP Distant: %d m\n", a0->length_100m*100);
3201 if (a0->length_50um_10m)
3202 ql_dbg(ql_dbg_init, vha, 0x0189,
3203 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
3204 if (a0->length_62um_10m)
3205 ql_dbg(ql_dbg_init, vha, 0x018a,
3206 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
3207 if (a0->length_om4_10m)
3208 ql_dbg(ql_dbg_init, vha, 0x0194,
3209 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
3210 if (a0->length_om3_10m)
3211 ql_dbg(ql_dbg_init, vha, 0x0195,
3212 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
3213 }
3214
3215
3216 /*
3217 * Return Code:
3218 * QLA_SUCCESS: no action
3219 * QLA_INTERFACE_ERROR: SFP is not there.
3220 * QLA_FUNCTION_FAILED: detected New SFP
3221 */
3222 int
3223 qla24xx_detect_sfp(scsi_qla_host_t *vha)
3224 {
3225 int rc = QLA_SUCCESS;
3226 struct sff_8247_a0 *a;
3227 struct qla_hw_data *ha = vha->hw;
3228
3229 if (!AUTO_DETECT_SFP_SUPPORT(vha))
3230 goto out;
3231
3232 rc = qla2x00_read_sfp_dev(vha, NULL, 0);
3233 if (rc)
3234 goto out;
3235
3236 a = (struct sff_8247_a0 *)vha->hw->sfp_data;
3237 qla2xxx_print_sfp_info(vha);
3238
3239 if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
3240 /* long range */
3241 ha->flags.detected_lr_sfp = 1;
3242
3243 if (a->length_km > 5 || a->length_100m > 50)
3244 ha->long_range_distance = LR_DISTANCE_10K;
3245 else
3246 ha->long_range_distance = LR_DISTANCE_5K;
3247
3248 if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
3249 ql_dbg(ql_dbg_async, vha, 0x507b,
3250 "Detected Long Range SFP.\n");
3251 } else {
3252 /* short range */
3253 ha->flags.detected_lr_sfp = 0;
3254 if (ha->flags.using_lr_setting)
3255 ql_dbg(ql_dbg_async, vha, 0x5084,
3256 "Detected Short Range SFP.\n");
3257 }
3258
3259 if (!vha->flags.init_done)
3260 rc = QLA_SUCCESS;
3261 out:
3262 return rc;
3263 }
3264
/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Loads the RISC firmware via the ISP-specific load_risc hook, verifies
 * its checksum, starts execution and then retrieves firmware version and
 * resource counts.  On pre-FWI2 parts (other than 2100/2200) SRAM /
 * instruction-RAM / GP-RAM parity is disabled around the load and
 * re-enabled afterwards.
 *
 * Returns 0 (QLA_SUCCESS) on success, non-zero otherwise.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;

	if (IS_P3P_TYPE(ha)) {
		/*
		 * P3P (82xx) parts: firmware is loaded/stopped here and the
		 * common post-load path is entered at enable_82xx_npiv.
		 */
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		RD_REG_WORD(&reg->hccr);	/* Read back to flush (PCI posting). */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				qla24xx_detect_sfp(vha);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				/*
				 * Snapshot the pre-query major version so we
				 * can tell below whether this is the first
				 * firmware start (fw_major_version == 0).
				 */
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
					 (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/*
					 * Clamp to a multiple-of-fabric-ID
					 * friendly default when unset/odd.
					 */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_offload_mem(vha);

				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);

			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		RD_REG_WORD(&reg->hccr);	/* Read back to flush (PCI posting). */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (IS_QLA27XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			/* Sector size is reported in words; convert to bytes. */
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			/*
			 * 83xx/27xx can operate without FAC; treat the
			 * missing capability as non-fatal there.
			 */
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}
3419
3420 /**
3421 * qla2x00_init_response_q_entries() - Initializes response queue entries.
3422 * @ha: HA context
3423 *
3424 * Beginning of request ring has initialization control block already built
3425 * by nvram config routine.
3426 *
3427 * Returns 0 on success.
3428 */
3429 void
3430 qla2x00_init_response_q_entries(struct rsp_que *rsp)
3431 {
3432 uint16_t cnt;
3433 response_t *pkt;
3434
3435 rsp->ring_ptr = rsp->ring;
3436 rsp->ring_index = 0;
3437 rsp->status_srb = NULL;
3438 pkt = rsp->ring_ptr;
3439 for (cnt = 0; cnt < rsp->length; cnt++) {
3440 pkt->signature = RESPONSE_PROCESSED;
3441 pkt++;
3442 }
3443 }
3444
/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 *
 * Reads the current firmware options, derives serial-link swing/emphasis
 * and TX/RX sensitivity settings from the NVRAM serial-link options for
 * 1G and 2G rates, applies FCP2/LED/FLOGI-retry adjustments and writes
 * the updated options back to the firmware.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	/* 2100/2200 take none of the tuning below. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    (uint8_t *)&ha->fw_seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings: unpack swing/emphasis/sensitivity bit fields. */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			/* 0 is not a valid RX sensitivity; use 3 instead. */
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings: same unpacking from the 2G bit fields. */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			/* 0 is not a valid RX sensitivity; use 3 instead. */
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
		    __func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}
3534
/**
 * qla24xx_update_fw_options() - Tune firmware options on FWI2 adapters.
 * @vha: HA context
 *
 * Adjusts firmware option bits (ABTS status-IOCB holding, P2P FLOGI retry,
 * routing PUREX/ABTS-RX/RIDA to the ATIO queue for target mode, exchange
 * tracking and ELS emergency-exchange reservation), pushes them to the
 * firmware, and updates serial-link (serdes) parameters when enabled in
 * NVRAM.  No-op on P3P (82xx) parts.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
		    __func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~BIT_4;

		/* Reserve 1/2 of emergency exchanges for ELS.*/
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;
	}

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Only push options when at least one word is non-zero. */
	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}
3605
/**
 * qla2x00_config_rings() - Program base request/response ring parameters.
 * @vha: HA context
 *
 * Copies the base (index 0) request/response queue lengths and DMA
 * addresses into the initialization control block and zeroes the hardware
 * queue in/out pointer registers.
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}
3630
/**
 * qla24xx_config_rings() - Program ring parameters on FWI2 adapters.
 * @vha: HA context
 *
 * Fills the 24xx initialization control block with request/response/ATIO
 * queue geometry and DMA addresses, sets shadow-register and MSI-X /
 * multiqueue option bits where supported, zeroes the hardware queue
 * pointer registers and lets target mode configure its rings.
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));

	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		/* Multiqueue-capable path: QoS, RID and MSI-X vector setup. */
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
	} else {
		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
	}
	qlt_24xx_config_rings(vha);

	/* PCI posting */
	RD_REG_DWORD(&ioreg->hccr);
}
3707
/**
 * qla2x00_init_rings() - Initializes firmware.
 * @vha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Under the hardware lock, resets every active request/response queue and
 * the ATIO ring, then programs the ring registers through the ISP-specific
 * config_rings hook.  Finally issues the "initialize firmware" mailbox
 * command (via the FX00 path for ISPFX00).
 *
 * Returns 0 on success.
 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/* Shadow out-pointer lives just past the last ring entry. */
		req->out_ptr = (void *)(req->ring + req->length);
		*req->out_ptr = 0;
		/* Slot 0 is reserved; start clearing from 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware. */
		req->ring_ptr = req->ring;
		req->ring_index = 0;
		req->cnt = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		/* Shadow in-pointer lives just past the last ring entry. */
		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");

	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		ha->flags.dport_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		QLA_FW_STARTED(ha);
	}

	return (rval);
}
3815
/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Polls the firmware state every 500ms until it reports FSTATE_READY,
 * the overall wait time (wtime) expires, or — while the loop is down —
 * the minimum wait (mtime) expires, in which case the cable is assumed
 * unplugged.  For 84xx parts a verify IOCB may be issued mid-wait and
 * its duration is added to both deadlines.
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[6];
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	rval = QLA_SUCCESS;

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	do {
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				    (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
			    ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}
3947
/*
 * qla2x00_configure_hba
 *	Setup adapter context.
 *
 *	Queries the firmware for the adapter's loop ID, AL_PA, area, domain
 *	and topology; records the operating mode (LOOP/P2P) and topology in
 *	the HW context; and publishes the host port ID to the target-mode
 *	host map.
 *
 * Input:
 *	vha = adapter state pointer.
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	uint16_t topo;
	uint16_t sw_cap;
	uint8_t al_pa;
	uint8_t area;
	uint8_t domain;
	char connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;
	unsigned long flags;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		/* Transitional loop states are expected; don't escalate. */
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_update_host_map(vha, id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}
4068
4069 inline void
4070 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
4071 char *def)
4072 {
4073 char *st, *en;
4074 uint16_t index;
4075 struct qla_hw_data *ha = vha->hw;
4076 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
4077 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
4078
4079 if (memcmp(model, BINZERO, len) != 0) {
4080 strncpy(ha->model_number, model, len);
4081 st = en = ha->model_number;
4082 en += len - 1;
4083 while (en > st) {
4084 if (*en != 0x20 && *en != 0x00)
4085 break;
4086 *en-- = '\0';
4087 }
4088
4089 index = (ha->pdev->subsystem_device & 0xff);
4090 if (use_tbl &&
4091 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
4092 index < QLA_MODEL_NAMES)
4093 strncpy(ha->model_desc,
4094 qla2x00_model_name[index * 2 + 1],
4095 sizeof(ha->model_desc) - 1);
4096 } else {
4097 index = (ha->pdev->subsystem_device & 0xff);
4098 if (use_tbl &&
4099 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
4100 index < QLA_MODEL_NAMES) {
4101 strcpy(ha->model_number,
4102 qla2x00_model_name[index * 2]);
4103 strncpy(ha->model_desc,
4104 qla2x00_model_name[index * 2 + 1],
4105 sizeof(ha->model_desc) - 1);
4106 } else {
4107 strcpy(ha->model_number, def);
4108 }
4109 }
4110 if (IS_FWI2_CAPABLE(ha))
4111 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
4112 sizeof(ha->model_desc));
4113 }
4114
/* On sparc systems, obtain port and node WWN from firmware
 * properties: the "port-wwn" / "node-wwn" Open Firmware device-tree
 * entries are copied into @nv when present and long enough.  Compiles
 * to a no-op on non-SPARC builds.
 */
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}
4136
4137 /*
4138 * NVRAM configuration for ISP 2xxx
4139 *
4140 * Input:
4141 * ha = adapter block pointer.
4142 *
4143 * Output:
4144 * initialization control block in response_ring
4145 * host adapters parameters in host adapter block
4146 *
4147 * Returns:
4148 * 0 = success.
4149 */
4150 int
4151 qla2x00_nvram_config(scsi_qla_host_t *vha)
4152 {
4153 int rval;
4154 uint8_t chksum = 0;
4155 uint16_t cnt;
4156 uint8_t *dptr1, *dptr2;
4157 struct qla_hw_data *ha = vha->hw;
4158 init_cb_t *icb = ha->init_cb;
4159 nvram_t *nv = ha->nvram;
4160 uint8_t *ptr = ha->nvram;
4161 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4162
4163 rval = QLA_SUCCESS;
4164
4165 /* Determine NVRAM starting address. */
4166 ha->nvram_size = sizeof(nvram_t);
4167 ha->nvram_base = 0;
4168 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
4169 if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
4170 ha->nvram_base = 0x80;
4171
4172 /* Get NVRAM data and calculate checksum. */
4173 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
4174 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
4175 chksum += *ptr++;
4176
4177 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
4178 "Contents of NVRAM.\n");
4179 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
4180 (uint8_t *)nv, ha->nvram_size);
4181
4182 /* Bad NVRAM data, set defaults parameters. */
4183 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
4184 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
4185 /* Reset NVRAM data. */
4186 ql_log(ql_log_warn, vha, 0x0064,
4187 "Inconsistent NVRAM "
4188 "detected: checksum=0x%x id=%c version=0x%x.\n",
4189 chksum, nv->id[0], nv->nvram_version);
4190 ql_log(ql_log_warn, vha, 0x0065,
4191 "Falling back to "
4192 "functioning (yet invalid -- WWPN) defaults.\n");
4193
4194 /*
4195 * Set default initialization control block.
4196 */
4197 memset(nv, 0, ha->nvram_size);
4198 nv->parameter_block_version = ICB_VERSION;
4199
4200 if (IS_QLA23XX(ha)) {
4201 nv->firmware_options[0] = BIT_2 | BIT_1;
4202 nv->firmware_options[1] = BIT_7 | BIT_5;
4203 nv->add_firmware_options[0] = BIT_5;
4204 nv->add_firmware_options[1] = BIT_5 | BIT_4;
4205 nv->frame_payload_size = 2048;
4206 nv->special_options[1] = BIT_7;
4207 } else if (IS_QLA2200(ha)) {
4208 nv->firmware_options[0] = BIT_2 | BIT_1;
4209 nv->firmware_options[1] = BIT_7 | BIT_5;
4210 nv->add_firmware_options[0] = BIT_5;
4211 nv->add_firmware_options[1] = BIT_5 | BIT_4;
4212 nv->frame_payload_size = 1024;
4213 } else if (IS_QLA2100(ha)) {
4214 nv->firmware_options[0] = BIT_3 | BIT_1;
4215 nv->firmware_options[1] = BIT_5;
4216 nv->frame_payload_size = 1024;
4217 }
4218
4219 nv->max_iocb_allocation = cpu_to_le16(256);
4220 nv->execution_throttle = cpu_to_le16(16);
4221 nv->retry_count = 8;
4222 nv->retry_delay = 1;
4223
4224 nv->port_name[0] = 33;
4225 nv->port_name[3] = 224;
4226 nv->port_name[4] = 139;
4227
4228 qla2xxx_nvram_wwn_from_ofw(vha, nv);
4229
4230 nv->login_timeout = 4;
4231
4232 /*
4233 * Set default host adapter parameters
4234 */
4235 nv->host_p[1] = BIT_2;
4236 nv->reset_delay = 5;
4237 nv->port_down_retry_count = 8;
4238 nv->max_luns_per_target = cpu_to_le16(8);
4239 nv->link_down_timeout = 60;
4240
4241 rval = 1;
4242 }
4243
4244 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
4245 /*
4246 * The SN2 does not provide BIOS emulation which means you can't change
4247 * potentially bogus BIOS settings. Force the use of default settings
4248 * for link rate and frame size. Hope that the rest of the settings
4249 * are valid.
4250 */
4251 if (ia64_platform_is("sn2")) {
4252 nv->frame_payload_size = 2048;
4253 if (IS_QLA23XX(ha))
4254 nv->special_options[1] = BIT_7;
4255 }
4256 #endif
4257
4258 /* Reset Initialization control block */
4259 memset(icb, 0, ha->init_cb_size);
4260
4261 /*
4262 * Setup driver NVRAM options.
4263 */
4264 nv->firmware_options[0] |= (BIT_6 | BIT_1);
4265 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
4266 nv->firmware_options[1] |= (BIT_5 | BIT_0);
4267 nv->firmware_options[1] &= ~BIT_4;
4268
4269 if (IS_QLA23XX(ha)) {
4270 nv->firmware_options[0] |= BIT_2;
4271 nv->firmware_options[0] &= ~BIT_3;
4272 nv->special_options[0] &= ~BIT_6;
4273 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
4274
4275 if (IS_QLA2300(ha)) {
4276 if (ha->fb_rev == FPM_2310) {
4277 strcpy(ha->model_number, "QLA2310");
4278 } else {
4279 strcpy(ha->model_number, "QLA2300");
4280 }
4281 } else {
4282 qla2x00_set_model_info(vha, nv->model_number,
4283 sizeof(nv->model_number), "QLA23xx");
4284 }
4285 } else if (IS_QLA2200(ha)) {
4286 nv->firmware_options[0] |= BIT_2;
4287 /*
4288 * 'Point-to-point preferred, else loop' is not a safe
4289 * connection mode setting.
4290 */
4291 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
4292 (BIT_5 | BIT_4)) {
4293 /* Force 'loop preferred, else point-to-point'. */
4294 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
4295 nv->add_firmware_options[0] |= BIT_5;
4296 }
4297 strcpy(ha->model_number, "QLA22xx");
4298 } else /*if (IS_QLA2100(ha))*/ {
4299 strcpy(ha->model_number, "QLA2100");
4300 }
4301
4302 /*
4303 * Copy over NVRAM RISC parameter block to initialization control block.
4304 */
4305 dptr1 = (uint8_t *)icb;
4306 dptr2 = (uint8_t *)&nv->parameter_block_version;
4307 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
4308 while (cnt--)
4309 *dptr1++ = *dptr2++;
4310
4311 /* Copy 2nd half. */
4312 dptr1 = (uint8_t *)icb->add_firmware_options;
4313 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
4314 while (cnt--)
4315 *dptr1++ = *dptr2++;
4316
4317 /* Use alternate WWN? */
4318 if (nv->host_p[1] & BIT_7) {
4319 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4320 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4321 }
4322
4323 /* Prepare nodename */
4324 if ((icb->firmware_options[1] & BIT_6) == 0) {
4325 /*
4326 * Firmware will apply the following mask if the nodename was
4327 * not provided.
4328 */
4329 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4330 icb->node_name[0] &= 0xF0;
4331 }
4332
4333 /*
4334 * Set host adapter parameters.
4335 */
4336
4337 /*
4338 * BIT_7 in the host-parameters section allows for modification to
4339 * internal driver logging.
4340 */
4341 if (nv->host_p[0] & BIT_7)
4342 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
4343 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
4344 /* Always load RISC code on non ISP2[12]00 chips. */
4345 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
4346 ha->flags.disable_risc_code_load = 0;
4347 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
4348 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
4349 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
4350 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
4351 ha->flags.disable_serdes = 0;
4352
4353 ha->operating_mode =
4354 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
4355
4356 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
4357 sizeof(ha->fw_seriallink_options));
4358
4359 /* save HBA serial number */
4360 ha->serial0 = icb->port_name[5];
4361 ha->serial1 = icb->port_name[6];
4362 ha->serial2 = icb->port_name[7];
4363 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4364 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
4365
4366 icb->execution_throttle = cpu_to_le16(0xFFFF);
4367
4368 ha->retry_count = nv->retry_count;
4369
4370 /* Set minimum login_timeout to 4 seconds. */
4371 if (nv->login_timeout != ql2xlogintimeout)
4372 nv->login_timeout = ql2xlogintimeout;
4373 if (nv->login_timeout < 4)
4374 nv->login_timeout = 4;
4375 ha->login_timeout = nv->login_timeout;
4376
4377 /* Set minimum RATOV to 100 tenths of a second. */
4378 ha->r_a_tov = 100;
4379
4380 ha->loop_reset_delay = nv->reset_delay;
4381
4382 /* Link Down Timeout = 0:
4383 *
4384 * When Port Down timer expires we will start returning
4385 * I/O's to OS with "DID_NO_CONNECT".
4386 *
4387 * Link Down Timeout != 0:
4388 *
4389 * The driver waits for the link to come up after link down
4390 * before returning I/Os to OS with "DID_NO_CONNECT".
4391 */
4392 if (nv->link_down_timeout == 0) {
4393 ha->loop_down_abort_time =
4394 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4395 } else {
4396 ha->link_down_timeout = nv->link_down_timeout;
4397 ha->loop_down_abort_time =
4398 (LOOP_DOWN_TIME - ha->link_down_timeout);
4399 }
4400
4401 /*
4402 * Need enough time to try and get the port back.
4403 */
4404 ha->port_down_retry_count = nv->port_down_retry_count;
4405 if (qlport_down_retry)
4406 ha->port_down_retry_count = qlport_down_retry;
4407 /* Set login_retry_count */
4408 ha->login_retry_count = nv->retry_count;
4409 if (ha->port_down_retry_count == nv->port_down_retry_count &&
4410 ha->port_down_retry_count > 3)
4411 ha->login_retry_count = ha->port_down_retry_count;
4412 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4413 ha->login_retry_count = ha->port_down_retry_count;
4414 if (ql2xloginretrycount)
4415 ha->login_retry_count = ql2xloginretrycount;
4416
4417 icb->lun_enables = cpu_to_le16(0);
4418 icb->command_resource_count = 0;
4419 icb->immediate_notify_resource_count = 0;
4420 icb->timeout = cpu_to_le16(0);
4421
4422 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4423 /* Enable RIO */
4424 icb->firmware_options[0] &= ~BIT_3;
4425 icb->add_firmware_options[0] &=
4426 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
4427 icb->add_firmware_options[0] |= BIT_2;
4428 icb->response_accumulation_timer = 3;
4429 icb->interrupt_delay_timer = 5;
4430
4431 vha->flags.process_response_queue = 1;
4432 } else {
4433 /* Enable ZIO. */
4434 if (!vha->flags.init_done) {
4435 ha->zio_mode = icb->add_firmware_options[0] &
4436 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4437 ha->zio_timer = icb->interrupt_delay_timer ?
4438 icb->interrupt_delay_timer: 2;
4439 }
4440 icb->add_firmware_options[0] &=
4441 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
4442 vha->flags.process_response_queue = 0;
4443 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4444 ha->zio_mode = QLA_ZIO_MODE_6;
4445
4446 ql_log(ql_log_info, vha, 0x0068,
4447 "ZIO mode %d enabled; timer delay (%d us).\n",
4448 ha->zio_mode, ha->zio_timer * 100);
4449
4450 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
4451 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
4452 vha->flags.process_response_queue = 1;
4453 }
4454 }
4455
4456 if (rval) {
4457 ql_log(ql_log_warn, vha, 0x0069,
4458 "NVRAM configuration failed.\n");
4459 }
4460 return (rval);
4461 }
4462
4463 static void
4464 qla2x00_rport_del(void *data)
4465 {
4466 fc_port_t *fcport = data;
4467 struct fc_rport *rport;
4468 unsigned long flags;
4469
4470 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
4471 rport = fcport->drport ? fcport->drport: fcport->rport;
4472 fcport->drport = NULL;
4473 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
4474 if (rport) {
4475 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
4476 "%s %8phN. rport %p roles %x\n",
4477 __func__, fcport->port_name, rport,
4478 rport->roles);
4479
4480 fc_remote_port_delete(rport);
4481 }
4482 }
4483
4484 /**
4485 * qla2x00_alloc_fcport() - Allocate a generic fcport.
4486 * @ha: HA context
4487 * @flags: allocation flags
4488 *
4489 * Returns a pointer to the allocated fcport, or NULL, if none available.
4490 */
4491 fc_port_t *
4492 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
4493 {
4494 fc_port_t *fcport;
4495
4496 fcport = kzalloc(sizeof(fc_port_t), flags);
4497 if (!fcport)
4498 return NULL;
4499
4500 /* Setup fcport template structure. */
4501 fcport->vha = vha;
4502 fcport->port_type = FCT_UNKNOWN;
4503 fcport->loop_id = FC_NO_LOOP_ID;
4504 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
4505 fcport->supported_classes = FC_COS_UNSPECIFIED;
4506
4507 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
4508 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
4509 flags);
4510 fcport->disc_state = DSC_DELETED;
4511 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
4512 fcport->deleted = QLA_SESS_DELETED;
4513 fcport->login_retry = vha->hw->login_retry_count;
4514 fcport->login_retry = 5;
4515 fcport->logout_on_delete = 1;
4516
4517 if (!fcport->ct_desc.ct_sns) {
4518 ql_log(ql_log_warn, vha, 0xd049,
4519 "Failed to allocate ct_sns request.\n");
4520 kfree(fcport);
4521 fcport = NULL;
4522 }
4523 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
4524 INIT_LIST_HEAD(&fcport->gnl_entry);
4525 INIT_LIST_HEAD(&fcport->list);
4526
4527 return fcport;
4528 }
4529
4530 void
4531 qla2x00_free_fcport(fc_port_t *fcport)
4532 {
4533 if (fcport->ct_desc.ct_sns) {
4534 dma_free_coherent(&fcport->vha->hw->pdev->dev,
4535 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
4536 fcport->ct_desc.ct_sns_dma);
4537
4538 fcport->ct_desc.ct_sns = NULL;
4539 }
4540 kfree(fcport);
4541 }
4542
/*
 * qla2x00_configure_loop
 *      Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *      vha = adapter block pointer.
 *
 * Returns:
 *      0 = success.
 *      1 = error.
 *      2 = database was full and device was not configured.
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
        int rval;
        /*
         * NOTE(review): "flags" serves double duty -- first as a working
         * snapshot of dpc_flags, later as irqsave storage for the spinlocks
         * near the end. The dpc snapshot is no longer needed by then, but
         * the reuse is easy to miss.
         */
        unsigned long flags, save_flags;
        struct qla_hw_data *ha = vha->hw;
        rval = QLA_SUCCESS;

        /* Get Initiator ID */
        if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
                rval = qla2x00_configure_hba(vha);
                if (rval != QLA_SUCCESS) {
                        ql_dbg(ql_dbg_disc, vha, 0x2013,
                            "Unable to configure HBA.\n");
                        return (rval);
                }
        }

        /* save_flags re-arms any update bits at the end if a resync hit. */
        save_flags = flags = vha->dpc_flags;
        ql_dbg(ql_dbg_disc, vha, 0x2014,
            "Configure loop -- dpc flags = 0x%lx.\n", flags);

        /*
         * If we have both an RSCN and PORT UPDATE pending then handle them
         * both at the same time.
         */
        clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        clear_bit(RSCN_UPDATE, &vha->dpc_flags);

        qla2x00_get_data_rate(vha);

        /* Determine what we need to do */
        if (ha->current_topology == ISP_CFG_FL &&
            (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

                /* FL-port: local loop update also implies a fabric rescan. */
                set_bit(RSCN_UPDATE, &flags);

        } else if (ha->current_topology == ISP_CFG_F &&
            (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

                /* Pure fabric: no local loop to scan, fabric scan only. */
                set_bit(RSCN_UPDATE, &flags);
                clear_bit(LOCAL_LOOP_UPDATE, &flags);

        } else if (ha->current_topology == ISP_CFG_N) {
                clear_bit(RSCN_UPDATE, &flags);
                if (ha->flags.rida_fmt2) {
                        /* With Rida Format 2, the login is already triggered.
                         * We know who is on the other side of the wire.
                         * No need to login to do login to find out or drop into
                         * qla2x00_configure_local_loop().
                         */
                        clear_bit(LOCAL_LOOP_UPDATE, &flags);
                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                } else {
                        if (qla_tgt_mode_enabled(vha)) {
                                /* allow the other side to start the login */
                                clear_bit(LOCAL_LOOP_UPDATE, &flags);
                                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                        }
                }
        } else if (ha->current_topology == ISP_CFG_NL) {
                /* Private loop: local loop scan only. */
                clear_bit(RSCN_UPDATE, &flags);
                set_bit(LOCAL_LOOP_UPDATE, &flags);
        } else if (!vha->flags.online ||
            (test_bit(ABORT_ISP_ACTIVE, &flags))) {
                /* Not yet online or mid ISP-abort: rescan everything. */
                set_bit(RSCN_UPDATE, &flags);
                set_bit(LOCAL_LOOP_UPDATE, &flags);
        }

        if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
                if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                        ql_dbg(ql_dbg_disc, vha, 0x2015,
                            "Loop resync needed, failing.\n");
                        rval = QLA_FUNCTION_FAILED;
                } else
                        rval = qla2x00_configure_local_loop(vha);
        }

        if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
                if (LOOP_TRANSITION(vha)) {
                        ql_dbg(ql_dbg_disc, vha, 0x2099,
                            "Needs RSCN update and loop transition.\n");
                        rval = QLA_FUNCTION_FAILED;
                }
                else
                        rval = qla2x00_configure_fabric(vha);
        }

        if (rval == QLA_SUCCESS) {
                /* A loop-down or pending resync invalidates this scan. */
                if (atomic_read(&vha->loop_down_timer) ||
                    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                        rval = QLA_FUNCTION_FAILED;
                } else {
                        atomic_set(&vha->loop_state, LOOP_READY);
                        ql_dbg(ql_dbg_disc, vha, 0x2069,
                            "LOOP READY.\n");
                        ha->flags.fw_init_done = 1;

                        /*
                         * Process any ATIO queue entries that came in
                         * while we weren't online.
                         */
                        if (qla_tgt_mode_enabled(vha) ||
                            qla_dual_mode_enabled(vha)) {
                                /* 83xx/27xx use a dedicated ATIO lock. */
                                if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
                                        spin_lock_irqsave(&ha->tgt.atio_lock,
                                            flags);
                                        qlt_24xx_process_atio_queue(vha, 0);
                                        spin_unlock_irqrestore(
                                            &ha->tgt.atio_lock, flags);
                                } else {
                                        spin_lock_irqsave(&ha->hardware_lock,
                                            flags);
                                        qlt_24xx_process_atio_queue(vha, 1);
                                        spin_unlock_irqrestore(
                                            &ha->hardware_lock, flags);
                                }
                        }
                }
        }

        if (rval) {
                ql_dbg(ql_dbg_disc, vha, 0x206a,
                    "%s *** FAILED ***.\n", __func__);
        } else {
                ql_dbg(ql_dbg_disc, vha, 0x206b,
                    "%s: exiting normally.\n", __func__);
        }

        /* Restore state if a resync event occurred during processing */
        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
                        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
                if (test_bit(RSCN_UPDATE, &save_flags)) {
                        set_bit(RSCN_UPDATE, &vha->dpc_flags);
                }
        }

        return (rval);
}
4695
/*
 * N2N Login
 *      Handles point-to-point (N2N) login for the remote N_Port.
 *
 * Input:
 *      vha = adapter block pointer.
 *      fcport = template port entry describing the remote N_Port.
 *
 * Returns:
 *      QLA_SUCCESS on success or when no action is needed; otherwise
 *      the failure status of the PLOGI ELS.
 */
static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha,
        fc_port_t *fcport)
{
        struct qla_hw_data *ha = vha->hw;
        int res = QLA_SUCCESS, rval;
        int greater_wwpn = 0;
        int logged_in = 0;

        /* Only applicable in point-to-point (N2N) topology. */
        if (ha->current_topology != ISP_CFG_N)
                return res;

        /*
         * The port with the numerically greater WWPN initiates the login
         * and uses the peer-assigned d_id.
         */
        if (wwn_to_u64(vha->port_name) >
            wwn_to_u64(vha->n2n_port_name)) {
                ql_dbg(ql_dbg_disc, vha, 0x2002,
                    "HBA WWPN is greater %llx > target %llx\n",
                    wwn_to_u64(vha->port_name),
                    wwn_to_u64(vha->n2n_port_name));
                greater_wwpn = 1;
                fcport->d_id.b24 = vha->n2n_id;
        }

        fcport->loop_id = vha->loop_id;
        fcport->fc4f_nvme = 0;
        fcport->query = 1;

        ql_dbg(ql_dbg_disc, vha, 0x4001,
            "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n",
            fcport->d_id.b24, vha->loop_id);

        /* Fill in member data. */
        if (!greater_wwpn) {
                /*
                 * Peer initiated the login; query firmware for the current
                 * login state. rval is only reported in the debug message.
                 */
                rval = qla2x00_get_port_database(vha, fcport, 0);
                ql_dbg(ql_dbg_disc, vha, 0x1051,
                    "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n",
                    fcport->current_login_state, fcport->last_login_state,
                    fcport->d_id.b24, fcport->loop_id, rval);

                /* Firmware states 0x4 (PLOGI done) / 0x6 (PRLI done). */
                if (((fcport->current_login_state & 0xf) == 0x4) ||
                    ((fcport->current_login_state & 0xf) == 0x6))
                        logged_in = 1;
        }

        if (logged_in || greater_wwpn) {
                if (!vha->nvme_local_port && vha->flags.nvme_enabled)
                        qla_nvme_register_hba(vha);

                /* Set connected N_Port d_id */
                if (vha->flags.nvme_enabled)
                        fcport->fc4f_nvme = 1;

                /* Mark the session found and kick off discovery state. */
                fcport->scan_state = QLA_FCPORT_FOUND;
                fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
                fcport->disc_state = DSC_GNL;
                fcport->n2n_flag = 1;
                fcport->flags = 3;
                vha->hw->flags.gpsc_supported = 0;

                if (greater_wwpn) {
                        ql_dbg(ql_dbg_disc, vha, 0x20e5,
                            "%s %d PLOGI ELS %8phC\n",
                            __func__, __LINE__, fcport->port_name);

                        /* We are the initiator: issue the PLOGI ELS. */
                        res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
                            fcport, fcport->d_id);
                }

                if (res != QLA_SUCCESS) {
                        ql_log(ql_log_info, vha, 0xd04d,
                            "PLOGI Failed: portid=%06x - retrying\n",
                            fcport->d_id.b24);
                        /* Report success so the caller retries later. */
                        res = QLA_SUCCESS;
                } else {
                        /* State 0x6 means FCP PRLI complete */
                        if ((fcport->current_login_state & 0xf) == 0x6) {
                                ql_dbg(ql_dbg_disc, vha, 0x2118,
                                    "%s %d %8phC post GPDB work\n",
                                    __func__, __LINE__, fcport->port_name);
                                fcport->chip_reset =
                                    vha->hw->base_qpair->chip_reset;
                                qla24xx_post_gpdb_work(vha, fcport, 0);
                        } else {
                                ql_dbg(ql_dbg_disc, vha, 0x2118,
                                    "%s %d %8phC post NVMe PRLI\n",
                                    __func__, __LINE__, fcport->port_name);
                                qla24xx_post_prli_work(vha, fcport);
                        }
                }
        } else {
                /* Wait for next database change */
                set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
        }

        return res;
}
4799
/*
 * qla2x00_configure_local_loop
 *      Updates Fibre Channel Device Database with local loop devices.
 *
 * Input:
 *      vha = adapter block pointer.
 *
 * Returns:
 *      0 = success.
 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
        int rval, rval2;
        int found_devs;
        int found;
        fc_port_t *fcport, *new_fcport;

        uint16_t index;
        uint16_t entries;
        char *id_iter;
        uint16_t loop_id;
        uint8_t domain, area, al_pa;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        found_devs = 0;
        new_fcport = NULL;
        entries = MAX_FIBRE_DEVICES_LOOP;

        /* Get list of logged in devices. */
        memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
        rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
            &entries);
        if (rval != QLA_SUCCESS)
                goto cleanup_allocation;

        ql_dbg(ql_dbg_disc, vha, 0x2011,
            "Entries in ID list (%d).\n", entries);
        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
            (uint8_t *)ha->gid_list,
            entries * sizeof(struct gid_list_info));

        /* Mark all ports stale; rediscovered ones flip back to FOUND. */
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                fcport->scan_state = QLA_FCPORT_SCAN;
        }

        /* Allocate temporary fcport for any new fcports discovered. */
        new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (new_fcport == NULL) {
                ql_log(ql_log_warn, vha, 0x2012,
                    "Memory allocation failed for fcport.\n");
                rval = QLA_MEMORY_ALLOC_FAILED;
                goto cleanup_allocation;
        }
        new_fcport->flags &= ~FCF_FABRIC_DEVICE;

        /* Initiate N2N login. */
        if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
                rval = qla24xx_n2n_handle_login(vha, new_fcport);
                if (rval != QLA_SUCCESS)
                        goto cleanup_allocation;
                /*
                 * NOTE(review): on this success return, new_fcport is not
                 * freed nor added to vp_fcports, yet the N2N handler may
                 * have queued work referencing it -- ownership here is
                 * unclear; verify against qla24xx_n2n_handle_login().
                 */
                return QLA_SUCCESS;
        }

        /* Add devices to port list. */
        id_iter = (char *)ha->gid_list;
        for (index = 0; index < entries; index++) {
                domain = ((struct gid_list_info *)id_iter)->domain;
                area = ((struct gid_list_info *)id_iter)->area;
                al_pa = ((struct gid_list_info *)id_iter)->al_pa;
                /* ISP2100/2200 use an 8-bit loop ID in the gid list. */
                if (IS_QLA2100(ha) || IS_QLA2200(ha))
                        loop_id = (uint16_t)
                            ((struct gid_list_info *)id_iter)->loop_id_2100;
                else
                        loop_id = le16_to_cpu(
                            ((struct gid_list_info *)id_iter)->loop_id);
                id_iter += ha->gid_list_info_size;

                /* Bypass reserved domain fields. */
                if ((domain & 0xf0) == 0xf0)
                        continue;

                /* Bypass if not same domain and area of adapter. */
                if (area && domain &&
                    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
                        continue;

                /* Bypass invalid local loop ID. */
                if (loop_id > LAST_LOCAL_LOOP_ID)
                        continue;

                memset(new_fcport->port_name, 0, WWN_SIZE);

                /* Fill in member data. */
                new_fcport->d_id.b.domain = domain;
                new_fcport->d_id.b.area = area;
                new_fcport->d_id.b.al_pa = al_pa;
                new_fcport->loop_id = loop_id;
                new_fcport->scan_state = QLA_FCPORT_FOUND;

                rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
                if (rval2 != QLA_SUCCESS) {
                        ql_dbg(ql_dbg_disc, vha, 0x2097,
                            "Failed to retrieve fcport information "
                            "-- get_port_database=%x, loop_id=0x%04x.\n",
                            rval2, new_fcport->loop_id);
                        /* Skip retry if N2N */
                        if (ha->current_topology != ISP_CFG_N) {
                                ql_dbg(ql_dbg_disc, vha, 0x2105,
                                    "Scheduling resync.\n");
                                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                                continue;
                        }
                }

                spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
                /* Check for matching device in port list. */
                found = 0;
                fcport = NULL;
                list_for_each_entry(fcport, &vha->vp_fcports, list) {
                        if (memcmp(new_fcport->port_name, fcport->port_name,
                            WWN_SIZE))
                                continue;

                        /* Known port rediscovered: refresh its identity. */
                        fcport->flags &= ~FCF_FABRIC_DEVICE;
                        fcport->loop_id = new_fcport->loop_id;
                        fcport->port_type = new_fcport->port_type;
                        fcport->d_id.b24 = new_fcport->d_id.b24;
                        memcpy(fcport->node_name, new_fcport->node_name,
                            WWN_SIZE);
                        fcport->scan_state = QLA_FCPORT_FOUND;
                        found++;
                        break;
                }

                if (!found) {
                        /* New device, add to fcports list. */
                        list_add_tail(&new_fcport->list, &vha->vp_fcports);

                        /* Allocate a new replacement fcport. */
                        fcport = new_fcport;

                        /* Drop the lock: the allocation below may sleep. */
                        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

                        new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

                        if (new_fcport == NULL) {
                                ql_log(ql_log_warn, vha, 0xd031,
                                    "Failed to allocate memory for fcport.\n");
                                rval = QLA_MEMORY_ALLOC_FAILED;
                                goto cleanup_allocation;
                        }
                        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
                        new_fcport->flags &= ~FCF_FABRIC_DEVICE;
                }

                spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

                /* Base iIDMA settings on HBA port speed. */
                fcport->fp_speed = ha->link_data_rate;

                found_devs++;
        }

        /* Second pass: drop stale sessions, log in rediscovered ones. */
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                        break;

                if (fcport->scan_state == QLA_FCPORT_SCAN) {
                        if ((qla_dual_mode_enabled(vha) ||
                            qla_ini_mode_enabled(vha)) &&
                            atomic_read(&fcport->state) == FCS_ONLINE) {
                                qla2x00_mark_device_lost(vha, fcport,
                                        ql2xplogiabsentdevice, 0);
                                if (fcport->loop_id != FC_NO_LOOP_ID &&
                                    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
                                    fcport->port_type != FCT_INITIATOR &&
                                    fcport->port_type != FCT_BROADCAST) {
                                        ql_dbg(ql_dbg_disc, vha, 0x20f0,
                                            "%s %d %8phC post del sess\n",
                                            __func__, __LINE__,
                                            fcport->port_name);

                                        qlt_schedule_sess_for_deletion(fcport);
                                        continue;
                                }
                        }
                }

                if (fcport->scan_state == QLA_FCPORT_FOUND)
                        qla24xx_fcport_handle_login(vha, fcport);
        }

cleanup_allocation:
        kfree(new_fcport);

        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_disc, vha, 0x2098,
                    "Configure local loop error exit: rval=%x.\n", rval);
        }

        return (rval);
}
5004
5005 static void
5006 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
5007 {
5008 int rval;
5009 uint16_t mb[MAILBOX_REGISTER_COUNT];
5010 struct qla_hw_data *ha = vha->hw;
5011
5012 if (!IS_IIDMA_CAPABLE(ha))
5013 return;
5014
5015 if (atomic_read(&fcport->state) != FCS_ONLINE)
5016 return;
5017
5018 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
5019 fcport->fp_speed > ha->link_data_rate)
5020 return;
5021
5022 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
5023 mb);
5024 if (rval != QLA_SUCCESS) {
5025 ql_dbg(ql_dbg_disc, vha, 0x2004,
5026 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
5027 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
5028 } else {
5029 ql_dbg(ql_dbg_disc, vha, 0x2005,
5030 "iIDMA adjusted to %s GB/s on %8phN.\n",
5031 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
5032 fcport->port_name);
5033 }
5034 }
5035
5036 /* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
5037 static void
5038 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
5039 {
5040 struct fc_rport_identifiers rport_ids;
5041 struct fc_rport *rport;
5042 unsigned long flags;
5043
5044 rport_ids.node_name = wwn_to_u64(fcport->node_name);
5045 rport_ids.port_name = wwn_to_u64(fcport->port_name);
5046 rport_ids.port_id = fcport->d_id.b.domain << 16 |
5047 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
5048 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
5049 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
5050 if (!rport) {
5051 ql_log(ql_log_warn, vha, 0x2006,
5052 "Unable to allocate fc remote port.\n");
5053 return;
5054 }
5055
5056 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
5057 *((fc_port_t **)rport->dd_data) = fcport;
5058 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
5059
5060 rport->supported_classes = fcport->supported_classes;
5061
5062 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
5063 if (fcport->port_type == FCT_INITIATOR)
5064 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
5065 if (fcport->port_type == FCT_TARGET)
5066 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
5067
5068 ql_dbg(ql_dbg_disc, vha, 0x20ee,
5069 "%s %8phN. rport %p is %s mode\n",
5070 __func__, fcport->port_name, rport,
5071 (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
5072
5073 fc_remote_port_rolechg(rport, rport_ids.roles);
5074 }
5075
/*
 * qla2x00_update_fcport
 *      Updates device on list.
 *
 * Input:
 *      vha = adapter block pointer.
 *      fcport = port structure pointer.
 *
 * Return:
 *      None.
 *
 * Context:
 *      Kernel context.
 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
        fcport->vha = vha;

        /* Reserved switch addresses are never registered as remote ports. */
        if (IS_SW_RESV_ADDR(fcport->d_id))
                return;

        ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
            __func__, fcport->port_name);

        if (IS_QLAFX00(vha->hw)) {
                /* ISPFx00: no FC discovery state machine, register directly. */
                qla2x00_set_fcport_state(fcport, FCS_ONLINE);
                goto reg_port;
        }
        /* Login has completed: reset retry/login-needed bookkeeping. */
        fcport->login_retry = 0;
        fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
        fcport->disc_state = DSC_LOGIN_COMPLETE;
        fcport->deleted = 0;
        fcport->logout_on_delete = 1;

        if (fcport->fc4f_nvme) {
                /* NVMe port: hand off to the FC-NVMe transport instead. */
                qla_nvme_register_remote(vha, fcport);
                return;
        }

        qla2x00_set_fcport_state(fcport, FCS_ONLINE);
        qla2x00_iidma_fcport(vha, fcport);
        qla24xx_update_fcport_fcp_prio(vha, fcport);

reg_port:
        /* Register with the FC transport and/or target core per host mode. */
        switch (vha->host->active_mode) {
        case MODE_INITIATOR:
                qla2x00_reg_remote_port(vha, fcport);
                break;
        case MODE_TARGET:
                if (!vha->vha_tgt.qla_tgt->tgt_stop &&
                        !vha->vha_tgt.qla_tgt->tgt_stopped)
                        qlt_fc_port_added(vha, fcport);
                break;
        case MODE_DUAL:
                qla2x00_reg_remote_port(vha, fcport);
                if (!vha->vha_tgt.qla_tgt->tgt_stop &&
                        !vha->vha_tgt.qla_tgt->tgt_stopped)
                        qlt_fc_port_added(vha, fcport);
                break;
        default:
                break;
        }
}
5141
/*
 * qla2x00_configure_fabric
 *      Setup SNS devices with loop ID's.
 *
 * Input:
 *      vha = adapter block pointer.
 *
 * Returns:
 *      0 = success.
 *      BIT_0 = error
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
        int rval;
        fc_port_t *fcport;
        uint16_t mb[MAILBOX_REGISTER_COUNT];
        uint16_t loop_id;
        LIST_HEAD(new_fcports);
        struct qla_hw_data *ha = vha->hw;
        int discovery_gen;

        /* If FL port exists, then SNS is present */
        if (IS_FWI2_CAPABLE(ha))
                loop_id = NPH_F_PORT;
        else
                loop_id = SNS_FL_PORT;
        rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
        if (rval != QLA_SUCCESS) {
                /* No switch attached -- not an error for this routine. */
                ql_dbg(ql_dbg_disc, vha, 0x20a0,
                    "MBX_GET_PORT_NAME failed, No FL Port.\n");

                vha->device_flags &= ~SWITCH_FOUND;
                return (QLA_SUCCESS);
        }
        vha->device_flags |= SWITCH_FOUND;


        /* Ask the switch to forward RSCNs when target mode is active. */
        if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
                rval = qla2x00_send_change_request(vha, 0x3, 0);
                if (rval != QLA_SUCCESS)
                        ql_log(ql_log_warn, vha, 0x121,
                            "Failed to enable receiving of RSCN requests: 0x%x.\n",
                            rval);
        }


        /* do/while(0): "break" aborts the registration/scan sequence. */
        do {
                qla2x00_mgmt_svr_login(vha);

                /* FDMI support. */
                if (ql2xfdmienable &&
                    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
                        qla2x00_fdmi_register(vha);

                /* Ensure we are logged into the SNS. */
                loop_id = NPH_SNS_LID(ha);
                rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
                    0xfc, mb, BIT_1|BIT_0);
                if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
                        ql_dbg(ql_dbg_disc, vha, 0x20a1,
                            "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
                            loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
                        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                        return rval;
                }
                /* Register FC-4 types/features and names with the switch. */
                if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
                        if (qla2x00_rft_id(vha)) {
                                /* EMPTY */
                                ql_dbg(ql_dbg_disc, vha, 0x20a2,
                                    "Register FC-4 TYPE failed.\n");
                                if (test_bit(LOOP_RESYNC_NEEDED,
                                    &vha->dpc_flags))
                                        break;
                        }
                        if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
                                /* EMPTY */
                                ql_dbg(ql_dbg_disc, vha, 0x209a,
                                    "Register FC-4 Features failed.\n");
                                if (test_bit(LOOP_RESYNC_NEEDED,
                                    &vha->dpc_flags))
                                        break;
                        }
                        if (vha->flags.nvme_enabled) {
                                if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
                                        ql_dbg(ql_dbg_disc, vha, 0x2049,
                                            "Register NVME FC Type Features failed.\n");
                                }
                        }
                        if (qla2x00_rnn_id(vha)) {
                                /* EMPTY */
                                ql_dbg(ql_dbg_disc, vha, 0x2104,
                                    "Register Node Name failed.\n");
                                if (test_bit(LOOP_RESYNC_NEEDED,
                                    &vha->dpc_flags))
                                        break;
                        } else if (qla2x00_rsnn_nn(vha)) {
                                /* EMPTY */
                                ql_dbg(ql_dbg_disc, vha, 0x209b,
                                    "Register Symbolic Node Name failed.\n");
                                if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                                        break;
                        }
                }


                /* Mark the time right before querying FW for connected ports.
                 * This process is long, asynchronous and by the time it's done,
                 * collected information might not be accurate anymore. E.g.
                 * disconnected port might have re-connected and a brand new
                 * session has been created. In this case session's generation
                 * will be newer than discovery_gen. */
                qlt_do_generation_tick(vha, &discovery_gen);

                if (USE_ASYNC_SCAN(ha)) {
                        /* Async GPN_FT scan; the first assignment is
                         * immediately overwritten and has no effect. */
                        rval = QLA_SUCCESS;
                        rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI);
                        if (rval)
                                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                } else {
                        list_for_each_entry(fcport, &vha->vp_fcports, list)
                                fcport->scan_state = QLA_FCPORT_SCAN;

                        rval = qla2x00_find_all_fabric_devs(vha);
                }
                if (rval != QLA_SUCCESS)
                        break;
        } while (0);

        if (!vha->nvme_local_port && vha->flags.nvme_enabled)
                qla_nvme_register_hba(vha);

        if (rval)
                ql_dbg(ql_dbg_disc, vha, 0x2068,
                    "Configure fabric error exit rval=%d.\n", rval);

        return (rval);
}
5280
5281 /*
5282 * qla2x00_find_all_fabric_devs
5283 *
5284 * Input:
5285 * ha = adapter block pointer.
5286 * dev = database device entry pointer.
5287 *
5288 * Returns:
5289 * 0 = success.
5290 *
5291 * Context:
5292 * Kernel context.
5293 */
5294 static int
5295 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
5296 {
5297 int rval;
5298 uint16_t loop_id;
5299 fc_port_t *fcport, *new_fcport;
5300 int found;
5301
5302 sw_info_t *swl;
5303 int swl_idx;
5304 int first_dev, last_dev;
5305 port_id_t wrap = {}, nxt_d_id;
5306 struct qla_hw_data *ha = vha->hw;
5307 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
5308 unsigned long flags;
5309
5310 rval = QLA_SUCCESS;
5311
5312 /* Try GID_PT to get device list, else GAN. */
5313 if (!ha->swl)
5314 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
5315 GFP_KERNEL);
5316 swl = ha->swl;
5317 if (!swl) {
5318 /*EMPTY*/
5319 ql_dbg(ql_dbg_disc, vha, 0x209c,
5320 "GID_PT allocations failed, fallback on GA_NXT.\n");
5321 } else {
5322 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
5323 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
5324 swl = NULL;
5325 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5326 return rval;
5327 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
5328 swl = NULL;
5329 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5330 return rval;
5331 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
5332 swl = NULL;
5333 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5334 return rval;
5335 } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
5336 swl = NULL;
5337 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5338 return rval;
5339 }
5340
5341 /* If other queries succeeded probe for FC-4 type */
5342 if (swl) {
5343 qla2x00_gff_id(vha, swl);
5344 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5345 return rval;
5346 }
5347 }
5348 swl_idx = 0;
5349
5350 /* Allocate temporary fcport for any new fcports discovered. */
5351 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5352 if (new_fcport == NULL) {
5353 ql_log(ql_log_warn, vha, 0x209d,
5354 "Failed to allocate memory for fcport.\n");
5355 return (QLA_MEMORY_ALLOC_FAILED);
5356 }
5357 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
5358 /* Set start port ID scan at adapter ID. */
5359 first_dev = 1;
5360 last_dev = 0;
5361
5362 /* Starting free loop ID. */
5363 loop_id = ha->min_external_loopid;
5364 for (; loop_id <= ha->max_loop_id; loop_id++) {
5365 if (qla2x00_is_reserved_id(vha, loop_id))
5366 continue;
5367
5368 if (ha->current_topology == ISP_CFG_FL &&
5369 (atomic_read(&vha->loop_down_timer) ||
5370 LOOP_TRANSITION(vha))) {
5371 atomic_set(&vha->loop_down_timer, 0);
5372 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5373 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5374 break;
5375 }
5376
5377 if (swl != NULL) {
5378 if (last_dev) {
5379 wrap.b24 = new_fcport->d_id.b24;
5380 } else {
5381 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
5382 memcpy(new_fcport->node_name,
5383 swl[swl_idx].node_name, WWN_SIZE);
5384 memcpy(new_fcport->port_name,
5385 swl[swl_idx].port_name, WWN_SIZE);
5386 memcpy(new_fcport->fabric_port_name,
5387 swl[swl_idx].fabric_port_name, WWN_SIZE);
5388 new_fcport->fp_speed = swl[swl_idx].fp_speed;
5389 new_fcport->fc4_type = swl[swl_idx].fc4_type;
5390
5391 new_fcport->nvme_flag = 0;
5392 new_fcport->fc4f_nvme = 0;
5393 if (vha->flags.nvme_enabled &&
5394 swl[swl_idx].fc4f_nvme) {
5395 new_fcport->fc4f_nvme =
5396 swl[swl_idx].fc4f_nvme;
5397 ql_log(ql_log_info, vha, 0x2131,
5398 "FOUND: NVME port %8phC as FC Type 28h\n",
5399 new_fcport->port_name);
5400 }
5401
5402 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
5403 last_dev = 1;
5404 }
5405 swl_idx++;
5406 }
5407 } else {
5408 /* Send GA_NXT to the switch */
5409 rval = qla2x00_ga_nxt(vha, new_fcport);
5410 if (rval != QLA_SUCCESS) {
5411 ql_log(ql_log_warn, vha, 0x209e,
5412 "SNS scan failed -- assuming "
5413 "zero-entry result.\n");
5414 rval = QLA_SUCCESS;
5415 break;
5416 }
5417 }
5418
5419 /* If wrap on switch device list, exit. */
5420 if (first_dev) {
5421 wrap.b24 = new_fcport->d_id.b24;
5422 first_dev = 0;
5423 } else if (new_fcport->d_id.b24 == wrap.b24) {
5424 ql_dbg(ql_dbg_disc, vha, 0x209f,
5425 "Device wrap (%02x%02x%02x).\n",
5426 new_fcport->d_id.b.domain,
5427 new_fcport->d_id.b.area,
5428 new_fcport->d_id.b.al_pa);
5429 break;
5430 }
5431
5432 /* Bypass if same physical adapter. */
5433 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
5434 continue;
5435
5436 /* Bypass virtual ports of the same host. */
5437 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
5438 continue;
5439
5440 /* Bypass if same domain and area of adapter. */
5441 if (((new_fcport->d_id.b24 & 0xffff00) ==
5442 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
5443 ISP_CFG_FL)
5444 continue;
5445
5446 /* Bypass reserved domain fields. */
5447 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
5448 continue;
5449
5450 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
5451 if (ql2xgffidenable &&
5452 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
5453 new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
5454 continue;
5455
5456 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5457
5458 /* Locate matching device in database. */
5459 found = 0;
5460 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5461 if (memcmp(new_fcport->port_name, fcport->port_name,
5462 WWN_SIZE))
5463 continue;
5464
5465 fcport->scan_state = QLA_FCPORT_FOUND;
5466
5467 found++;
5468
5469 /* Update port state. */
5470 memcpy(fcport->fabric_port_name,
5471 new_fcport->fabric_port_name, WWN_SIZE);
5472 fcport->fp_speed = new_fcport->fp_speed;
5473
5474 /*
5475 * If address the same and state FCS_ONLINE
5476 * (or in target mode), nothing changed.
5477 */
5478 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
5479 (atomic_read(&fcport->state) == FCS_ONLINE ||
5480 (vha->host->active_mode == MODE_TARGET))) {
5481 break;
5482 }
5483
5484 /*
5485 * If device was not a fabric device before.
5486 */
5487 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
5488 fcport->d_id.b24 = new_fcport->d_id.b24;
5489 qla2x00_clear_loop_id(fcport);
5490 fcport->flags |= (FCF_FABRIC_DEVICE |
5491 FCF_LOGIN_NEEDED);
5492 break;
5493 }
5494
5495 /*
5496 * Port ID changed or device was marked to be updated;
5497 * Log it out if still logged in and mark it for
5498 * relogin later.
5499 */
5500 if (qla_tgt_mode_enabled(base_vha)) {
5501 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
5502 "port changed FC ID, %8phC"
5503 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
5504 fcport->port_name,
5505 fcport->d_id.b.domain,
5506 fcport->d_id.b.area,
5507 fcport->d_id.b.al_pa,
5508 fcport->loop_id,
5509 new_fcport->d_id.b.domain,
5510 new_fcport->d_id.b.area,
5511 new_fcport->d_id.b.al_pa);
5512 fcport->d_id.b24 = new_fcport->d_id.b24;
5513 break;
5514 }
5515
5516 fcport->d_id.b24 = new_fcport->d_id.b24;
5517 fcport->flags |= FCF_LOGIN_NEEDED;
5518 break;
5519 }
5520
5521 if (found) {
5522 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5523 continue;
5524 }
5525 /* If device was not in our fcports list, then add it. */
5526 new_fcport->scan_state = QLA_FCPORT_FOUND;
5527 list_add_tail(&new_fcport->list, &vha->vp_fcports);
5528
5529 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5530
5531
5532 /* Allocate a new replacement fcport. */
5533 nxt_d_id.b24 = new_fcport->d_id.b24;
5534 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5535 if (new_fcport == NULL) {
5536 ql_log(ql_log_warn, vha, 0xd032,
5537 "Memory allocation failed for fcport.\n");
5538 return (QLA_MEMORY_ALLOC_FAILED);
5539 }
5540 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
5541 new_fcport->d_id.b24 = nxt_d_id.b24;
5542 }
5543
5544 qla2x00_free_fcport(new_fcport);
5545
5546 /*
5547 * Logout all previous fabric dev marked lost, except FCP2 devices.
5548 */
5549 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5550 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5551 break;
5552
5553 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
5554 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
5555 continue;
5556
5557 if (fcport->scan_state == QLA_FCPORT_SCAN) {
5558 if ((qla_dual_mode_enabled(vha) ||
5559 qla_ini_mode_enabled(vha)) &&
5560 atomic_read(&fcport->state) == FCS_ONLINE) {
5561 qla2x00_mark_device_lost(vha, fcport,
5562 ql2xplogiabsentdevice, 0);
5563 if (fcport->loop_id != FC_NO_LOOP_ID &&
5564 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
5565 fcport->port_type != FCT_INITIATOR &&
5566 fcport->port_type != FCT_BROADCAST) {
5567 ql_dbg(ql_dbg_disc, vha, 0x20f0,
5568 "%s %d %8phC post del sess\n",
5569 __func__, __LINE__,
5570 fcport->port_name);
5571 qlt_schedule_sess_for_deletion(fcport);
5572 continue;
5573 }
5574 }
5575 }
5576
5577 if (fcport->scan_state == QLA_FCPORT_FOUND)
5578 qla24xx_fcport_handle_login(vha, fcport);
5579 }
5580 return (rval);
5581 }
5582
5583 /*
5584 * qla2x00_find_new_loop_id
5585 * Scan through our port list and find a new usable loop ID.
5586 *
5587 * Input:
5588 * ha: adapter state pointer.
5589 * dev: port structure pointer.
5590 *
5591 * Returns:
5592 * qla2x00 local function return status code.
5593 *
5594 * Context:
5595 * Kernel context.
5596 */
5597 int
5598 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
5599 {
5600 int rval;
5601 struct qla_hw_data *ha = vha->hw;
5602 unsigned long flags = 0;
5603
5604 rval = QLA_SUCCESS;
5605
5606 spin_lock_irqsave(&ha->vport_slock, flags);
5607
5608 dev->loop_id = find_first_zero_bit(ha->loop_id_map,
5609 LOOPID_MAP_SIZE);
5610 if (dev->loop_id >= LOOPID_MAP_SIZE ||
5611 qla2x00_is_reserved_id(vha, dev->loop_id)) {
5612 dev->loop_id = FC_NO_LOOP_ID;
5613 rval = QLA_FUNCTION_FAILED;
5614 } else
5615 set_bit(dev->loop_id, ha->loop_id_map);
5616
5617 spin_unlock_irqrestore(&ha->vport_slock, flags);
5618
5619 if (rval == QLA_SUCCESS)
5620 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
5621 "Assigning new loopid=%x, portid=%x.\n",
5622 dev->loop_id, dev->d_id.b24);
5623 else
5624 ql_log(ql_log_warn, dev->vha, 0x2087,
5625 "No loop_id's available, portid=%x.\n",
5626 dev->d_id.b24);
5627
5628 return (rval);
5629 }
5630
5631
5632 /*
5633 * qla2x00_fabric_login
5634 * Issue fabric login command.
5635 *
5636 * Input:
5637 * ha = adapter block pointer.
5638 * device = pointer to FC device type structure.
5639 *
5640 * Returns:
5641 * 0 - Login successfully
5642 * 1 - Login failed
5643 * 2 - Initiator device
5644 * 3 - Fatal error
5645 */
5646 int
5647 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
5648 uint16_t *next_loopid)
5649 {
5650 int rval;
5651 int retry;
5652 uint16_t tmp_loopid;
5653 uint16_t mb[MAILBOX_REGISTER_COUNT];
5654 struct qla_hw_data *ha = vha->hw;
5655
5656 retry = 0;
5657 tmp_loopid = 0;
5658
5659 for (;;) {
5660 ql_dbg(ql_dbg_disc, vha, 0x2000,
5661 "Trying Fabric Login w/loop id 0x%04x for port "
5662 "%02x%02x%02x.\n",
5663 fcport->loop_id, fcport->d_id.b.domain,
5664 fcport->d_id.b.area, fcport->d_id.b.al_pa);
5665
5666 /* Login fcport on switch. */
5667 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
5668 fcport->d_id.b.domain, fcport->d_id.b.area,
5669 fcport->d_id.b.al_pa, mb, BIT_0);
5670 if (rval != QLA_SUCCESS) {
5671 return rval;
5672 }
5673 if (mb[0] == MBS_PORT_ID_USED) {
5674 /*
5675 * Device has another loop ID. The firmware team
5676 * recommends the driver perform an implicit login with
5677 * the specified ID again. The ID we just used is save
5678 * here so we return with an ID that can be tried by
5679 * the next login.
5680 */
5681 retry++;
5682 tmp_loopid = fcport->loop_id;
5683 fcport->loop_id = mb[1];
5684
5685 ql_dbg(ql_dbg_disc, vha, 0x2001,
5686 "Fabric Login: port in use - next loop "
5687 "id=0x%04x, port id= %02x%02x%02x.\n",
5688 fcport->loop_id, fcport->d_id.b.domain,
5689 fcport->d_id.b.area, fcport->d_id.b.al_pa);
5690
5691 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
5692 /*
5693 * Login succeeded.
5694 */
5695 if (retry) {
5696 /* A retry occurred before. */
5697 *next_loopid = tmp_loopid;
5698 } else {
5699 /*
5700 * No retry occurred before. Just increment the
5701 * ID value for next login.
5702 */
5703 *next_loopid = (fcport->loop_id + 1);
5704 }
5705
5706 if (mb[1] & BIT_0) {
5707 fcport->port_type = FCT_INITIATOR;
5708 } else {
5709 fcport->port_type = FCT_TARGET;
5710 if (mb[1] & BIT_1) {
5711 fcport->flags |= FCF_FCP2_DEVICE;
5712 }
5713 }
5714
5715 if (mb[10] & BIT_0)
5716 fcport->supported_classes |= FC_COS_CLASS2;
5717 if (mb[10] & BIT_1)
5718 fcport->supported_classes |= FC_COS_CLASS3;
5719
5720 if (IS_FWI2_CAPABLE(ha)) {
5721 if (mb[10] & BIT_7)
5722 fcport->flags |=
5723 FCF_CONF_COMP_SUPPORTED;
5724 }
5725
5726 rval = QLA_SUCCESS;
5727 break;
5728 } else if (mb[0] == MBS_LOOP_ID_USED) {
5729 /*
5730 * Loop ID already used, try next loop ID.
5731 */
5732 fcport->loop_id++;
5733 rval = qla2x00_find_new_loop_id(vha, fcport);
5734 if (rval != QLA_SUCCESS) {
5735 /* Ran out of loop IDs to use */
5736 break;
5737 }
5738 } else if (mb[0] == MBS_COMMAND_ERROR) {
5739 /*
5740 * Firmware possibly timed out during login. If NO
5741 * retries are left to do then the device is declared
5742 * dead.
5743 */
5744 *next_loopid = fcport->loop_id;
5745 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
5746 fcport->d_id.b.domain, fcport->d_id.b.area,
5747 fcport->d_id.b.al_pa);
5748 qla2x00_mark_device_lost(vha, fcport, 1, 0);
5749
5750 rval = 1;
5751 break;
5752 } else {
5753 /*
5754 * unrecoverable / not handled error
5755 */
5756 ql_dbg(ql_dbg_disc, vha, 0x2002,
5757 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
5758 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
5759 fcport->d_id.b.area, fcport->d_id.b.al_pa,
5760 fcport->loop_id, jiffies);
5761
5762 *next_loopid = fcport->loop_id;
5763 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
5764 fcport->d_id.b.domain, fcport->d_id.b.area,
5765 fcport->d_id.b.al_pa);
5766 qla2x00_clear_loop_id(fcport);
5767 fcport->login_retry = 0;
5768
5769 rval = 3;
5770 break;
5771 }
5772 }
5773
5774 return (rval);
5775 }
5776
5777 /*
5778 * qla2x00_local_device_login
5779 * Issue local device login command.
5780 *
5781 * Input:
5782 * ha = adapter block pointer.
5783 * loop_id = loop id of device to login to.
5784 *
5785 * Returns (Where's the #define!!!!):
5786 * 0 - Login successfully
5787 * 1 - Login failed
5788 * 3 - Fatal error
5789 */
5790 int
5791 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
5792 {
5793 int rval;
5794 uint16_t mb[MAILBOX_REGISTER_COUNT];
5795
5796 memset(mb, 0, sizeof(mb));
5797 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
5798 if (rval == QLA_SUCCESS) {
5799 /* Interrogate mailbox registers for any errors */
5800 if (mb[0] == MBS_COMMAND_ERROR)
5801 rval = 1;
5802 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
5803 /* device not in PCB table */
5804 rval = 3;
5805 }
5806
5807 return (rval);
5808 }
5809
5810 /*
5811 * qla2x00_loop_resync
5812 * Resync with fibre channel devices.
5813 *
5814 * Input:
5815 * ha = adapter block pointer.
5816 *
5817 * Returns:
5818 * 0 = success
5819 */
5820 int
5821 qla2x00_loop_resync(scsi_qla_host_t *vha)
5822 {
5823 int rval = QLA_SUCCESS;
5824 uint32_t wait_time;
5825 struct req_que *req;
5826 struct rsp_que *rsp;
5827
5828 req = vha->req;
5829 rsp = req->rsp;
5830
5831 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5832 if (vha->flags.online) {
5833 if (!(rval = qla2x00_fw_ready(vha))) {
5834 /* Wait at most MAX_TARGET RSCNs for a stable link. */
5835 wait_time = 256;
5836 do {
5837 if (!IS_QLAFX00(vha->hw)) {
5838 /*
5839 * Issue a marker after FW becomes
5840 * ready.
5841 */
5842 qla2x00_marker(vha, req, rsp, 0, 0,
5843 MK_SYNC_ALL);
5844 vha->marker_needed = 0;
5845 }
5846
5847 /* Remap devices on Loop. */
5848 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5849
5850 if (IS_QLAFX00(vha->hw))
5851 qlafx00_configure_devices(vha);
5852 else
5853 qla2x00_configure_loop(vha);
5854
5855 wait_time--;
5856 } while (!atomic_read(&vha->loop_down_timer) &&
5857 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
5858 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
5859 &vha->dpc_flags)));
5860 }
5861 }
5862
5863 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
5864 return (QLA_FUNCTION_FAILED);
5865
5866 if (rval)
5867 ql_dbg(ql_dbg_disc, vha, 0x206c,
5868 "%s *** FAILED ***.\n", __func__);
5869
5870 return (rval);
5871 }
5872
5873 /*
5874 * qla2x00_perform_loop_resync
5875 * Description: This function will set the appropriate flags and call
5876 * qla2x00_loop_resync. If successful loop will be resynced
5877 * Arguments : scsi_qla_host_t pointer
5878 * returm : Success or Failure
5879 */
5880
5881 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
5882 {
5883 int32_t rval = 0;
5884
5885 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
5886 /*Configure the flags so that resync happens properly*/
5887 atomic_set(&ha->loop_down_timer, 0);
5888 if (!(ha->device_flags & DFLG_NO_CABLE)) {
5889 atomic_set(&ha->loop_state, LOOP_UP);
5890 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
5891 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
5892 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
5893
5894 rval = qla2x00_loop_resync(ha);
5895 } else
5896 atomic_set(&ha->loop_state, LOOP_DEAD);
5897
5898 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
5899 }
5900
5901 return rval;
5902 }
5903
5904 void
5905 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
5906 {
5907 fc_port_t *fcport;
5908 struct scsi_qla_host *vha;
5909 struct qla_hw_data *ha = base_vha->hw;
5910 unsigned long flags;
5911
5912 spin_lock_irqsave(&ha->vport_slock, flags);
5913 /* Go with deferred removal of rport references. */
5914 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
5915 atomic_inc(&vha->vref_count);
5916 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5917 if (fcport->drport &&
5918 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
5919 spin_unlock_irqrestore(&ha->vport_slock, flags);
5920 qla2x00_rport_del(fcport);
5921
5922 spin_lock_irqsave(&ha->vport_slock, flags);
5923 }
5924 }
5925 atomic_dec(&vha->vref_count);
5926 wake_up(&vha->vref_waitq);
5927 }
5928 spin_unlock_irqrestore(&ha->vport_slock, flags);
5929 }
5930
5931 /* Assumes idc_lock always held on entry */
5932 void
5933 qla83xx_reset_ownership(scsi_qla_host_t *vha)
5934 {
5935 struct qla_hw_data *ha = vha->hw;
5936 uint32_t drv_presence, drv_presence_mask;
5937 uint32_t dev_part_info1, dev_part_info2, class_type;
5938 uint32_t class_type_mask = 0x3;
5939 uint16_t fcoe_other_function = 0xffff, i;
5940
5941 if (IS_QLA8044(ha)) {
5942 drv_presence = qla8044_rd_direct(vha,
5943 QLA8044_CRB_DRV_ACTIVE_INDEX);
5944 dev_part_info1 = qla8044_rd_direct(vha,
5945 QLA8044_CRB_DEV_PART_INFO_INDEX);
5946 dev_part_info2 = qla8044_rd_direct(vha,
5947 QLA8044_CRB_DEV_PART_INFO2);
5948 } else {
5949 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5950 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
5951 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
5952 }
5953 for (i = 0; i < 8; i++) {
5954 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
5955 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5956 (i != ha->portnum)) {
5957 fcoe_other_function = i;
5958 break;
5959 }
5960 }
5961 if (fcoe_other_function == 0xffff) {
5962 for (i = 0; i < 8; i++) {
5963 class_type = ((dev_part_info2 >> (i * 4)) &
5964 class_type_mask);
5965 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5966 ((i + 8) != ha->portnum)) {
5967 fcoe_other_function = i + 8;
5968 break;
5969 }
5970 }
5971 }
5972 /*
5973 * Prepare drv-presence mask based on fcoe functions present.
5974 * However consider only valid physical fcoe function numbers (0-15).
5975 */
5976 drv_presence_mask = ~((1 << (ha->portnum)) |
5977 ((fcoe_other_function == 0xffff) ?
5978 0 : (1 << (fcoe_other_function))));
5979
5980 /* We are the reset owner iff:
5981 * - No other protocol drivers present.
5982 * - This is the lowest among fcoe functions. */
5983 if (!(drv_presence & drv_presence_mask) &&
5984 (ha->portnum < fcoe_other_function)) {
5985 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
5986 "This host is Reset owner.\n");
5987 ha->flags.nic_core_reset_owner = 1;
5988 }
5989 }
5990
5991 static int
5992 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
5993 {
5994 int rval = QLA_SUCCESS;
5995 struct qla_hw_data *ha = vha->hw;
5996 uint32_t drv_ack;
5997
5998 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5999 if (rval == QLA_SUCCESS) {
6000 drv_ack |= (1 << ha->portnum);
6001 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6002 }
6003
6004 return rval;
6005 }
6006
6007 static int
6008 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
6009 {
6010 int rval = QLA_SUCCESS;
6011 struct qla_hw_data *ha = vha->hw;
6012 uint32_t drv_ack;
6013
6014 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6015 if (rval == QLA_SUCCESS) {
6016 drv_ack &= ~(1 << ha->portnum);
6017 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6018 }
6019
6020 return rval;
6021 }
6022
6023 static const char *
6024 qla83xx_dev_state_to_string(uint32_t dev_state)
6025 {
6026 switch (dev_state) {
6027 case QLA8XXX_DEV_COLD:
6028 return "COLD/RE-INIT";
6029 case QLA8XXX_DEV_INITIALIZING:
6030 return "INITIALIZING";
6031 case QLA8XXX_DEV_READY:
6032 return "READY";
6033 case QLA8XXX_DEV_NEED_RESET:
6034 return "NEED RESET";
6035 case QLA8XXX_DEV_NEED_QUIESCENT:
6036 return "NEED QUIESCENT";
6037 case QLA8XXX_DEV_FAILED:
6038 return "FAILED";
6039 case QLA8XXX_DEV_QUIESCENT:
6040 return "QUIESCENT";
6041 default:
6042 return "Unknown";
6043 }
6044 }
6045
6046 /* Assumes idc-lock always held on entry */
6047 void
6048 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
6049 {
6050 struct qla_hw_data *ha = vha->hw;
6051 uint32_t idc_audit_reg = 0, duration_secs = 0;
6052
6053 switch (audit_type) {
6054 case IDC_AUDIT_TIMESTAMP:
6055 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
6056 idc_audit_reg = (ha->portnum) |
6057 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
6058 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6059 break;
6060
6061 case IDC_AUDIT_COMPLETION:
6062 duration_secs = ((jiffies_to_msecs(jiffies) -
6063 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
6064 idc_audit_reg = (ha->portnum) |
6065 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
6066 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6067 break;
6068
6069 default:
6070 ql_log(ql_log_warn, vha, 0xb078,
6071 "Invalid audit type specified.\n");
6072 break;
6073 }
6074 }
6075
6076 /* Assumes idc_lock always held on entry */
6077 static int
6078 qla83xx_initiating_reset(scsi_qla_host_t *vha)
6079 {
6080 struct qla_hw_data *ha = vha->hw;
6081 uint32_t idc_control, dev_state;
6082
6083 __qla83xx_get_idc_control(vha, &idc_control);
6084 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
6085 ql_log(ql_log_info, vha, 0xb080,
6086 "NIC Core reset has been disabled. idc-control=0x%x\n",
6087 idc_control);
6088 return QLA_FUNCTION_FAILED;
6089 }
6090
6091 /* Set NEED-RESET iff in READY state and we are the reset-owner */
6092 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6093 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
6094 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
6095 QLA8XXX_DEV_NEED_RESET);
6096 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
6097 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
6098 } else {
6099 const char *state = qla83xx_dev_state_to_string(dev_state);
6100 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
6101
6102 /* SV: XXX: Is timeout required here? */
6103 /* Wait for IDC state change READY -> NEED_RESET */
6104 while (dev_state == QLA8XXX_DEV_READY) {
6105 qla83xx_idc_unlock(vha, 0);
6106 msleep(200);
6107 qla83xx_idc_lock(vha, 0);
6108 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6109 }
6110 }
6111
6112 /* Send IDC ack by writing to drv-ack register */
6113 __qla83xx_set_drv_ack(vha);
6114
6115 return QLA_SUCCESS;
6116 }
6117
6118 int
6119 __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
6120 {
6121 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
6122 }
6123
6124 int
6125 __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
6126 {
6127 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
6128 }
6129
6130 static int
6131 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
6132 {
6133 uint32_t drv_presence = 0;
6134 struct qla_hw_data *ha = vha->hw;
6135
6136 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6137 if (drv_presence & (1 << ha->portnum))
6138 return QLA_SUCCESS;
6139 else
6140 return QLA_TEST_FAILED;
6141 }
6142
6143 int
6144 qla83xx_nic_core_reset(scsi_qla_host_t *vha)
6145 {
6146 int rval = QLA_SUCCESS;
6147 struct qla_hw_data *ha = vha->hw;
6148
6149 ql_dbg(ql_dbg_p3p, vha, 0xb058,
6150 "Entered %s().\n", __func__);
6151
6152 if (vha->device_flags & DFLG_DEV_FAILED) {
6153 ql_log(ql_log_warn, vha, 0xb059,
6154 "Device in unrecoverable FAILED state.\n");
6155 return QLA_FUNCTION_FAILED;
6156 }
6157
6158 qla83xx_idc_lock(vha, 0);
6159
6160 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
6161 ql_log(ql_log_warn, vha, 0xb05a,
6162 "Function=0x%x has been removed from IDC participation.\n",
6163 ha->portnum);
6164 rval = QLA_FUNCTION_FAILED;
6165 goto exit;
6166 }
6167
6168 qla83xx_reset_ownership(vha);
6169
6170 rval = qla83xx_initiating_reset(vha);
6171
6172 /*
6173 * Perform reset if we are the reset-owner,
6174 * else wait till IDC state changes to READY/FAILED.
6175 */
6176 if (rval == QLA_SUCCESS) {
6177 rval = qla83xx_idc_state_handler(vha);
6178
6179 if (rval == QLA_SUCCESS)
6180 ha->flags.nic_core_hung = 0;
6181 __qla83xx_clear_drv_ack(vha);
6182 }
6183
6184 exit:
6185 qla83xx_idc_unlock(vha, 0);
6186
6187 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
6188
6189 return rval;
6190 }
6191
6192 int
6193 qla2xxx_mctp_dump(scsi_qla_host_t *vha)
6194 {
6195 struct qla_hw_data *ha = vha->hw;
6196 int rval = QLA_FUNCTION_FAILED;
6197
6198 if (!IS_MCTP_CAPABLE(ha)) {
6199 /* This message can be removed from the final version */
6200 ql_log(ql_log_info, vha, 0x506d,
6201 "This board is not MCTP capable\n");
6202 return rval;
6203 }
6204
6205 if (!ha->mctp_dump) {
6206 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
6207 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
6208
6209 if (!ha->mctp_dump) {
6210 ql_log(ql_log_warn, vha, 0x506e,
6211 "Failed to allocate memory for mctp dump\n");
6212 return rval;
6213 }
6214 }
6215
6216 #define MCTP_DUMP_STR_ADDR 0x00000000
6217 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
6218 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
6219 if (rval != QLA_SUCCESS) {
6220 ql_log(ql_log_warn, vha, 0x506f,
6221 "Failed to capture mctp dump\n");
6222 } else {
6223 ql_log(ql_log_info, vha, 0x5070,
6224 "Mctp dump capture for host (%ld/%p).\n",
6225 vha->host_no, ha->mctp_dump);
6226 ha->mctp_dumped = 1;
6227 }
6228
6229 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
6230 ha->flags.nic_core_reset_hdlr_active = 1;
6231 rval = qla83xx_restart_nic_firmware(vha);
6232 if (rval)
6233 /* NIC Core reset failed. */
6234 ql_log(ql_log_warn, vha, 0x5071,
6235 "Failed to restart nic firmware\n");
6236 else
6237 ql_dbg(ql_dbg_p3p, vha, 0xb084,
6238 "Restarted NIC firmware successfully.\n");
6239 ha->flags.nic_core_reset_hdlr_active = 0;
6240 }
6241
6242 return rval;
6243
6244 }
6245
6246 /*
6247 * qla2x00_quiesce_io
6248 * Description: This function will block the new I/Os
6249 * Its not aborting any I/Os as context
6250 * is not destroyed during quiescence
6251 * Arguments: scsi_qla_host_t
6252 * return : void
6253 */
6254 void
6255 qla2x00_quiesce_io(scsi_qla_host_t *vha)
6256 {
6257 struct qla_hw_data *ha = vha->hw;
6258 struct scsi_qla_host *vp;
6259
6260 ql_dbg(ql_dbg_dpc, vha, 0x401d,
6261 "Quiescing I/O - ha=%p.\n", ha);
6262
6263 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
6264 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
6265 atomic_set(&vha->loop_state, LOOP_DOWN);
6266 qla2x00_mark_all_devices_lost(vha, 0);
6267 list_for_each_entry(vp, &ha->vp_list, list)
6268 qla2x00_mark_all_devices_lost(vp, 0);
6269 } else {
6270 if (!atomic_read(&vha->loop_down_timer))
6271 atomic_set(&vha->loop_down_timer,
6272 LOOP_DOWN_TIME);
6273 }
6274 /* Wait for pending cmds to complete */
6275 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
6276 }
6277
6278 void
6279 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
6280 {
6281 struct qla_hw_data *ha = vha->hw;
6282 struct scsi_qla_host *vp;
6283 unsigned long flags;
6284 fc_port_t *fcport;
6285 u16 i;
6286
6287 /* For ISP82XX, driver waits for completion of the commands.
6288 * online flag should be set.
6289 */
6290 if (!(IS_P3P_TYPE(ha)))
6291 vha->flags.online = 0;
6292 ha->flags.chip_reset_done = 0;
6293 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6294 vha->qla_stats.total_isp_aborts++;
6295
6296 ql_log(ql_log_info, vha, 0x00af,
6297 "Performing ISP error recovery - ha=%p.\n", ha);
6298
6299 /* For ISP82XX, reset_chip is just disabling interrupts.
6300 * Driver waits for the completion of the commands.
6301 * the interrupts need to be enabled.
6302 */
6303 if (!(IS_P3P_TYPE(ha)))
6304 ha->isp_ops->reset_chip(vha);
6305
6306 SAVE_TOPO(ha);
6307 ha->flags.rida_fmt2 = 0;
6308 ha->flags.n2n_ae = 0;
6309 ha->flags.lip_ae = 0;
6310 ha->current_topology = 0;
6311 ha->flags.fw_started = 0;
6312 ha->flags.fw_init_done = 0;
6313 ha->base_qpair->chip_reset++;
6314 for (i = 0; i < ha->max_qpairs; i++) {
6315 if (ha->queue_pair_map[i])
6316 ha->queue_pair_map[i]->chip_reset =
6317 ha->base_qpair->chip_reset;
6318 }
6319
6320 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
6321 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
6322 atomic_set(&vha->loop_state, LOOP_DOWN);
6323 qla2x00_mark_all_devices_lost(vha, 0);
6324
6325 spin_lock_irqsave(&ha->vport_slock, flags);
6326 list_for_each_entry(vp, &ha->vp_list, list) {
6327 atomic_inc(&vp->vref_count);
6328 spin_unlock_irqrestore(&ha->vport_slock, flags);
6329
6330 qla2x00_mark_all_devices_lost(vp, 0);
6331
6332 spin_lock_irqsave(&ha->vport_slock, flags);
6333 atomic_dec(&vp->vref_count);
6334 }
6335 spin_unlock_irqrestore(&ha->vport_slock, flags);
6336 } else {
6337 if (!atomic_read(&vha->loop_down_timer))
6338 atomic_set(&vha->loop_down_timer,
6339 LOOP_DOWN_TIME);
6340 }
6341
6342 /* Clear all async request states across all VPs. */
6343 list_for_each_entry(fcport, &vha->vp_fcports, list)
6344 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
6345 spin_lock_irqsave(&ha->vport_slock, flags);
6346 list_for_each_entry(vp, &ha->vp_list, list) {
6347 atomic_inc(&vp->vref_count);
6348 spin_unlock_irqrestore(&ha->vport_slock, flags);
6349
6350 list_for_each_entry(fcport, &vp->vp_fcports, list)
6351 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
6352
6353 spin_lock_irqsave(&ha->vport_slock, flags);
6354 atomic_dec(&vp->vref_count);
6355 }
6356 spin_unlock_irqrestore(&ha->vport_slock, flags);
6357
6358 if (!ha->flags.eeh_busy) {
6359 /* Make sure for ISP 82XX IO DMA is complete */
6360 if (IS_P3P_TYPE(ha)) {
6361 qla82xx_chip_reset_cleanup(vha);
6362 ql_log(ql_log_info, vha, 0x00b4,
6363 "Done chip reset cleanup.\n");
6364
6365 /* Done waiting for pending commands.
6366 * Reset the online flag.
6367 */
6368 vha->flags.online = 0;
6369 }
6370
6371 /* Requeue all commands in outstanding command list. */
6372 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
6373 }
6374 /* memory barrier */
6375 wmb();
6376 }
6377
6378 /*
6379 * qla2x00_abort_isp
6380 * Resets ISP and aborts all outstanding commands.
6381 *
6382 * Input:
6383 * ha = adapter block pointer.
6384 *
6385 * Returns:
6386 * 0 = success
6387 */
6388 int
6389 qla2x00_abort_isp(scsi_qla_host_t *vha)
6390 {
6391 int rval;
6392 uint8_t status = 0;
6393 struct qla_hw_data *ha = vha->hw;
6394 struct scsi_qla_host *vp;
6395 struct req_que *req = ha->req_q_map[0];
6396 unsigned long flags;
6397
6398 if (vha->flags.online) {
6399 qla2x00_abort_isp_cleanup(vha);
6400
6401 if (IS_QLA8031(ha)) {
6402 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
6403 "Clearing fcoe driver presence.\n");
6404 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
6405 ql_dbg(ql_dbg_p3p, vha, 0xb073,
6406 "Error while clearing DRV-Presence.\n");
6407 }
6408
6409 if (unlikely(pci_channel_offline(ha->pdev) &&
6410 ha->flags.pci_channel_io_perm_failure)) {
6411 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6412 status = 0;
6413 return status;
6414 }
6415
6416 ha->isp_ops->get_flash_version(vha, req->ring);
6417
6418 ha->isp_ops->nvram_config(vha);
6419
6420 if (!qla2x00_restart_isp(vha)) {
6421 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
6422
6423 if (!atomic_read(&vha->loop_down_timer)) {
6424 /*
6425 * Issue marker command only when we are going
6426 * to start the I/O .
6427 */
6428 vha->marker_needed = 1;
6429 }
6430
6431 vha->flags.online = 1;
6432
6433 ha->isp_ops->enable_intrs(ha);
6434
6435 ha->isp_abort_cnt = 0;
6436 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6437
6438 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
6439 qla2x00_get_fw_version(vha);
6440 if (ha->fce) {
6441 ha->flags.fce_enabled = 1;
6442 memset(ha->fce, 0,
6443 fce_calc_size(ha->fce_bufs));
6444 rval = qla2x00_enable_fce_trace(vha,
6445 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
6446 &ha->fce_bufs);
6447 if (rval) {
6448 ql_log(ql_log_warn, vha, 0x8033,
6449 "Unable to reinitialize FCE "
6450 "(%d).\n", rval);
6451 ha->flags.fce_enabled = 0;
6452 }
6453 }
6454
6455 if (ha->eft) {
6456 memset(ha->eft, 0, EFT_SIZE);
6457 rval = qla2x00_enable_eft_trace(vha,
6458 ha->eft_dma, EFT_NUM_BUFFERS);
6459 if (rval) {
6460 ql_log(ql_log_warn, vha, 0x8034,
6461 "Unable to reinitialize EFT "
6462 "(%d).\n", rval);
6463 }
6464 }
6465 } else { /* failed the ISP abort */
6466 vha->flags.online = 1;
6467 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
6468 if (ha->isp_abort_cnt == 0) {
6469 ql_log(ql_log_fatal, vha, 0x8035,
6470 "ISP error recover failed - "
6471 "board disabled.\n");
6472 /*
6473 * The next call disables the board
6474 * completely.
6475 */
6476 ha->isp_ops->reset_adapter(vha);
6477 vha->flags.online = 0;
6478 clear_bit(ISP_ABORT_RETRY,
6479 &vha->dpc_flags);
6480 status = 0;
6481 } else { /* schedule another ISP abort */
6482 ha->isp_abort_cnt--;
6483 ql_dbg(ql_dbg_taskm, vha, 0x8020,
6484 "ISP abort - retry remaining %d.\n",
6485 ha->isp_abort_cnt);
6486 status = 1;
6487 }
6488 } else {
6489 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
6490 ql_dbg(ql_dbg_taskm, vha, 0x8021,
6491 "ISP error recovery - retrying (%d) "
6492 "more times.\n", ha->isp_abort_cnt);
6493 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6494 status = 1;
6495 }
6496 }
6497
6498 }
6499
6500 if (!status) {
6501 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
6502 qla2x00_configure_hba(vha);
6503 spin_lock_irqsave(&ha->vport_slock, flags);
6504 list_for_each_entry(vp, &ha->vp_list, list) {
6505 if (vp->vp_idx) {
6506 atomic_inc(&vp->vref_count);
6507 spin_unlock_irqrestore(&ha->vport_slock, flags);
6508
6509 qla2x00_vp_abort_isp(vp);
6510
6511 spin_lock_irqsave(&ha->vport_slock, flags);
6512 atomic_dec(&vp->vref_count);
6513 }
6514 }
6515 spin_unlock_irqrestore(&ha->vport_slock, flags);
6516
6517 if (IS_QLA8031(ha)) {
6518 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
6519 "Setting back fcoe driver presence.\n");
6520 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
6521 ql_dbg(ql_dbg_p3p, vha, 0xb074,
6522 "Error while setting DRV-Presence.\n");
6523 }
6524 } else {
6525 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
6526 __func__);
6527 }
6528
6529 return(status);
6530 }
6531
6532 /*
6533 * qla2x00_restart_isp
6534 * restarts the ISP after a reset
6535 *
6536 * Input:
6537 * ha = adapter block pointer.
6538 *
6539 * Returns:
6540 * 0 = success
6541 */
6542 static int
6543 qla2x00_restart_isp(scsi_qla_host_t *vha)
6544 {
6545 int status = 0;
6546 struct qla_hw_data *ha = vha->hw;
6547 struct req_que *req = ha->req_q_map[0];
6548 struct rsp_que *rsp = ha->rsp_q_map[0];
6549
6550 /* If firmware needs to be loaded */
6551 if (qla2x00_isp_firmware(vha)) {
6552 vha->flags.online = 0;
6553 status = ha->isp_ops->chip_diag(vha);
6554 if (!status)
6555 status = qla2x00_setup_chip(vha);
6556 }
6557
6558 if (!status && !(status = qla2x00_init_rings(vha))) {
6559 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
6560 ha->flags.chip_reset_done = 1;
6561
6562 /* Initialize the queues in use */
6563 qla25xx_init_queues(ha);
6564
6565 status = qla2x00_fw_ready(vha);
6566 if (!status) {
6567 /* Issue a marker after FW becomes ready. */
6568 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
6569 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6570 }
6571
6572 /* if no cable then assume it's good */
6573 if ((vha->device_flags & DFLG_NO_CABLE))
6574 status = 0;
6575 }
6576 return (status);
6577 }
6578
6579 static int
6580 qla25xx_init_queues(struct qla_hw_data *ha)
6581 {
6582 struct rsp_que *rsp = NULL;
6583 struct req_que *req = NULL;
6584 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6585 int ret = -1;
6586 int i;
6587
6588 for (i = 1; i < ha->max_rsp_queues; i++) {
6589 rsp = ha->rsp_q_map[i];
6590 if (rsp && test_bit(i, ha->rsp_qid_map)) {
6591 rsp->options &= ~BIT_0;
6592 ret = qla25xx_init_rsp_que(base_vha, rsp);
6593 if (ret != QLA_SUCCESS)
6594 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
6595 "%s Rsp que: %d init failed.\n",
6596 __func__, rsp->id);
6597 else
6598 ql_dbg(ql_dbg_init, base_vha, 0x0100,
6599 "%s Rsp que: %d inited.\n",
6600 __func__, rsp->id);
6601 }
6602 }
6603 for (i = 1; i < ha->max_req_queues; i++) {
6604 req = ha->req_q_map[i];
6605 if (req && test_bit(i, ha->req_qid_map)) {
6606 /* Clear outstanding commands array. */
6607 req->options &= ~BIT_0;
6608 ret = qla25xx_init_req_que(base_vha, req);
6609 if (ret != QLA_SUCCESS)
6610 ql_dbg(ql_dbg_init, base_vha, 0x0101,
6611 "%s Req que: %d init failed.\n",
6612 __func__, req->id);
6613 else
6614 ql_dbg(ql_dbg_init, base_vha, 0x0102,
6615 "%s Req que: %d inited.\n",
6616 __func__, req->id);
6617 }
6618 }
6619 return ret;
6620 }
6621
6622 /*
6623 * qla2x00_reset_adapter
6624 * Reset adapter.
6625 *
6626 * Input:
6627 * ha = adapter block pointer.
6628 */
void
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Mark the host offline and quiesce interrupts before touching
	 * the RISC reset bits. */
	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Assert RISC reset, then release it; each write is followed by
	 * a read-back of the same register to flush PCI posting. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
6646
void
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* P3P-type (82xx) parts use a different reset path; nothing to
	 * do here for them. */
	if (IS_P3P_TYPE(ha))
		return;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Set RISC reset, then release pause; read-backs flush PCI
	 * posting between the two writes. */
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);
	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Non-polling adapters need interrupts back on immediately. */
	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);
}
6670
6671 /* On sparc systems, obtain port and node WWN from firmware
6672 * properties.
6673 */
/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
	struct nvram_24xx *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	/* Overwrite the default WWPN/WWNN only when the Open Firmware
	 * device tree provides a full-size (8-byte) value. */
	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
	/* On non-sparc builds this is intentionally a no-op. */
}
6693
/*
 * qla24xx_nvram_config
 *	Read the 24xx NVRAM into the cached copy, validate it (32-bit
 *	checksum, "ISP " id, minimum version), fall back to hard-coded
 *	defaults when invalid, then build the init control block (ICB)
 *	and derive driver/HBA operating parameters from it.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	QLA_SUCCESS (0) when NVRAM was valid; 1 when defaults were used.
 */
int
qla24xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_24xx *icb;
	struct nvram_24xx *nv;
	uint32_t *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;
	icb = (struct init_cb_24xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address (per-PCI-function regions). */
	if (ha->port_no == 0) {
		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
	} else {
		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
	}

	ha->nvram_size = sizeof(struct nvram_24xx);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);

	/* Get NVRAM data into cache and calculate checksum. */
	dptr = (uint32_t *)nv;
	ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
	    ha->nvram_size);
	/* A valid image sums to zero over 32-bit little-endian words. */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
	    "Contents of NVRAM\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
	    (uint8_t *)nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
	    || nv->id[3] != ' ' ||
	    nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x006b,
		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
		    "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x006c,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
		 * The WWPN/WWNN below are placeholder QLogic-OUI values
		 * (only the second port-name byte varies by port number);
		 * sparc systems may replace them from firmware properties
		 * via qla24xx_nvram_wwn_from_ofw() below.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = 2048;
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		nv->hard_address = cpu_to_le16(124);
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		qla24xx_nvram_wwn_from_ofw(vha, nv);
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(30);

		/* Non-zero return tells the caller defaults were used. */
		rval = 1;
	}

	if (qla_tgt_mode_enabled(vha)) {
		/* Don't enable full login after initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Don't enable LIP full login for initiator */
		nv->host_p &= cpu_to_le32(~BIT_10);
	}

	qlt_24xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment (version .. response_q_inpointer) byte-wise
	 * from the NVRAM image into the ICB. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;
	icb->link_down_on_nos = nv->link_down_on_nos;

	/* Copy 2nd segment (interrupt_delay_timer .. reserved_3). */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_3 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLA2462");

	qlt_24xx_config_nvram_stage2(vha, icb);

	if (nv->host_p & cpu_to_le32(BIT_15)) {
		/* Use alternate WWN? */
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options24));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	/* Module parameter overrides everything. */
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* Enable ZIO (interrupt coalescing); only latch the NVRAM ZIO
	 * mode/timer on the very first configuration pass. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer): 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x006f,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode, ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0070,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
6953
/*
 * qla27xx_find_valid_image
 *	Decide which 27xx flash firmware image to boot: default, primary
 *	or secondary.  Each candidate image has an image-status record in
 *	flash that must carry the expected signature and checksum to zero;
 *	when both images are valid and enabled, the one with the higher
 *	generation number wins.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	ha->active_image: 0 (default), QLA27XX_PRIMARY_IMAGE or
 *	QLA27XX_SECONDARY_IMAGE.
 */
uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
{
	struct qla27xx_image_status pri_image_status, sec_image_status;
	uint8_t valid_pri_image, valid_sec_image;
	uint32_t *wptr;
	uint32_t cnt, chksum, size;
	struct qla_hw_data *ha = vha->hw;

	valid_pri_image = valid_sec_image = 1;
	ha->active_image = 0;
	/* Status-record size in dwords, the unit read_flash_data uses. */
	size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);

	if (!ha->flt_region_img_status_pri) {
		valid_pri_image = 0;
		goto check_sec_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
	    ha->flt_region_img_status_pri, size);

	if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Primary image signature (0x%x) not valid\n",
		    pri_image_status.signature);
		valid_pri_image = 0;
		goto check_sec_image;
	}

	/* Checksum over the whole record must come out zero. */
	wptr = (uint32_t *)(&pri_image_status);
	cnt = size;

	for (chksum = 0; cnt--; wptr++)
		chksum += le32_to_cpu(*wptr);

	if (chksum) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Checksum validation failed for primary image (0x%x)\n",
		    chksum);
		valid_pri_image = 0;
	}

check_sec_image:
	if (!ha->flt_region_img_status_sec) {
		valid_sec_image = 0;
		goto check_valid_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
	    ha->flt_region_img_status_sec, size);

	if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Secondary image signature(0x%x) not valid\n",
		    sec_image_status.signature);
		valid_sec_image = 0;
		goto check_valid_image;
	}

	wptr = (uint32_t *)(&sec_image_status);
	cnt = size;
	for (chksum = 0; cnt--; wptr++)
		chksum += le32_to_cpu(*wptr);
	if (chksum) {
		ql_dbg(ql_dbg_init, vha, 0x018e,
		    "Checksum validation failed for secondary image (0x%x)\n",
		    chksum);
		valid_sec_image = 0;
	}

check_valid_image:
	/* Bit 0 of image_status_mask gates whether an image is enabled. */
	if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
		ha->active_image = QLA27XX_PRIMARY_IMAGE;
	if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
		/* Secondary wins when primary is unusable or older. */
		if (!ha->active_image ||
		    pri_image_status.generation_number <
		    sec_image_status.generation_number)
			ha->active_image = QLA27XX_SECONDARY_IMAGE;
	}

	ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
	    ha->active_image == 0 ? "default bootld and fw" :
	    ha->active_image == 1 ? "primary" :
	    ha->active_image == 2 ? "secondary" :
	    "Invalid");

	return ha->active_image;
}
7041
7042 static int
7043 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
7044 uint32_t faddr)
7045 {
7046 int rval = QLA_SUCCESS;
7047 int segments, fragment;
7048 uint32_t *dcode, dlen;
7049 uint32_t risc_addr;
7050 uint32_t risc_size;
7051 uint32_t i;
7052 struct qla_hw_data *ha = vha->hw;
7053 struct req_que *req = ha->req_q_map[0];
7054
7055 ql_dbg(ql_dbg_init, vha, 0x008b,
7056 "FW: Loading firmware from flash (%x).\n", faddr);
7057
7058 rval = QLA_SUCCESS;
7059
7060 segments = FA_RISC_CODE_SEGMENTS;
7061 dcode = (uint32_t *)req->ring;
7062 *srisc_addr = 0;
7063
7064 if (IS_QLA27XX(ha) &&
7065 qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
7066 faddr = ha->flt_region_fw_sec;
7067
7068 /* Validate firmware image by checking version. */
7069 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
7070 for (i = 0; i < 4; i++)
7071 dcode[i] = be32_to_cpu(dcode[i]);
7072 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
7073 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
7074 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
7075 dcode[3] == 0)) {
7076 ql_log(ql_log_fatal, vha, 0x008c,
7077 "Unable to verify the integrity of flash firmware "
7078 "image.\n");
7079 ql_log(ql_log_fatal, vha, 0x008d,
7080 "Firmware data: %08x %08x %08x %08x.\n",
7081 dcode[0], dcode[1], dcode[2], dcode[3]);
7082
7083 return QLA_FUNCTION_FAILED;
7084 }
7085
7086 while (segments && rval == QLA_SUCCESS) {
7087 /* Read segment's load information. */
7088 qla24xx_read_flash_data(vha, dcode, faddr, 4);
7089
7090 risc_addr = be32_to_cpu(dcode[2]);
7091 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
7092 risc_size = be32_to_cpu(dcode[3]);
7093
7094 fragment = 0;
7095 while (risc_size > 0 && rval == QLA_SUCCESS) {
7096 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
7097 if (dlen > risc_size)
7098 dlen = risc_size;
7099
7100 ql_dbg(ql_dbg_init, vha, 0x008e,
7101 "Loading risc segment@ risc addr %x "
7102 "number of dwords 0x%x offset 0x%x.\n",
7103 risc_addr, dlen, faddr);
7104
7105 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
7106 for (i = 0; i < dlen; i++)
7107 dcode[i] = swab32(dcode[i]);
7108
7109 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
7110 dlen);
7111 if (rval) {
7112 ql_log(ql_log_fatal, vha, 0x008f,
7113 "Failed to load segment %d of firmware.\n",
7114 fragment);
7115 return QLA_FUNCTION_FAILED;
7116 }
7117
7118 faddr += dlen;
7119 risc_addr += dlen;
7120 risc_size -= dlen;
7121 fragment++;
7122 }
7123
7124 /* Next segment. */
7125 segments--;
7126 }
7127
7128 if (!IS_QLA27XX(ha))
7129 return rval;
7130
7131 if (ha->fw_dump_template)
7132 vfree(ha->fw_dump_template);
7133 ha->fw_dump_template = NULL;
7134 ha->fw_dump_template_len = 0;
7135
7136 ql_dbg(ql_dbg_init, vha, 0x0161,
7137 "Loading fwdump template from %x\n", faddr);
7138 qla24xx_read_flash_data(vha, dcode, faddr, 7);
7139 risc_size = be32_to_cpu(dcode[2]);
7140 ql_dbg(ql_dbg_init, vha, 0x0162,
7141 "-> array size %x dwords\n", risc_size);
7142 if (risc_size == 0 || risc_size == ~0)
7143 goto default_template;
7144
7145 dlen = (risc_size - 8) * sizeof(*dcode);
7146 ql_dbg(ql_dbg_init, vha, 0x0163,
7147 "-> template allocating %x bytes...\n", dlen);
7148 ha->fw_dump_template = vmalloc(dlen);
7149 if (!ha->fw_dump_template) {
7150 ql_log(ql_log_warn, vha, 0x0164,
7151 "Failed fwdump template allocate %x bytes.\n", risc_size);
7152 goto default_template;
7153 }
7154
7155 faddr += 7;
7156 risc_size -= 8;
7157 dcode = ha->fw_dump_template;
7158 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
7159 for (i = 0; i < risc_size; i++)
7160 dcode[i] = le32_to_cpu(dcode[i]);
7161
7162 if (!qla27xx_fwdt_template_valid(dcode)) {
7163 ql_log(ql_log_warn, vha, 0x0165,
7164 "Failed fwdump template validate\n");
7165 goto default_template;
7166 }
7167
7168 dlen = qla27xx_fwdt_template_size(dcode);
7169 ql_dbg(ql_dbg_init, vha, 0x0166,
7170 "-> template size %x bytes\n", dlen);
7171 if (dlen > risc_size * sizeof(*dcode)) {
7172 ql_log(ql_log_warn, vha, 0x0167,
7173 "Failed fwdump template exceeds array by %zx bytes\n",
7174 (size_t)(dlen - risc_size * sizeof(*dcode)));
7175 goto default_template;
7176 }
7177 ha->fw_dump_template_len = dlen;
7178 return rval;
7179
7180 default_template:
7181 ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
7182 if (ha->fw_dump_template)
7183 vfree(ha->fw_dump_template);
7184 ha->fw_dump_template = NULL;
7185 ha->fw_dump_template_len = 0;
7186
7187 dlen = qla27xx_fwdt_template_default_size();
7188 ql_dbg(ql_dbg_init, vha, 0x0169,
7189 "-> template allocating %x bytes...\n", dlen);
7190 ha->fw_dump_template = vmalloc(dlen);
7191 if (!ha->fw_dump_template) {
7192 ql_log(ql_log_warn, vha, 0x016a,
7193 "Failed fwdump template allocate %x bytes.\n", risc_size);
7194 goto failed_template;
7195 }
7196
7197 dcode = ha->fw_dump_template;
7198 risc_size = dlen / sizeof(*dcode);
7199 memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
7200 for (i = 0; i < risc_size; i++)
7201 dcode[i] = be32_to_cpu(dcode[i]);
7202
7203 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
7204 ql_log(ql_log_warn, vha, 0x016b,
7205 "Failed fwdump template validate\n");
7206 goto failed_template;
7207 }
7208
7209 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
7210 ql_dbg(ql_dbg_init, vha, 0x016c,
7211 "-> template size %x bytes\n", dlen);
7212 ha->fw_dump_template_len = dlen;
7213 return rval;
7214
7215 failed_template:
7216 ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
7217 if (ha->fw_dump_template)
7218 vfree(ha->fw_dump_template);
7219 ha->fw_dump_template = NULL;
7220 ha->fw_dump_template_len = 0;
7221 return rval;
7222 }
7223
7224 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
7225
7226 int
7227 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7228 {
7229 int rval;
7230 int i, fragment;
7231 uint16_t *wcode, *fwcode;
7232 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
7233 struct fw_blob *blob;
7234 struct qla_hw_data *ha = vha->hw;
7235 struct req_que *req = ha->req_q_map[0];
7236
7237 /* Load firmware blob. */
7238 blob = qla2x00_request_firmware(vha);
7239 if (!blob) {
7240 ql_log(ql_log_info, vha, 0x0083,
7241 "Firmware image unavailable.\n");
7242 ql_log(ql_log_info, vha, 0x0084,
7243 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
7244 return QLA_FUNCTION_FAILED;
7245 }
7246
7247 rval = QLA_SUCCESS;
7248
7249 wcode = (uint16_t *)req->ring;
7250 *srisc_addr = 0;
7251 fwcode = (uint16_t *)blob->fw->data;
7252 fwclen = 0;
7253
7254 /* Validate firmware image by checking version. */
7255 if (blob->fw->size < 8 * sizeof(uint16_t)) {
7256 ql_log(ql_log_fatal, vha, 0x0085,
7257 "Unable to verify integrity of firmware image (%zd).\n",
7258 blob->fw->size);
7259 goto fail_fw_integrity;
7260 }
7261 for (i = 0; i < 4; i++)
7262 wcode[i] = be16_to_cpu(fwcode[i + 4]);
7263 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
7264 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
7265 wcode[2] == 0 && wcode[3] == 0)) {
7266 ql_log(ql_log_fatal, vha, 0x0086,
7267 "Unable to verify integrity of firmware image.\n");
7268 ql_log(ql_log_fatal, vha, 0x0087,
7269 "Firmware data: %04x %04x %04x %04x.\n",
7270 wcode[0], wcode[1], wcode[2], wcode[3]);
7271 goto fail_fw_integrity;
7272 }
7273
7274 seg = blob->segs;
7275 while (*seg && rval == QLA_SUCCESS) {
7276 risc_addr = *seg;
7277 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
7278 risc_size = be16_to_cpu(fwcode[3]);
7279
7280 /* Validate firmware image size. */
7281 fwclen += risc_size * sizeof(uint16_t);
7282 if (blob->fw->size < fwclen) {
7283 ql_log(ql_log_fatal, vha, 0x0088,
7284 "Unable to verify integrity of firmware image "
7285 "(%zd).\n", blob->fw->size);
7286 goto fail_fw_integrity;
7287 }
7288
7289 fragment = 0;
7290 while (risc_size > 0 && rval == QLA_SUCCESS) {
7291 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
7292 if (wlen > risc_size)
7293 wlen = risc_size;
7294 ql_dbg(ql_dbg_init, vha, 0x0089,
7295 "Loading risc segment@ risc addr %x number of "
7296 "words 0x%x.\n", risc_addr, wlen);
7297
7298 for (i = 0; i < wlen; i++)
7299 wcode[i] = swab16(fwcode[i]);
7300
7301 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
7302 wlen);
7303 if (rval) {
7304 ql_log(ql_log_fatal, vha, 0x008a,
7305 "Failed to load segment %d of firmware.\n",
7306 fragment);
7307 break;
7308 }
7309
7310 fwcode += wlen;
7311 risc_addr += wlen;
7312 risc_size -= wlen;
7313 fragment++;
7314 }
7315
7316 /* Next segment. */
7317 seg++;
7318 }
7319 return rval;
7320
7321 fail_fw_integrity:
7322 return QLA_FUNCTION_FAILED;
7323 }
7324
7325 static int
7326 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7327 {
7328 int rval;
7329 int segments, fragment;
7330 uint32_t *dcode, dlen;
7331 uint32_t risc_addr;
7332 uint32_t risc_size;
7333 uint32_t i;
7334 struct fw_blob *blob;
7335 const uint32_t *fwcode;
7336 uint32_t fwclen;
7337 struct qla_hw_data *ha = vha->hw;
7338 struct req_que *req = ha->req_q_map[0];
7339
7340 /* Load firmware blob. */
7341 blob = qla2x00_request_firmware(vha);
7342 if (!blob) {
7343 ql_log(ql_log_warn, vha, 0x0090,
7344 "Firmware image unavailable.\n");
7345 ql_log(ql_log_warn, vha, 0x0091,
7346 "Firmware images can be retrieved from: "
7347 QLA_FW_URL ".\n");
7348
7349 return QLA_FUNCTION_FAILED;
7350 }
7351
7352 ql_dbg(ql_dbg_init, vha, 0x0092,
7353 "FW: Loading via request-firmware.\n");
7354
7355 rval = QLA_SUCCESS;
7356
7357 segments = FA_RISC_CODE_SEGMENTS;
7358 dcode = (uint32_t *)req->ring;
7359 *srisc_addr = 0;
7360 fwcode = (uint32_t *)blob->fw->data;
7361 fwclen = 0;
7362
7363 /* Validate firmware image by checking version. */
7364 if (blob->fw->size < 8 * sizeof(uint32_t)) {
7365 ql_log(ql_log_fatal, vha, 0x0093,
7366 "Unable to verify integrity of firmware image (%zd).\n",
7367 blob->fw->size);
7368 return QLA_FUNCTION_FAILED;
7369 }
7370 for (i = 0; i < 4; i++)
7371 dcode[i] = be32_to_cpu(fwcode[i + 4]);
7372 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
7373 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
7374 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
7375 dcode[3] == 0)) {
7376 ql_log(ql_log_fatal, vha, 0x0094,
7377 "Unable to verify integrity of firmware image (%zd).\n",
7378 blob->fw->size);
7379 ql_log(ql_log_fatal, vha, 0x0095,
7380 "Firmware data: %08x %08x %08x %08x.\n",
7381 dcode[0], dcode[1], dcode[2], dcode[3]);
7382 return QLA_FUNCTION_FAILED;
7383 }
7384
7385 while (segments && rval == QLA_SUCCESS) {
7386 risc_addr = be32_to_cpu(fwcode[2]);
7387 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
7388 risc_size = be32_to_cpu(fwcode[3]);
7389
7390 /* Validate firmware image size. */
7391 fwclen += risc_size * sizeof(uint32_t);
7392 if (blob->fw->size < fwclen) {
7393 ql_log(ql_log_fatal, vha, 0x0096,
7394 "Unable to verify integrity of firmware image "
7395 "(%zd).\n", blob->fw->size);
7396 return QLA_FUNCTION_FAILED;
7397 }
7398
7399 fragment = 0;
7400 while (risc_size > 0 && rval == QLA_SUCCESS) {
7401 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
7402 if (dlen > risc_size)
7403 dlen = risc_size;
7404
7405 ql_dbg(ql_dbg_init, vha, 0x0097,
7406 "Loading risc segment@ risc addr %x "
7407 "number of dwords 0x%x.\n", risc_addr, dlen);
7408
7409 for (i = 0; i < dlen; i++)
7410 dcode[i] = swab32(fwcode[i]);
7411
7412 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
7413 dlen);
7414 if (rval) {
7415 ql_log(ql_log_fatal, vha, 0x0098,
7416 "Failed to load segment %d of firmware.\n",
7417 fragment);
7418 return QLA_FUNCTION_FAILED;
7419 }
7420
7421 fwcode += dlen;
7422 risc_addr += dlen;
7423 risc_size -= dlen;
7424 fragment++;
7425 }
7426
7427 /* Next segment. */
7428 segments--;
7429 }
7430
7431 if (!IS_QLA27XX(ha))
7432 return rval;
7433
7434 if (ha->fw_dump_template)
7435 vfree(ha->fw_dump_template);
7436 ha->fw_dump_template = NULL;
7437 ha->fw_dump_template_len = 0;
7438
7439 ql_dbg(ql_dbg_init, vha, 0x171,
7440 "Loading fwdump template from %x\n",
7441 (uint32_t)((void *)fwcode - (void *)blob->fw->data));
7442 risc_size = be32_to_cpu(fwcode[2]);
7443 ql_dbg(ql_dbg_init, vha, 0x172,
7444 "-> array size %x dwords\n", risc_size);
7445 if (risc_size == 0 || risc_size == ~0)
7446 goto default_template;
7447
7448 dlen = (risc_size - 8) * sizeof(*fwcode);
7449 ql_dbg(ql_dbg_init, vha, 0x0173,
7450 "-> template allocating %x bytes...\n", dlen);
7451 ha->fw_dump_template = vmalloc(dlen);
7452 if (!ha->fw_dump_template) {
7453 ql_log(ql_log_warn, vha, 0x0174,
7454 "Failed fwdump template allocate %x bytes.\n", risc_size);
7455 goto default_template;
7456 }
7457
7458 fwcode += 7;
7459 risc_size -= 8;
7460 dcode = ha->fw_dump_template;
7461 for (i = 0; i < risc_size; i++)
7462 dcode[i] = le32_to_cpu(fwcode[i]);
7463
7464 if (!qla27xx_fwdt_template_valid(dcode)) {
7465 ql_log(ql_log_warn, vha, 0x0175,
7466 "Failed fwdump template validate\n");
7467 goto default_template;
7468 }
7469
7470 dlen = qla27xx_fwdt_template_size(dcode);
7471 ql_dbg(ql_dbg_init, vha, 0x0176,
7472 "-> template size %x bytes\n", dlen);
7473 if (dlen > risc_size * sizeof(*fwcode)) {
7474 ql_log(ql_log_warn, vha, 0x0177,
7475 "Failed fwdump template exceeds array by %zx bytes\n",
7476 (size_t)(dlen - risc_size * sizeof(*fwcode)));
7477 goto default_template;
7478 }
7479 ha->fw_dump_template_len = dlen;
7480 return rval;
7481
7482 default_template:
7483 ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
7484 if (ha->fw_dump_template)
7485 vfree(ha->fw_dump_template);
7486 ha->fw_dump_template = NULL;
7487 ha->fw_dump_template_len = 0;
7488
7489 dlen = qla27xx_fwdt_template_default_size();
7490 ql_dbg(ql_dbg_init, vha, 0x0179,
7491 "-> template allocating %x bytes...\n", dlen);
7492 ha->fw_dump_template = vmalloc(dlen);
7493 if (!ha->fw_dump_template) {
7494 ql_log(ql_log_warn, vha, 0x017a,
7495 "Failed fwdump template allocate %x bytes.\n", risc_size);
7496 goto failed_template;
7497 }
7498
7499 dcode = ha->fw_dump_template;
7500 risc_size = dlen / sizeof(*fwcode);
7501 fwcode = qla27xx_fwdt_template_default();
7502 for (i = 0; i < risc_size; i++)
7503 dcode[i] = be32_to_cpu(fwcode[i]);
7504
7505 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
7506 ql_log(ql_log_warn, vha, 0x017b,
7507 "Failed fwdump template validate\n");
7508 goto failed_template;
7509 }
7510
7511 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
7512 ql_dbg(ql_dbg_init, vha, 0x017c,
7513 "-> template size %x bytes\n", dlen);
7514 ha->fw_dump_template_len = dlen;
7515 return rval;
7516
7517 failed_template:
7518 ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
7519 if (ha->fw_dump_template)
7520 vfree(ha->fw_dump_template);
7521 ha->fw_dump_template = NULL;
7522 ha->fw_dump_template_len = 0;
7523 return rval;
7524 }
7525
7526 int
7527 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7528 {
7529 int rval;
7530
7531 if (ql2xfwloadbin == 1)
7532 return qla81xx_load_risc(vha, srisc_addr);
7533
7534 /*
7535 * FW Load priority:
7536 * 1) Firmware via request-firmware interface (.bin file).
7537 * 2) Firmware residing in flash.
7538 */
7539 rval = qla24xx_load_risc_blob(vha, srisc_addr);
7540 if (rval == QLA_SUCCESS)
7541 return rval;
7542
7543 return qla24xx_load_risc_flash(vha, srisc_addr,
7544 vha->hw->flt_region_fw);
7545 }
7546
7547 int
7548 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7549 {
7550 int rval;
7551 struct qla_hw_data *ha = vha->hw;
7552
7553 if (ql2xfwloadbin == 2)
7554 goto try_blob_fw;
7555
7556 /*
7557 * FW Load priority:
7558 * 1) Firmware residing in flash.
7559 * 2) Firmware via request-firmware interface (.bin file).
7560 * 3) Golden-Firmware residing in flash -- limited operation.
7561 */
7562 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
7563 if (rval == QLA_SUCCESS)
7564 return rval;
7565
7566 try_blob_fw:
7567 rval = qla24xx_load_risc_blob(vha, srisc_addr);
7568 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
7569 return rval;
7570
7571 ql_log(ql_log_info, vha, 0x0099,
7572 "Attempting to fallback to golden firmware.\n");
7573 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
7574 if (rval != QLA_SUCCESS)
7575 return rval;
7576
7577 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
7578 ha->flags.running_gold_fw = 1;
7579 return rval;
7580 }
7581
/*
 * qla2x00_try_to_stop_firmware
 *	Best-effort stop of a running FWI2-capable firmware.  If the
 *	stop-firmware mailbox command fails with a retryable status, the
 *	chip is reset and re-initialized up to 5 times before giving up.
 *	Never called into for adapters without started firmware or with a
 *	dead PCI channel.
 *
 * Input:
 *	vha = adapter block pointer.
 */
void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
	int ret, retries;
	struct qla_hw_data *ha = vha->hw;

	/* Bail out early when there is nothing (safe) to stop. */
	if (ha->flags.pci_channel_io_perm_failure)
		return;
	if (!IS_FWI2_CAPABLE(ha))
		return;
	if (!ha->fw_major_version)
		return;
	if (!ha->flags.fw_started)
		return;

	ret = qla2x00_stop_firmware(vha);
	/* Timeout and invalid-command are terminal: retrying the same
	 * command cannot help, so only other failures are retried. */
	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
		/* Re-initialize the chip before each retry; skip the
		 * retry if the chip cannot be brought back up. */
		ha->isp_ops->reset_chip(vha);
		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
			continue;
		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
			continue;
		ql_log(ql_log_info, vha, 0x8015,
		    "Attempting retry of stop-firmware command.\n");
		ret = qla2x00_stop_firmware(vha);
	}

	/* Mark firmware stopped regardless of the final outcome. */
	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
}
7613
/*
 * qla24xx_configure_vhba
 *	Bring a virtual port (NPIV vport) on line: wait for firmware ready
 *	on the base port, log in to the SNS, and kick off a loop resync.
 *
 * Returns QLA_SUCCESS / QLA_FUNCTION_FAILED, or -EINVAL when called on
 * the physical (vp_idx == 0) port.
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct req_que *req;
	struct rsp_que *rsp;

	/* Only valid for virtual ports. */
	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);
	/* Prefer the vport's own queue pair; fall back to queue 0. */
	if (vha->qpair)
		req = vha->qpair->req;
	else
		req = ha->req_q_map[0];
	rsp = req->rsp;

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	/* SNS login succeeded -- mark the loop up and request a resync. */
	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}
7666
7667 /* 84XX Support **************************************************************/
7668
7669 static LIST_HEAD(qla_cs84xx_list);
7670 static DEFINE_MUTEX(qla_cs84xx_mutex);
7671
7672 static struct qla_chip_state_84xx *
7673 qla84xx_get_chip(struct scsi_qla_host *vha)
7674 {
7675 struct qla_chip_state_84xx *cs84xx;
7676 struct qla_hw_data *ha = vha->hw;
7677
7678 mutex_lock(&qla_cs84xx_mutex);
7679
7680 /* Find any shared 84xx chip. */
7681 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
7682 if (cs84xx->bus == ha->pdev->bus) {
7683 kref_get(&cs84xx->kref);
7684 goto done;
7685 }
7686 }
7687
7688 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
7689 if (!cs84xx)
7690 goto done;
7691
7692 kref_init(&cs84xx->kref);
7693 spin_lock_init(&cs84xx->access_lock);
7694 mutex_init(&cs84xx->fw_update_mutex);
7695 cs84xx->bus = ha->pdev->bus;
7696
7697 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
7698 done:
7699 mutex_unlock(&qla_cs84xx_mutex);
7700 return cs84xx;
7701 }
7702
7703 static void
7704 __qla84xx_chip_release(struct kref *kref)
7705 {
7706 struct qla_chip_state_84xx *cs84xx =
7707 container_of(kref, struct qla_chip_state_84xx, kref);
7708
7709 mutex_lock(&qla_cs84xx_mutex);
7710 list_del(&cs84xx->list);
7711 mutex_unlock(&qla_cs84xx_mutex);
7712 kfree(cs84xx);
7713 }
7714
7715 void
7716 qla84xx_put_chip(struct scsi_qla_host *vha)
7717 {
7718 struct qla_hw_data *ha = vha->hw;
7719 if (ha->cs84xx)
7720 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
7721 }
7722
7723 static int
7724 qla84xx_init_chip(scsi_qla_host_t *vha)
7725 {
7726 int rval;
7727 uint16_t status[2];
7728 struct qla_hw_data *ha = vha->hw;
7729
7730 mutex_lock(&ha->cs84xx->fw_update_mutex);
7731
7732 rval = qla84xx_verify_chip(vha, status);
7733
7734 mutex_unlock(&ha->cs84xx->fw_update_mutex);
7735
7736 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
7737 QLA_SUCCESS;
7738 }
7739
7740 /* 81XX Support **************************************************************/
7741
/*
 * qla81xx_nvram_config
 *	Read NVRAM/VPD from flash, validate it, and build the ISP81xx
 *	initialization control block plus driver-side parameters.
 *
 * Returns QLA_SUCCESS (0) when NVRAM was valid; non-zero when defaults
 * had to be substituted for bad NVRAM contents.
 */
int
qla81xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_81xx *icb;
	struct nvram_81xx *nv;
	uint32_t *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;
	icb = (struct init_cb_81xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(struct nvram_81xx);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;
	if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
		ha->vpd_size = FA_VPD_SIZE_82XX;

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;
	ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
	    ha->vpd_size);

	/* Get NVRAM data into cache and calculate checksum. */
	ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
	    ha->nvram_size);
	/* A valid image sums (32-bit words, little-endian) to zero. */
	dptr = (uint32_t *)nv;
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
	    "Contents of NVRAM:\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
	    (uint8_t *)nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
	    || nv->id[3] != ' ' ||
	    nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
		/* Reset NVRAM data. */
		ql_log(ql_log_info, vha, 0x0073,
		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
		    "version=0x%x.\n", chksum, nv->id[0],
		    le16_to_cpu(nv->nvram_version));
		ql_log(ql_log_info, vha, 0x0074,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = 2048;
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		/* Default WWPN/WWNN, varied only by port number. */
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(180);
		/* Default ENode MAC, varied only by port number. */
		nv->enode_mac[0] = 0x00;
		nv->enode_mac[1] = 0xC0;
		nv->enode_mac[2] = 0xDD;
		nv->enode_mac[3] = 0x04;
		nv->enode_mac[4] = 0x05;
		nv->enode_mac[5] = 0x06 + ha->port_no + 1;

		rval = 1;
	}

	/* T10-PI needs the payload size rounded down to a multiple of 8. */
	if (IS_T10_PI_CAPABLE(ha))
		nv->frame_payload_size &= ~7;

	qlt_81xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_5 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;

	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
		icb->enode_mac[0] = 0x00;
		icb->enode_mac[1] = 0xC0;
		icb->enode_mac[2] = 0xDD;
		icb->enode_mac[3] = 0x04;
		icb->enode_mac[4] = 0x05;
		icb->enode_mac[5] = 0x06 + ha->port_no + 1;
	}

	/* Use extended-initialization control block. */
	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));

	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLE8XXX");

	qlt_81xx_config_nvram_stage2(vha, icb);

	/* Use alternate WWN? */
	if (nv->host_p & cpu_to_le32(BIT_15)) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* if not running MSI-X we need handshaking on interrupts */
	if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
		icb->firmware_options_2 |= cpu_to_le32(BIT_22);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer): 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x0075,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode,
		    ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	/* enable RIDA Format2 */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		icb->firmware_options_3 |= BIT_0;

	if (IS_QLA27XX(ha)) {
		icb->firmware_options_3 |= BIT_8;
		/*
		 * NOTE(review): ql_dbg() called with a ql_log_* level and a
		 * message id (0x0075) already used above -- looks unintended;
		 * verify against later upstream revisions.
		 */
		ql_dbg(ql_log_info, vha, 0x0075,
		    "Enabling direct connection.\n");
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0076,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
8021
/*
 * qla82xx_restart_isp
 *	Re-initialize rings and firmware state for ISP82xx after a reset,
 *	then abort/restart ISP processing on every virtual port.
 *
 * Returns 0 on success, non-zero otherwise.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct scsi_qla_host *vp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		/* Re-arm FCE tracing if a buffer was previously allocated. */
		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		/* Likewise for extended firmware tracing. */
		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		/*
		 * Walk the vport list; the lock is dropped around the abort
		 * call, with vref_count pinning the vport meanwhile.
		 */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}
8121
/*
 * qla81xx_update_fw_options
 *	Compose the ISP81xx firmware option words from module parameters
 *	and the current operating mode, then push them to the firmware.
 */
void
qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2103,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
		    __func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (qla_tgt_mode_enabled(vha) ||
	    qla_dual_mode_enabled(vha)) {
		/* FW auto send SCSI status during */
		ha->fw_options[1] |= BIT_8;
		/* Upper byte of fw_options[10] carries the busy status. */
		ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;

		/* FW perform Exchange validation */
		ha->fw_options[2] |= BIT_4;
	} else {
		ha->fw_options[1] &= ~BIT_8;
		ha->fw_options[10] &= 0x00ff;

		ha->fw_options[2] &= ~BIT_4;
	}

	if (ql2xetsenable) {
		/*
		 * Enable ETS Burst.
		 * NOTE(review): this memset discards every option bit set
		 * above when ql2xetsenable is on -- matches shipped behavior,
		 * but verify that is intentional.
		 */
		memset(ha->fw_options, 0, sizeof(ha->fw_options));
		ha->fw_options[2] |= BIT_9;
	}

	ql_dbg(ql_dbg_init, vha, 0x00e9,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	qla2x00_set_fw_options(vha, ha->fw_options);
}
8176
/*
 * qla24xx_get_fcp_prio
 *	Gets the fcp cmd priority value for the logged in port.
 *	Looks for a match of the port descriptors within
 *	each of the fcp prio config entries. If a match is found,
 *	the tag (priority) value is returned.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	non-zero (if found)
 *	-1 (if not found)
 *
 * Context:
 *	 Kernel context
 */
static int
qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int i, entries;
	uint8_t pid_match, wwn_match;
	int priority;
	uint32_t pid1, pid2;
	uint64_t wwn1, wwn2;
	struct qla_fcp_prio_entry *pri_entry;
	struct qla_hw_data *ha = vha->hw;

	/* No config loaded, or feature disabled. */
	if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
		return -1;

	priority = -1;
	entries = ha->fcp_prio_cfg->num_entries;
	pri_entry = &ha->fcp_prio_cfg->entry[0];

	/*
	 * An entry matches when BOTH its source and destination descriptors
	 * match (pid_match == 2) or both WWNs match (wwn_match == 2).
	 * An all-ones PID/WWN in the entry acts as a wildcard.
	 */
	for (i = 0; i < entries; i++) {
		pid_match = wwn_match = 0;

		if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
			pri_entry++;
			continue;
		}

		/* check source pid for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
			pid1 = pri_entry->src_pid & INVALID_PORT_ID;
			pid2 = vha->d_id.b24 & INVALID_PORT_ID;
			if (pid1 == INVALID_PORT_ID)
				pid_match++;
			else if (pid1 == pid2)
				pid_match++;
		}

		/* check destination pid for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
			pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
			pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
			if (pid1 == INVALID_PORT_ID)
				pid_match++;
			else if (pid1 == pid2)
				pid_match++;
		}

		/* check source WWN for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
			wwn1 = wwn_to_u64(vha->port_name);
			wwn2 = wwn_to_u64(pri_entry->src_wwpn);
			if (wwn2 == (uint64_t)-1)
				wwn_match++;
			else if (wwn1 == wwn2)
				wwn_match++;
		}

		/* check destination WWN for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
			wwn1 = wwn_to_u64(fcport->port_name);
			wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
			if (wwn2 == (uint64_t)-1)
				wwn_match++;
			else if (wwn1 == wwn2)
				wwn_match++;
		}

		if (pid_match == 2 || wwn_match == 2) {
			/* Found a matching entry */
			if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
				priority = pri_entry->tag;
			break;
		}

		pri_entry++;
	}

	return priority;
}
8273
8274 /*
8275 * qla24xx_update_fcport_fcp_prio
8276 * Activates fcp priority for the logged in fc port
8277 *
8278 * Input:
8279 * vha = scsi host structure pointer.
8280 * fcp = port structure pointer.
8281 *
8282 * Return:
8283 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8284 *
8285 * Context:
8286 * Kernel context.
8287 */
8288 int
8289 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
8290 {
8291 int ret;
8292 int priority;
8293 uint16_t mb[5];
8294
8295 if (fcport->port_type != FCT_TARGET ||
8296 fcport->loop_id == FC_NO_LOOP_ID)
8297 return QLA_FUNCTION_FAILED;
8298
8299 priority = qla24xx_get_fcp_prio(vha, fcport);
8300 if (priority < 0)
8301 return QLA_FUNCTION_FAILED;
8302
8303 if (IS_P3P_TYPE(vha->hw)) {
8304 fcport->fcp_prio = priority & 0xf;
8305 return QLA_SUCCESS;
8306 }
8307
8308 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
8309 if (ret == QLA_SUCCESS) {
8310 if (fcport->fcp_prio != priority)
8311 ql_dbg(ql_dbg_user, vha, 0x709e,
8312 "Updated FCP_CMND priority - value=%d loop_id=%d "
8313 "port_id=%02x%02x%02x.\n", priority,
8314 fcport->loop_id, fcport->d_id.b.domain,
8315 fcport->d_id.b.area, fcport->d_id.b.al_pa);
8316 fcport->fcp_prio = priority & 0xf;
8317 } else
8318 ql_dbg(ql_dbg_user, vha, 0x704f,
8319 "Unable to update FCP_CMND priority - ret=0x%x for "
8320 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
8321 fcport->d_id.b.domain, fcport->d_id.b.area,
8322 fcport->d_id.b.al_pa);
8323 return ret;
8324 }
8325
8326 /*
8327 * qla24xx_update_all_fcp_prio
8328 * Activates fcp priority for all the logged in ports
8329 *
8330 * Input:
8331 * ha = adapter block pointer.
8332 *
8333 * Return:
8334 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8335 *
8336 * Context:
8337 * Kernel context.
8338 */
8339 int
8340 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
8341 {
8342 int ret;
8343 fc_port_t *fcport;
8344
8345 ret = QLA_FUNCTION_FAILED;
8346 /* We need to set priority for all logged in ports */
8347 list_for_each_entry(fcport, &vha->vp_fcports, list)
8348 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
8349
8350 return ret;
8351 }
8352
8353 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
8354 int vp_idx, bool startqp)
8355 {
8356 int rsp_id = 0;
8357 int req_id = 0;
8358 int i;
8359 struct qla_hw_data *ha = vha->hw;
8360 uint16_t qpair_id = 0;
8361 struct qla_qpair *qpair = NULL;
8362 struct qla_msix_entry *msix;
8363
8364 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
8365 ql_log(ql_log_warn, vha, 0x00181,
8366 "FW/Driver is not multi-queue capable.\n");
8367 return NULL;
8368 }
8369
8370 if (ql2xmqsupport || ql2xnvmeenable) {
8371 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
8372 if (qpair == NULL) {
8373 ql_log(ql_log_warn, vha, 0x0182,
8374 "Failed to allocate memory for queue pair.\n");
8375 return NULL;
8376 }
8377 memset(qpair, 0, sizeof(struct qla_qpair));
8378
8379 qpair->hw = vha->hw;
8380 qpair->vha = vha;
8381 qpair->qp_lock_ptr = &qpair->qp_lock;
8382 spin_lock_init(&qpair->qp_lock);
8383 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
8384
8385 /* Assign available que pair id */
8386 mutex_lock(&ha->mq_lock);
8387 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
8388 if (ha->num_qpairs >= ha->max_qpairs) {
8389 mutex_unlock(&ha->mq_lock);
8390 ql_log(ql_log_warn, vha, 0x0183,
8391 "No resources to create additional q pair.\n");
8392 goto fail_qid_map;
8393 }
8394 ha->num_qpairs++;
8395 set_bit(qpair_id, ha->qpair_qid_map);
8396 ha->queue_pair_map[qpair_id] = qpair;
8397 qpair->id = qpair_id;
8398 qpair->vp_idx = vp_idx;
8399 qpair->fw_started = ha->flags.fw_started;
8400 INIT_LIST_HEAD(&qpair->hints_list);
8401 INIT_LIST_HEAD(&qpair->nvme_done_list);
8402 qpair->chip_reset = ha->base_qpair->chip_reset;
8403 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
8404 qpair->enable_explicit_conf =
8405 ha->base_qpair->enable_explicit_conf;
8406
8407 for (i = 0; i < ha->msix_count; i++) {
8408 msix = &ha->msix_entries[i];
8409 if (msix->in_use)
8410 continue;
8411 qpair->msix = msix;
8412 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
8413 "Vector %x selected for qpair\n", msix->vector);
8414 break;
8415 }
8416 if (!qpair->msix) {
8417 ql_log(ql_log_warn, vha, 0x0184,
8418 "Out of MSI-X vectors!.\n");
8419 goto fail_msix;
8420 }
8421
8422 qpair->msix->in_use = 1;
8423 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
8424 qpair->pdev = ha->pdev;
8425 if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
8426 qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
8427
8428 mutex_unlock(&ha->mq_lock);
8429
8430 /* Create response queue first */
8431 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
8432 if (!rsp_id) {
8433 ql_log(ql_log_warn, vha, 0x0185,
8434 "Failed to create response queue.\n");
8435 goto fail_rsp;
8436 }
8437
8438 qpair->rsp = ha->rsp_q_map[rsp_id];
8439
8440 /* Create request queue */
8441 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
8442 startqp);
8443 if (!req_id) {
8444 ql_log(ql_log_warn, vha, 0x0186,
8445 "Failed to create request queue.\n");
8446 goto fail_req;
8447 }
8448
8449 qpair->req = ha->req_q_map[req_id];
8450 qpair->rsp->req = qpair->req;
8451 qpair->rsp->qpair = qpair;
8452 /* init qpair to this cpu. Will adjust at run time. */
8453 qla_cpu_update(qpair, smp_processor_id());
8454
8455 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
8456 if (ha->fw_attributes & BIT_4)
8457 qpair->difdix_supported = 1;
8458 }
8459
8460 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
8461 if (!qpair->srb_mempool) {
8462 ql_log(ql_log_warn, vha, 0xd036,
8463 "Failed to create srb mempool for qpair %d\n",
8464 qpair->id);
8465 goto fail_mempool;
8466 }
8467
8468 /* Mark as online */
8469 qpair->online = 1;
8470
8471 if (!vha->flags.qpairs_available)
8472 vha->flags.qpairs_available = 1;
8473
8474 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
8475 "Request/Response queue pair created, id %d\n",
8476 qpair->id);
8477 ql_dbg(ql_dbg_init, vha, 0x0187,
8478 "Request/Response queue pair created, id %d\n",
8479 qpair->id);
8480 }
8481 return qpair;
8482
8483 fail_mempool:
8484 fail_req:
8485 qla25xx_delete_rsp_que(vha, qpair->rsp);
8486 fail_rsp:
8487 mutex_lock(&ha->mq_lock);
8488 qpair->msix->in_use = 0;
8489 list_del(&qpair->qp_list_elem);
8490 if (list_empty(&vha->qp_list))
8491 vha->flags.qpairs_available = 0;
8492 fail_msix:
8493 ha->queue_pair_map[qpair_id] = NULL;
8494 clear_bit(qpair_id, ha->qpair_qid_map);
8495 ha->num_qpairs--;
8496 mutex_unlock(&ha->mq_lock);
8497 fail_qid_map:
8498 kfree(qpair);
8499 return NULL;
8500 }
8501
/*
 * qla2xxx_delete_qpair
 *	Tear down a queue pair created by qla2xxx_create_qpair(): wait for
 *	outstanding references to drain, delete the request and response
 *	queues, then release the qpair id, mempool, and memory.
 *
 * Returns QLA_SUCCESS, or the queue-deletion error (qpair left intact).
 */
int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
{
	int ret = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = qpair->hw;

	/* Block new users, then wait for in-flight references to drop. */
	qpair->delete_in_progress = 1;
	while (atomic_read(&qpair->ref_count))
		msleep(500);

	ret = qla25xx_delete_req_que(vha, qpair->req);
	if (ret != QLA_SUCCESS)
		goto fail;

	ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
	if (ret != QLA_SUCCESS)
		goto fail;

	/* Release bookkeeping under the multi-queue mutex. */
	mutex_lock(&ha->mq_lock);
	ha->queue_pair_map[qpair->id] = NULL;
	clear_bit(qpair->id, ha->qpair_qid_map);
	ha->num_qpairs--;
	list_del(&qpair->qp_list_elem);
	if (list_empty(&vha->qp_list)) {
		vha->flags.qpairs_available = 0;
		vha->flags.qpairs_req_created = 0;
		vha->flags.qpairs_rsp_created = 0;
	}
	mempool_destroy(qpair->srb_mempool);
	kfree(qpair);
	mutex_unlock(&ha->mq_lock);

	return QLA_SUCCESS;
fail:
	return ret;
}