drivers/scsi/qla2xxx/qla_init.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_gbl.h"
9
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13
14 #include "qla_devtbl.h"
15
16 #ifdef CONFIG_SPARC
17 #include <asm/prom.h>
18 #endif
19
20 /*
21 * QLogic ISP2x00 Hardware Support Function Prototypes.
22 */
23 static int qla2x00_isp_firmware(scsi_qla_host_t *);
24 static int qla2x00_setup_chip(scsi_qla_host_t *);
25 static int qla2x00_init_rings(scsi_qla_host_t *);
26 static int qla2x00_fw_ready(scsi_qla_host_t *);
27 static int qla2x00_configure_hba(scsi_qla_host_t *);
28 static int qla2x00_configure_loop(scsi_qla_host_t *);
29 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
30 static int qla2x00_configure_fabric(scsi_qla_host_t *);
31 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
32 static int qla2x00_device_resync(scsi_qla_host_t *);
33 static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
34 uint16_t *);
35
36 static int qla2x00_restart_isp(scsi_qla_host_t *);
37
38 static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
39
40 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
41 static int qla84xx_init_chip(scsi_qla_host_t *);
42 static int qla25xx_init_queues(struct qla_hw_data *);
43
44 /* SRB Extensions ---------------------------------------------------------- */
45
46 static void
47 qla2x00_ctx_sp_timeout(unsigned long __data)
48 {
49 srb_t *sp = (srb_t *)__data;
50 struct srb_ctx *ctx;
51 struct srb_iocb *iocb;
52 fc_port_t *fcport = sp->fcport;
53 struct qla_hw_data *ha = fcport->vha->hw;
54 struct req_que *req;
55 unsigned long flags;
56
57 spin_lock_irqsave(&ha->hardware_lock, flags);
58 req = ha->req_q_map[0];
59 req->outstanding_cmds[sp->handle] = NULL;
60 ctx = sp->ctx;
61 iocb = ctx->u.iocb_cmd;
62 iocb->timeout(sp);
63 iocb->free(sp);
64 spin_unlock_irqrestore(&ha->hardware_lock, flags);
65 }
66
67 void
68 qla2x00_ctx_sp_free(srb_t *sp)
69 {
70 struct srb_ctx *ctx = sp->ctx;
71 struct srb_iocb *iocb = ctx->u.iocb_cmd;
72
73 del_timer_sync(&iocb->timer);
74 kfree(iocb);
75 kfree(ctx);
76 mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
77 }
78
79 inline srb_t *
80 qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
81 unsigned long tmo)
82 {
83 srb_t *sp;
84 struct qla_hw_data *ha = vha->hw;
85 struct srb_ctx *ctx;
86 struct srb_iocb *iocb;
87
88 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
89 if (!sp)
90 goto done;
91 ctx = kzalloc(size, GFP_KERNEL);
92 if (!ctx) {
93 mempool_free(sp, ha->srb_mempool);
94 sp = NULL;
95 goto done;
96 }
97 iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
98 if (!iocb) {
99 mempool_free(sp, ha->srb_mempool);
100 sp = NULL;
101 kfree(ctx);
102 goto done;
103 }
104
105 memset(sp, 0, sizeof(*sp));
106 sp->fcport = fcport;
107 sp->ctx = ctx;
108 ctx->u.iocb_cmd = iocb;
109 iocb->free = qla2x00_ctx_sp_free;
110
111 init_timer(&iocb->timer);
112 if (!tmo)
113 goto done;
114 iocb->timer.expires = jiffies + tmo * HZ;
115 iocb->timer.data = (unsigned long)sp;
116 iocb->timer.function = qla2x00_ctx_sp_timeout;
117 add_timer(&iocb->timer);
118 done:
119 return sp;
120 }
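/*
 * Editor's note -- illustrative sketch only, not part of the original
 * driver source: callers pair qla2x00_get_ctx_sp() with
 * qla2x00_start_sp() and rely on the iocb free() hook for cleanup on
 * the error path, e.g.
 *
 *	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), tmo);
 *	if (!sp)
 *		return QLA_FUNCTION_FAILED;
 *	...fill in the ctx type, name and iocb callbacks...
 *	rval = qla2x00_start_sp(sp);
 *	if (rval != QLA_SUCCESS)
 *		ctx->u.iocb_cmd->free(sp);
 *
 * The asynchronous login/logout/ADISC/TM helpers below all follow this
 * pattern.
 */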
121
122 /* Asynchronous Login/Logout Routines -------------------------------------- */
123
124 static inline unsigned long
125 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
126 {
127 unsigned long tmo;
128 struct qla_hw_data *ha = vha->hw;
129
130 /* Firmware should use switch negotiated r_a_tov for timeout. */
131 tmo = ha->r_a_tov / 10 * 2;
132 if (!IS_FWI2_CAPABLE(ha)) {
133 /*
134 * Except for earlier ISPs where the timeout is seeded from the
135 * initialization control block.
136 */
137 tmo = ha->login_timeout;
138 }
139 return tmo;
140 }
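/*
 * Editor's note (worked example, assuming r_a_tov is stored in 100 ms
 * units as the divide-by-ten above implies): a switch-negotiated
 * R_A_TOV of 10 seconds is kept as 100, giving tmo = 100 / 10 * 2 = 20
 * seconds, i.e. twice R_A_TOV, before the callers add their own slack.
 */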
141
142 static void
143 qla2x00_async_iocb_timeout(srb_t *sp)
144 {
145 fc_port_t *fcport = sp->fcport;
146 struct srb_ctx *ctx = sp->ctx;
147
148 DEBUG2(printk(KERN_WARNING
149 "scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n",
150 fcport->vha->host_no, sp->handle,
151 ctx->name, fcport->d_id.b.domain,
152 fcport->d_id.b.area, fcport->d_id.b.al_pa));
153
154 fcport->flags &= ~FCF_ASYNC_SENT;
155 if (ctx->type == SRB_LOGIN_CMD) {
156 struct srb_iocb *lio = ctx->u.iocb_cmd;
157 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
158 /* Retry as needed. */
159 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
160 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
161 QLA_LOGIO_LOGIN_RETRIED : 0;
162 qla2x00_post_async_login_done_work(fcport->vha, fcport,
163 lio->u.logio.data);
164 }
165 }
166
167 static void
168 qla2x00_async_login_ctx_done(srb_t *sp)
169 {
170 struct srb_ctx *ctx = sp->ctx;
171 struct srb_iocb *lio = ctx->u.iocb_cmd;
172
173 qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
174 lio->u.logio.data);
175 lio->free(sp);
176 }
177
178 int
179 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
180 uint16_t *data)
181 {
182 srb_t *sp;
183 struct srb_ctx *ctx;
184 struct srb_iocb *lio;
185 int rval;
186
187 rval = QLA_FUNCTION_FAILED;
188 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
189 qla2x00_get_async_timeout(vha) + 2);
190 if (!sp)
191 goto done;
192
193 ctx = sp->ctx;
194 ctx->type = SRB_LOGIN_CMD;
195 ctx->name = "login";
196 lio = ctx->u.iocb_cmd;
197 lio->timeout = qla2x00_async_iocb_timeout;
198 lio->done = qla2x00_async_login_ctx_done;
199 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
200 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
201 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
202 rval = qla2x00_start_sp(sp);
203 if (rval != QLA_SUCCESS)
204 goto done_free_sp;
205
206 DEBUG2(printk(KERN_DEBUG
207 "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x "
208 "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id,
209 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
210 fcport->login_retry));
211 return rval;
212
213 done_free_sp:
214 lio->free(sp);
215 done:
216 return rval;
217 }
218
219 static void
220 qla2x00_async_logout_ctx_done(srb_t *sp)
221 {
222 struct srb_ctx *ctx = sp->ctx;
223 struct srb_iocb *lio = ctx->u.iocb_cmd;
224
225 qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
226 lio->u.logio.data);
227 lio->free(sp);
228 }
229
230 int
231 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
232 {
233 srb_t *sp;
234 struct srb_ctx *ctx;
235 struct srb_iocb *lio;
236 int rval;
237
238 rval = QLA_FUNCTION_FAILED;
239 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
240 qla2x00_get_async_timeout(vha) + 2);
241 if (!sp)
242 goto done;
243
244 ctx = sp->ctx;
245 ctx->type = SRB_LOGOUT_CMD;
246 ctx->name = "logout";
247 lio = ctx->u.iocb_cmd;
248 lio->timeout = qla2x00_async_iocb_timeout;
249 lio->done = qla2x00_async_logout_ctx_done;
250 rval = qla2x00_start_sp(sp);
251 if (rval != QLA_SUCCESS)
252 goto done_free_sp;
253
254 DEBUG2(printk(KERN_DEBUG
255 "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
256 fcport->vha->host_no, sp->handle, fcport->loop_id,
257 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
258 return rval;
259
260 done_free_sp:
261 lio->free(sp);
262 done:
263 return rval;
264 }
265
266 static void
267 qla2x00_async_adisc_ctx_done(srb_t *sp)
268 {
269 struct srb_ctx *ctx = sp->ctx;
270 struct srb_iocb *lio = ctx->u.iocb_cmd;
271
272 qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
273 lio->u.logio.data);
274 lio->free(sp);
275 }
276
277 int
278 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
279 uint16_t *data)
280 {
281 srb_t *sp;
282 struct srb_ctx *ctx;
283 struct srb_iocb *lio;
284 int rval;
285
286 rval = QLA_FUNCTION_FAILED;
287 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
288 qla2x00_get_async_timeout(vha) + 2);
289 if (!sp)
290 goto done;
291
292 ctx = sp->ctx;
293 ctx->type = SRB_ADISC_CMD;
294 ctx->name = "adisc";
295 lio = ctx->u.iocb_cmd;
296 lio->timeout = qla2x00_async_iocb_timeout;
297 lio->done = qla2x00_async_adisc_ctx_done;
298 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
299 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
300 rval = qla2x00_start_sp(sp);
301 if (rval != QLA_SUCCESS)
302 goto done_free_sp;
303
304 DEBUG2(printk(KERN_DEBUG
305 "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n",
306 fcport->vha->host_no, sp->handle, fcport->loop_id,
307 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
308
309 return rval;
310
311 done_free_sp:
312 lio->free(sp);
313 done:
314 return rval;
315 }
316
317 static void
318 qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
319 {
320 struct srb_ctx *ctx = sp->ctx;
321 struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
322
323 qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
324 iocb->free(sp);
325 }
326
327 int
328 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
329 uint32_t tag)
330 {
331 struct scsi_qla_host *vha = fcport->vha;
332 srb_t *sp;
333 struct srb_ctx *ctx;
334 struct srb_iocb *tcf;
335 int rval;
336
337 rval = QLA_FUNCTION_FAILED;
338 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
339 qla2x00_get_async_timeout(vha) + 2);
340 if (!sp)
341 goto done;
342
343 ctx = sp->ctx;
344 ctx->type = SRB_TM_CMD;
345 ctx->name = "tmf";
346 tcf = ctx->u.iocb_cmd;
347 tcf->u.tmf.flags = flags;
348 tcf->u.tmf.lun = lun;
349 tcf->u.tmf.data = tag;
350 tcf->timeout = qla2x00_async_iocb_timeout;
351 tcf->done = qla2x00_async_tm_cmd_ctx_done;
352
353 rval = qla2x00_start_sp(sp);
354 if (rval != QLA_SUCCESS)
355 goto done_free_sp;
356
357 DEBUG2(printk(KERN_DEBUG
358 "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n",
359 fcport->vha->host_no, sp->handle, fcport->loop_id,
360 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
361
362 return rval;
363
364 done_free_sp:
365 tcf->free(sp);
366 done:
367 return rval;
368 }
369
370 void
371 qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
372 uint16_t *data)
373 {
374 int rval;
375
376 switch (data[0]) {
377 case MBS_COMMAND_COMPLETE:
378 if (fcport->flags & FCF_FCP2_DEVICE) {
379 fcport->flags |= FCF_ASYNC_SENT;
380 qla2x00_post_async_adisc_work(vha, fcport, data);
381 break;
382 }
383 qla2x00_update_fcport(vha, fcport);
384 break;
385 case MBS_COMMAND_ERROR:
386 fcport->flags &= ~FCF_ASYNC_SENT;
387 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
388 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
389 else
390 qla2x00_mark_device_lost(vha, fcport, 1, 1);
391 break;
392 case MBS_PORT_ID_USED:
393 fcport->loop_id = data[1];
394 qla2x00_post_async_logout_work(vha, fcport, NULL);
395 qla2x00_post_async_login_work(vha, fcport, NULL);
396 break;
397 case MBS_LOOP_ID_USED:
398 fcport->loop_id++;
399 rval = qla2x00_find_new_loop_id(vha, fcport);
400 if (rval != QLA_SUCCESS) {
401 fcport->flags &= ~FCF_ASYNC_SENT;
402 qla2x00_mark_device_lost(vha, fcport, 1, 1);
403 break;
404 }
405 qla2x00_post_async_login_work(vha, fcport, NULL);
406 break;
407 }
408 return;
409 }
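/*
 * Editor's note, summarizing the completion handling above:
 * MBS_COMMAND_COMPLETE registers the port (via ADISC first for FCP-2
 * devices), MBS_COMMAND_ERROR either schedules a relogin or marks the
 * device lost, MBS_PORT_ID_USED adopts the firmware-supplied loop ID
 * and retries after a logout, and MBS_LOOP_ID_USED picks a fresh loop
 * ID before retrying the login.
 */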
410
411 void
412 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
413 uint16_t *data)
414 {
415 qla2x00_mark_device_lost(vha, fcport, 1, 0);
416 return;
417 }
418
419 void
420 qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
421 uint16_t *data)
422 {
423 if (data[0] == MBS_COMMAND_COMPLETE) {
424 qla2x00_update_fcport(vha, fcport);
425
426 return;
427 }
428
429 /* Retry login. */
430 fcport->flags &= ~FCF_ASYNC_SENT;
431 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
432 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
433 else
434 qla2x00_mark_device_lost(vha, fcport, 1, 1);
435
436 return;
437 }
438
439 void
440 qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
441 struct srb_iocb *iocb)
442 {
443 int rval;
444 uint32_t flags;
445 uint16_t lun;
446
447 flags = iocb->u.tmf.flags;
448 lun = (uint16_t)iocb->u.tmf.lun;
449
450 /* Issue Marker IOCB */
451 rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
452 vha->hw->rsp_q_map[0], fcport->loop_id, lun,
453 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
454
455 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
456 DEBUG2_3_11(printk(KERN_WARNING
457 "%s(%ld): TM IOCB failed (%x).\n",
458 __func__, vha->host_no, rval));
459 }
460
461 return;
462 }
463
464 /****************************************************************************/
465 /* QLogic ISP2x00 Hardware Support Functions. */
466 /****************************************************************************/
467
468 /*
469 * qla2x00_initialize_adapter
470 * Initialize board.
471 *
472 * Input:
473 * ha = adapter block pointer.
474 *
475 * Returns:
476 * 0 = success
477 */
478 int
479 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
480 {
481 int rval;
482 struct qla_hw_data *ha = vha->hw;
483 struct req_que *req = ha->req_q_map[0];
484
485 /* Clear adapter flags. */
486 vha->flags.online = 0;
487 ha->flags.chip_reset_done = 0;
488 vha->flags.reset_active = 0;
489 ha->flags.pci_channel_io_perm_failure = 0;
490 ha->flags.eeh_busy = 0;
491 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
492 atomic_set(&vha->loop_state, LOOP_DOWN);
493 vha->device_flags = DFLG_NO_CABLE;
494 vha->dpc_flags = 0;
495 vha->flags.management_server_logged_in = 0;
496 vha->marker_needed = 0;
497 ha->isp_abort_cnt = 0;
498 ha->beacon_blink_led = 0;
499
500 set_bit(0, ha->req_qid_map);
501 set_bit(0, ha->rsp_qid_map);
502
503 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
504 rval = ha->isp_ops->pci_config(vha);
505 if (rval) {
506 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
507 vha->host_no));
508 return (rval);
509 }
510
511 ha->isp_ops->reset_chip(vha);
512
513 rval = qla2xxx_get_flash_info(vha);
514 if (rval) {
515 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
516 vha->host_no));
517 return (rval);
518 }
519
520 ha->isp_ops->get_flash_version(vha, req->ring);
521
522 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
523
524 ha->isp_ops->nvram_config(vha);
525
526 if (ha->flags.disable_serdes) {
527 /* Mask HBA via NVRAM settings? */
528 qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
529 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
530 vha->port_name[0], vha->port_name[1],
531 vha->port_name[2], vha->port_name[3],
532 vha->port_name[4], vha->port_name[5],
533 vha->port_name[6], vha->port_name[7]);
534 return QLA_FUNCTION_FAILED;
535 }
536
537 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
538
539 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
540 rval = ha->isp_ops->chip_diag(vha);
541 if (rval)
542 return (rval);
543 rval = qla2x00_setup_chip(vha);
544 if (rval)
545 return (rval);
546 }
547
548 if (IS_QLA84XX(ha)) {
549 ha->cs84xx = qla84xx_get_chip(vha);
550 if (!ha->cs84xx) {
551 qla_printk(KERN_ERR, ha,
552 "Unable to configure ISP84XX.\n");
553 return QLA_FUNCTION_FAILED;
554 }
555 }
556 rval = qla2x00_init_rings(vha);
557 ha->flags.chip_reset_done = 1;
558
559 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
560 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
561 rval = qla84xx_init_chip(vha);
562 if (rval != QLA_SUCCESS) {
563 qla_printk(KERN_ERR, ha,
564 "Unable to initialize ISP84XX.\n");
565 qla84xx_put_chip(vha);
566 }
567 }
568
569 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
570 qla24xx_read_fcp_prio_cfg(vha);
571
572 return (rval);
573 }
574
575 /**
576 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
577 * @ha: HA context
578 *
579 * Returns 0 on success.
580 */
581 int
582 qla2100_pci_config(scsi_qla_host_t *vha)
583 {
584 uint16_t w;
585 unsigned long flags;
586 struct qla_hw_data *ha = vha->hw;
587 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
588
589 pci_set_master(ha->pdev);
590 pci_try_set_mwi(ha->pdev);
591
592 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
593 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
594 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
595
596 pci_disable_rom(ha->pdev);
597
598 /* Get PCI bus information. */
599 spin_lock_irqsave(&ha->hardware_lock, flags);
600 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
601 spin_unlock_irqrestore(&ha->hardware_lock, flags);
602
603 return QLA_SUCCESS;
604 }
605
606 /**
607 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
608 * @ha: HA context
609 *
610 * Returns 0 on success.
611 */
612 int
613 qla2300_pci_config(scsi_qla_host_t *vha)
614 {
615 uint16_t w;
616 unsigned long flags = 0;
617 uint32_t cnt;
618 struct qla_hw_data *ha = vha->hw;
619 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
620
621 pci_set_master(ha->pdev);
622 pci_try_set_mwi(ha->pdev);
623
624 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
625 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
626
627 if (IS_QLA2322(ha) || IS_QLA6322(ha))
628 w &= ~PCI_COMMAND_INTX_DISABLE;
629 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
630
631 /*
632 * If this is a 2300 card and not 2312, reset the
633 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
634 * the 2310 also reports itself as a 2300 so we need to get the
635 * fb revision level -- a 6 indicates it really is a 2300 and
636 * not a 2310.
637 */
638 if (IS_QLA2300(ha)) {
639 spin_lock_irqsave(&ha->hardware_lock, flags);
640
641 /* Pause RISC. */
642 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
643 for (cnt = 0; cnt < 30000; cnt++) {
644 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
645 break;
646
647 udelay(10);
648 }
649
650 /* Select FPM registers. */
651 WRT_REG_WORD(&reg->ctrl_status, 0x20);
652 RD_REG_WORD(&reg->ctrl_status);
653
654 /* Get the fb rev level */
655 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
656
657 if (ha->fb_rev == FPM_2300)
658 pci_clear_mwi(ha->pdev);
659
660 /* Deselect FPM registers. */
661 WRT_REG_WORD(&reg->ctrl_status, 0x0);
662 RD_REG_WORD(&reg->ctrl_status);
663
664 /* Release RISC module. */
665 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
666 for (cnt = 0; cnt < 30000; cnt++) {
667 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
668 break;
669
670 udelay(10);
671 }
672
673 spin_unlock_irqrestore(&ha->hardware_lock, flags);
674 }
675
676 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
677
678 pci_disable_rom(ha->pdev);
679
680 /* Get PCI bus information. */
681 spin_lock_irqsave(&ha->hardware_lock, flags);
682 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
683 spin_unlock_irqrestore(&ha->hardware_lock, flags);
684
685 return QLA_SUCCESS;
686 }
687
688 /**
689 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
690 * @ha: HA context
691 *
692 * Returns 0 on success.
693 */
694 int
695 qla24xx_pci_config(scsi_qla_host_t *vha)
696 {
697 uint16_t w;
698 unsigned long flags = 0;
699 struct qla_hw_data *ha = vha->hw;
700 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
701
702 pci_set_master(ha->pdev);
703 pci_try_set_mwi(ha->pdev);
704
705 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
706 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
707 w &= ~PCI_COMMAND_INTX_DISABLE;
708 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
709
710 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
711
712 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
713 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
714 pcix_set_mmrbc(ha->pdev, 2048);
715
716 /* PCIe -- adjust Maximum Read Request Size (2048). */
717 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
718 pcie_set_readrq(ha->pdev, 2048);
719
720 pci_disable_rom(ha->pdev);
721
722 ha->chip_revision = ha->pdev->revision;
723
724 /* Get PCI bus information. */
725 spin_lock_irqsave(&ha->hardware_lock, flags);
726 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
727 spin_unlock_irqrestore(&ha->hardware_lock, flags);
728
729 return QLA_SUCCESS;
730 }
731
732 /**
733 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
734 * @ha: HA context
735 *
736 * Returns 0 on success.
737 */
738 int
739 qla25xx_pci_config(scsi_qla_host_t *vha)
740 {
741 uint16_t w;
742 struct qla_hw_data *ha = vha->hw;
743
744 pci_set_master(ha->pdev);
745 pci_try_set_mwi(ha->pdev);
746
747 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
748 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
749 w &= ~PCI_COMMAND_INTX_DISABLE;
750 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
751
752 /* PCIe -- adjust Maximum Read Request Size (2048). */
753 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
754 pcie_set_readrq(ha->pdev, 2048);
755
756 pci_disable_rom(ha->pdev);
757
758 ha->chip_revision = ha->pdev->revision;
759
760 return QLA_SUCCESS;
761 }
762
763 /**
764 * qla2x00_isp_firmware() - Choose firmware image.
765 * @ha: HA context
766 *
767 * Returns 0 on success.
768 */
769 static int
770 qla2x00_isp_firmware(scsi_qla_host_t *vha)
771 {
772 int rval;
773 uint16_t loop_id, topo, sw_cap;
774 uint8_t domain, area, al_pa;
775 struct qla_hw_data *ha = vha->hw;
776
777 /* Assume loading risc code */
778 rval = QLA_FUNCTION_FAILED;
779
780 if (ha->flags.disable_risc_code_load) {
781 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
782 vha->host_no));
783 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
784
785 /* Verify checksum of loaded RISC code. */
786 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
787 if (rval == QLA_SUCCESS) {
788 /* And, verify we are not in ROM code. */
789 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
790 &area, &domain, &topo, &sw_cap);
791 }
792 }
793
794 if (rval) {
795 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
796 vha->host_no));
797 }
798
799 return (rval);
800 }
801
802 /**
803 * qla2x00_reset_chip() - Reset ISP chip.
804 * @ha: HA context
805 *
806 * This function does not return a value.
807 */
808 void
809 qla2x00_reset_chip(scsi_qla_host_t *vha)
810 {
811 unsigned long flags = 0;
812 struct qla_hw_data *ha = vha->hw;
813 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
814 uint32_t cnt;
815 uint16_t cmd;
816
817 if (unlikely(pci_channel_offline(ha->pdev)))
818 return;
819
820 ha->isp_ops->disable_intrs(ha);
821
822 spin_lock_irqsave(&ha->hardware_lock, flags);
823
824 /* Turn off master enable */
825 cmd = 0;
826 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
827 cmd &= ~PCI_COMMAND_MASTER;
828 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
829
830 if (!IS_QLA2100(ha)) {
831 /* Pause RISC. */
832 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
833 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
834 for (cnt = 0; cnt < 30000; cnt++) {
835 if ((RD_REG_WORD(&reg->hccr) &
836 HCCR_RISC_PAUSE) != 0)
837 break;
838 udelay(100);
839 }
840 } else {
841 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
842 udelay(10);
843 }
844
845 /* Select FPM registers. */
846 WRT_REG_WORD(&reg->ctrl_status, 0x20);
847 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
848
849 /* FPM Soft Reset. */
850 WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
851 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
852
853 /* Toggle Fpm Reset. */
854 if (!IS_QLA2200(ha)) {
855 WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
856 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
857 }
858
859 /* Select frame buffer registers. */
860 WRT_REG_WORD(&reg->ctrl_status, 0x10);
861 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
862
863 /* Reset frame buffer FIFOs. */
864 if (IS_QLA2200(ha)) {
865 WRT_FB_CMD_REG(ha, reg, 0xa000);
866 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
867 } else {
868 WRT_FB_CMD_REG(ha, reg, 0x00fc);
869
870 /* Read back fb_cmd until zero or 3 seconds max */
871 for (cnt = 0; cnt < 3000; cnt++) {
872 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
873 break;
874 udelay(100);
875 }
876 }
877
878 /* Select RISC module registers. */
879 WRT_REG_WORD(&reg->ctrl_status, 0);
880 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
881
882 /* Reset RISC processor. */
883 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
884 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
885
886 /* Release RISC processor. */
887 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
888 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
889 }
890
891 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
892 WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
893
894 /* Reset ISP chip. */
895 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
896
897 /* Wait for RISC to recover from reset. */
898 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
899 /*
900 * It is necessary to force a delay here since the card doesn't
901 * respond to PCI reads during a reset. On some architectures
902 * this will result in an MCA.
903 */
904 udelay(20);
905 for (cnt = 30000; cnt; cnt--) {
906 if ((RD_REG_WORD(&reg->ctrl_status) &
907 CSR_ISP_SOFT_RESET) == 0)
908 break;
909 udelay(100);
910 }
911 } else
912 udelay(10);
913
914 /* Reset RISC processor. */
915 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
916
917 WRT_REG_WORD(&reg->semaphore, 0);
918
919 /* Release RISC processor. */
920 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
921 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
922
923 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
924 for (cnt = 0; cnt < 30000; cnt++) {
925 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
926 break;
927
928 udelay(100);
929 }
930 } else
931 udelay(100);
932
933 /* Turn on master enable */
934 cmd |= PCI_COMMAND_MASTER;
935 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
936
937 /* Disable RISC pause on FPM parity error. */
938 if (!IS_QLA2100(ha)) {
939 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
940 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
941 }
942
943 spin_unlock_irqrestore(&ha->hardware_lock, flags);
944 }
945
946 /**
947 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
948 * @ha: HA context
949 *
950 * This function does not return a value.
951 */
952 static inline void
953 qla24xx_reset_risc(scsi_qla_host_t *vha)
954 {
955 unsigned long flags = 0;
956 struct qla_hw_data *ha = vha->hw;
957 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
958 uint32_t cnt, d2;
959 uint16_t wd;
960
961 spin_lock_irqsave(&ha->hardware_lock, flags);
962
963 /* Reset RISC. */
964 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
965 for (cnt = 0; cnt < 30000; cnt++) {
966 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
967 break;
968
969 udelay(10);
970 }
971
972 WRT_REG_DWORD(&reg->ctrl_status,
973 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
974 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
975
976 udelay(100);
977 /* Wait for firmware to complete NVRAM accesses. */
978 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
979 for (cnt = 10000 ; cnt && d2; cnt--) {
980 udelay(5);
981 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
982 barrier();
983 }
984
985 /* Wait for soft-reset to complete. */
986 d2 = RD_REG_DWORD(&reg->ctrl_status);
987 for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
988 udelay(5);
989 d2 = RD_REG_DWORD(&reg->ctrl_status);
990 barrier();
991 }
992
993 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
994 RD_REG_DWORD(&reg->hccr);
995
996 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
997 RD_REG_DWORD(&reg->hccr);
998
999 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
1000 RD_REG_DWORD(&reg->hccr);
1001
1002 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1003 for (cnt = 6000000 ; cnt && d2; cnt--) {
1004 udelay(5);
1005 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1006 barrier();
1007 }
1008
1009 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1010
1011 if (IS_NOPOLLING_TYPE(ha))
1012 ha->isp_ops->enable_intrs(ha);
1013 }
1014
1015 /**
1016 * qla24xx_reset_chip() - Reset ISP24xx chip.
1017 * @ha: HA context
1018 *
1019 * This function does not return a value.
1020 */
1021 void
1022 qla24xx_reset_chip(scsi_qla_host_t *vha)
1023 {
1024 struct qla_hw_data *ha = vha->hw;
1025
1026 if (pci_channel_offline(ha->pdev) &&
1027 ha->flags.pci_channel_io_perm_failure) {
1028 return;
1029 }
1030
1031 ha->isp_ops->disable_intrs(ha);
1032
1033 /* Perform RISC reset. */
1034 qla24xx_reset_risc(vha);
1035 }
1036
1037 /**
1038 * qla2x00_chip_diag() - Test chip for proper operation.
1039 * @ha: HA context
1040 *
1041 * Returns 0 on success.
1042 */
1043 int
1044 qla2x00_chip_diag(scsi_qla_host_t *vha)
1045 {
1046 int rval;
1047 struct qla_hw_data *ha = vha->hw;
1048 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1049 unsigned long flags = 0;
1050 uint16_t data;
1051 uint32_t cnt;
1052 uint16_t mb[5];
1053 struct req_que *req = ha->req_q_map[0];
1054
1055 /* Assume a failed state */
1056 rval = QLA_FUNCTION_FAILED;
1057
1058 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
1059 vha->host_no, (u_long)&reg->flash_address));
1060
1061 spin_lock_irqsave(&ha->hardware_lock, flags);
1062
1063 /* Reset ISP chip. */
1064 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1065
1066 /*
1067 * We need to have a delay here since the card will not respond while
1068 * in reset, which can cause an MCA on some architectures.
1069 */
1070 udelay(20);
1071 data = qla2x00_debounce_register(&reg->ctrl_status);
1072 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
1073 udelay(5);
1074 data = RD_REG_WORD(&reg->ctrl_status);
1075 barrier();
1076 }
1077
1078 if (!cnt)
1079 goto chip_diag_failed;
1080
1081 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
1082 vha->host_no));
1083
1084 /* Reset RISC processor. */
1085 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1086 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1087
1088 /* Workaround for QLA2312 PCI parity error */
1089 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1090 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
1091 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
1092 udelay(5);
1093 data = RD_MAILBOX_REG(ha, reg, 0);
1094 barrier();
1095 }
1096 } else
1097 udelay(10);
1098
1099 if (!cnt)
1100 goto chip_diag_failed;
1101
1102 /* Check product ID of chip */
1103 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
1104
1105 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
1106 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
1107 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
1108 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
1109 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
1110 mb[3] != PROD_ID_3) {
1111 qla_printk(KERN_WARNING, ha,
1112 "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]);
1113
1114 goto chip_diag_failed;
1115 }
1116 ha->product_id[0] = mb[1];
1117 ha->product_id[1] = mb[2];
1118 ha->product_id[2] = mb[3];
1119 ha->product_id[3] = mb[4];
1120
1121 /* Adjust fw RISC transfer size */
1122 if (req->length > 1024)
1123 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
1124 else
1125 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
1126 req->length;
1127
1128 if (IS_QLA2200(ha) &&
1129 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
1130 /* Limit firmware transfer size with a 2200A */
1131 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
1132 vha->host_no));
1133
1134 ha->device_type |= DT_ISP2200A;
1135 ha->fw_transfer_size = 128;
1136 }
1137
1138 /* Wrap Incoming Mailboxes Test. */
1139 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1140
1141 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
1142 rval = qla2x00_mbx_reg_test(vha);
1143 if (rval) {
1144 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
1145 vha->host_no));
1146 qla_printk(KERN_WARNING, ha,
1147 "Failed mailbox send register test\n");
1148 }
1149 else {
1150 /* Flag a successful rval */
1151 rval = QLA_SUCCESS;
1152 }
1153 spin_lock_irqsave(&ha->hardware_lock, flags);
1154
1155 chip_diag_failed:
1156 if (rval)
1157 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
1158 "****\n", vha->host_no));
1159
1160 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1161
1162 return (rval);
1163 }
1164
1165 /**
1166 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
1167 * @ha: HA context
1168 *
1169 * Returns 0 on success.
1170 */
1171 int
1172 qla24xx_chip_diag(scsi_qla_host_t *vha)
1173 {
1174 int rval;
1175 struct qla_hw_data *ha = vha->hw;
1176 struct req_que *req = ha->req_q_map[0];
1177
1178 if (IS_QLA82XX(ha))
1179 return QLA_SUCCESS;
1180
1181 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
1182
1183 rval = qla2x00_mbx_reg_test(vha);
1184 if (rval) {
1185 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
1186 vha->host_no));
1187 qla_printk(KERN_WARNING, ha,
1188 "Failed mailbox send register test\n");
1189 } else {
1190 /* Flag a successful rval */
1191 rval = QLA_SUCCESS;
1192 }
1193
1194 return rval;
1195 }
1196
1197 void
1198 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1199 {
1200 int rval;
1201 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
1202 eft_size, fce_size, mq_size;
1203 dma_addr_t tc_dma;
1204 void *tc;
1205 struct qla_hw_data *ha = vha->hw;
1206 struct req_que *req = ha->req_q_map[0];
1207 struct rsp_que *rsp = ha->rsp_q_map[0];
1208
1209 if (ha->fw_dump) {
1210 qla_printk(KERN_WARNING, ha,
1211 "Firmware dump previously allocated.\n");
1212 return;
1213 }
1214
1215 ha->fw_dumped = 0;
1216 fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
1217 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
1218 fixed_size = sizeof(struct qla2100_fw_dump);
1219 } else if (IS_QLA23XX(ha)) {
1220 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
1221 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1222 sizeof(uint16_t);
1223 } else if (IS_FWI2_CAPABLE(ha)) {
1224 if (IS_QLA81XX(ha))
1225 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
1226 else if (IS_QLA25XX(ha))
1227 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
1228 else
1229 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
1230 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1231 sizeof(uint32_t);
1232 if (ha->mqenable)
1233 mq_size = sizeof(struct qla2xxx_mq_chain);
1234 /* Allocate memory for Fibre Channel Event Buffer. */
1235 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
1236 goto try_eft;
1237
1238 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
1239 GFP_KERNEL);
1240 if (!tc) {
1241 qla_printk(KERN_WARNING, ha, "Unable to allocate "
1242 "(%d KB) for FCE.\n", FCE_SIZE / 1024);
1243 goto try_eft;
1244 }
1245
1246 memset(tc, 0, FCE_SIZE);
1247 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
1248 ha->fce_mb, &ha->fce_bufs);
1249 if (rval) {
1250 qla_printk(KERN_WARNING, ha, "Unable to initialize "
1251 "FCE (%d).\n", rval);
1252 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
1253 tc_dma);
1254 ha->flags.fce_enabled = 0;
1255 goto try_eft;
1256 }
1257
1258 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
1259 FCE_SIZE / 1024);
1260
1261 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
1262 ha->flags.fce_enabled = 1;
1263 ha->fce_dma = tc_dma;
1264 ha->fce = tc;
1265 try_eft:
1266 /* Allocate memory for Extended Trace Buffer. */
1267 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
1268 GFP_KERNEL);
1269 if (!tc) {
1270 qla_printk(KERN_WARNING, ha, "Unable to allocate "
1271 "(%d KB) for EFT.\n", EFT_SIZE / 1024);
1272 goto cont_alloc;
1273 }
1274
1275 memset(tc, 0, EFT_SIZE);
1276 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
1277 if (rval) {
1278 qla_printk(KERN_WARNING, ha, "Unable to initialize "
1279 "EFT (%d).\n", rval);
1280 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
1281 tc_dma);
1282 goto cont_alloc;
1283 }
1284
1285 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
1286 EFT_SIZE / 1024);
1287
1288 eft_size = EFT_SIZE;
1289 ha->eft_dma = tc_dma;
1290 ha->eft = tc;
1291 }
1292 cont_alloc:
1293 req_q_size = req->length * sizeof(request_t);
1294 rsp_q_size = rsp->length * sizeof(response_t);
1295
1296 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
1297 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
1298 ha->chain_offset = dump_size;
1299 dump_size += mq_size + fce_size;
1300
1301 ha->fw_dump = vmalloc(dump_size);
1302 if (!ha->fw_dump) {
1303 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
1304 "firmware dump!!!\n", dump_size / 1024);
1305
1306 if (ha->eft) {
1307 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
1308 ha->eft_dma);
1309 ha->eft = NULL;
1310 ha->eft_dma = 0;
1311 }
1312 return;
1313 }
1314 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
1315 dump_size / 1024);
1316
1317 ha->fw_dump_len = dump_size;
1318 ha->fw_dump->signature[0] = 'Q';
1319 ha->fw_dump->signature[1] = 'L';
1320 ha->fw_dump->signature[2] = 'G';
1321 ha->fw_dump->signature[3] = 'C';
1322 ha->fw_dump->version = __constant_htonl(1);
1323
1324 ha->fw_dump->fixed_size = htonl(fixed_size);
1325 ha->fw_dump->mem_size = htonl(mem_size);
1326 ha->fw_dump->req_q_size = htonl(req_q_size);
1327 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
1328
1329 ha->fw_dump->eft_size = htonl(eft_size);
1330 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
1331 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
1332
1333 ha->fw_dump->header_size =
1334 htonl(offsetof(struct qla2xxx_fw_dump, isp));
1335 }
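/*
 * Editor's note on the layout computed above: the vmalloc'ed dump
 * starts with the qla2xxx_fw_dump header, followed by the chip-specific
 * fixed register area, external firmware memory, the request and
 * response rings and the EFT buffer; chain_offset records where the
 * optional MQ and FCE chains are appended for ISPs that support them.
 */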
1336
1337 static int
1338 qla81xx_mpi_sync(scsi_qla_host_t *vha)
1339 {
1340 #define MPS_MASK 0xe0
1341 int rval;
1342 uint16_t dc;
1343 uint32_t dw;
1344 struct qla_hw_data *ha = vha->hw;
1345
1346 if (!IS_QLA81XX(vha->hw))
1347 return QLA_SUCCESS;
1348
1349 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
1350 if (rval != QLA_SUCCESS) {
1351 DEBUG2(qla_printk(KERN_WARNING, ha,
1352 "Sync-MPI: Unable to acquire semaphore.\n"));
1353 goto done;
1354 }
1355
1356 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
1357 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
1358 if (rval != QLA_SUCCESS) {
1359 DEBUG2(qla_printk(KERN_WARNING, ha,
1360 "Sync-MPI: Unable to read sync.\n"));
1361 goto done_release;
1362 }
1363
1364 dc &= MPS_MASK;
1365 if (dc == (dw & MPS_MASK))
1366 goto done_release;
1367
1368 dw &= ~MPS_MASK;
1369 dw |= dc;
1370 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
1371 if (rval != QLA_SUCCESS) {
1372 DEBUG2(qla_printk(KERN_WARNING, ha,
1373 "Sync-MPI: Unable to gain sync.\n"));
1374 }
1375
1376 done_release:
1377 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
1378 if (rval != QLA_SUCCESS) {
1379 DEBUG2(qla_printk(KERN_WARNING, ha,
1380 "Sync-MPI: Unable to release semaphore.\n"));
1381 }
1382
1383 done:
1384 return rval;
1385 }
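/*
 * Editor's note -- worked example with illustrative values: if the PCI
 * config word at offset 0x54 carries MPS bits of 0x60 (dc & 0xe0) while
 * RISC RAM word 0x7a15 reads 0xa5, the rewrite above stores
 * (0xa5 & ~0xe0) | 0x60 = 0x65, bringing the firmware copy in line with
 * the PCI-side setting before releasing the 0x7c00 semaphore.
 */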
1386
1387 /**
1388 * qla2x00_setup_chip() - Load and start RISC firmware.
1389 * @ha: HA context
1390 *
1391 * Returns 0 on success.
1392 */
1393 static int
1394 qla2x00_setup_chip(scsi_qla_host_t *vha)
1395 {
1396 int rval;
1397 uint32_t srisc_address = 0;
1398 struct qla_hw_data *ha = vha->hw;
1399 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1400 unsigned long flags;
1401 uint16_t fw_major_version;
1402
1403 if (IS_QLA82XX(ha)) {
1404 rval = ha->isp_ops->load_risc(vha, &srisc_address);
1405 if (rval == QLA_SUCCESS)
1406 goto enable_82xx_npiv;
1407 else
1408 goto failed;
1409 }
1410
1411 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1412 /* Disable SRAM, Instruction RAM and GP RAM parity. */
1413 spin_lock_irqsave(&ha->hardware_lock, flags);
1414 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
1415 RD_REG_WORD(&reg->hccr);
1416 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1417 }
1418
1419 qla81xx_mpi_sync(vha);
1420
1421 /* Load firmware sequences */
1422 rval = ha->isp_ops->load_risc(vha, &srisc_address);
1423 if (rval == QLA_SUCCESS) {
1424 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
1425 "code.\n", vha->host_no));
1426
1427 rval = qla2x00_verify_checksum(vha, srisc_address);
1428 if (rval == QLA_SUCCESS) {
1429 /* Start firmware execution. */
1430 DEBUG(printk("scsi(%ld): Checksum OK, start "
1431 "firmware.\n", vha->host_no));
1432
1433 rval = qla2x00_execute_fw(vha, srisc_address);
1434 /* Retrieve firmware information. */
1435 if (rval == QLA_SUCCESS) {
1436 enable_82xx_npiv:
1437 fw_major_version = ha->fw_major_version;
1438 rval = qla2x00_get_fw_version(vha,
1439 &ha->fw_major_version,
1440 &ha->fw_minor_version,
1441 &ha->fw_subminor_version,
1442 &ha->fw_attributes, &ha->fw_memory_size,
1443 ha->mpi_version, &ha->mpi_capabilities,
1444 ha->phy_version);
1445 if (rval != QLA_SUCCESS)
1446 goto failed;
1447 ha->flags.npiv_supported = 0;
1448 if (IS_QLA2XXX_MIDTYPE(ha) &&
1449 (ha->fw_attributes & BIT_2)) {
1450 ha->flags.npiv_supported = 1;
1451 if ((!ha->max_npiv_vports) ||
1452 ((ha->max_npiv_vports + 1) %
1453 MIN_MULTI_ID_FABRIC))
1454 ha->max_npiv_vports =
1455 MIN_MULTI_ID_FABRIC - 1;
1456 }
1457 qla2x00_get_resource_cnts(vha, NULL,
1458 &ha->fw_xcb_count, NULL, NULL,
1459 &ha->max_npiv_vports, NULL);
1460
1461 if (!fw_major_version && ql2xallocfwdump) {
1462 if (!IS_QLA82XX(ha))
1463 qla2x00_alloc_fw_dump(vha);
1464 }
1465 }
1466 } else {
1467 DEBUG2(printk(KERN_INFO
1468 "scsi(%ld): ISP Firmware failed checksum.\n",
1469 vha->host_no));
1470 }
1471 }
1472
1473 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1474 /* Enable proper parity. */
1475 spin_lock_irqsave(&ha->hardware_lock, flags);
1476 if (IS_QLA2300(ha))
1477 /* SRAM parity */
1478 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
1479 else
1480 /* SRAM, Instruction RAM and GP RAM parity */
1481 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
1482 RD_REG_WORD(&reg->hccr);
1483 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1484 }
1485
1486 if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1487 uint32_t size;
1488
1489 rval = qla81xx_fac_get_sector_size(vha, &size);
1490 if (rval == QLA_SUCCESS) {
1491 ha->flags.fac_supported = 1;
1492 ha->fdt_block_size = size << 2;
1493 } else {
1494 qla_printk(KERN_ERR, ha,
1495 "Unsupported FAC firmware (%d.%02d.%02d).\n",
1496 ha->fw_major_version, ha->fw_minor_version,
1497 ha->fw_subminor_version);
1498 }
1499 }
1500 failed:
1501 if (rval) {
1502 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
1503 vha->host_no));
1504 }
1505
1506 return (rval);
1507 }
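/*
 * Editor's note (worked example with an illustrative
 * MIN_MULTI_ID_FABRIC of 64 -- the real constant lives in the driver
 * headers): inside qla2x00_setup_chip() above, a firmware-reported
 * max_npiv_vports of 49 fails the (49 + 1) % 64 check and is clamped to
 * 63, while a reported 127 (128 being a multiple of 64) is kept as-is.
 */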
1508
1509 /**
1510 * qla2x00_init_response_q_entries() - Initializes response queue entries.
1511 * @rsp: response queue
1512 *
1513 * Marks every entry in the response ring as already processed so that
1514 * stale entries are not mistaken for new firmware completions.
1515 *
1516 * This function does not return a value.
1517 */
1518 void
1519 qla2x00_init_response_q_entries(struct rsp_que *rsp)
1520 {
1521 uint16_t cnt;
1522 response_t *pkt;
1523
1524 rsp->ring_ptr = rsp->ring;
1525 rsp->ring_index = 0;
1526 rsp->status_srb = NULL;
1527 pkt = rsp->ring_ptr;
1528 for (cnt = 0; cnt < rsp->length; cnt++) {
1529 pkt->signature = RESPONSE_PROCESSED;
1530 pkt++;
1531 }
1532 }
1533
1534 /**
1535 * qla2x00_update_fw_options() - Read and process firmware options.
1536 * @ha: HA context
1537 *
1538 * This function does not return a value.
1539 */
1540 void
1541 qla2x00_update_fw_options(scsi_qla_host_t *vha)
1542 {
1543 uint16_t swing, emphasis, tx_sens, rx_sens;
1544 struct qla_hw_data *ha = vha->hw;
1545
1546 memset(ha->fw_options, 0, sizeof(ha->fw_options));
1547 qla2x00_get_fw_options(vha, ha->fw_options);
1548
1549 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1550 return;
1551
1552 /* Serial Link options. */
1553 DEBUG3(printk("scsi(%ld): Serial link options:\n",
1554 vha->host_no));
1555 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
1556 sizeof(ha->fw_seriallink_options)));
1557
1558 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1559 if (ha->fw_seriallink_options[3] & BIT_2) {
1560 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
1561
1562 /* 1G settings */
1563 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
1564 emphasis = (ha->fw_seriallink_options[2] &
1565 (BIT_4 | BIT_3)) >> 3;
1566 tx_sens = ha->fw_seriallink_options[0] &
1567 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1568 rx_sens = (ha->fw_seriallink_options[0] &
1569 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1570 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
1571 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1572 if (rx_sens == 0x0)
1573 rx_sens = 0x3;
1574 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
1575 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1576 ha->fw_options[10] |= BIT_5 |
1577 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1578 (tx_sens & (BIT_1 | BIT_0));
1579
1580 /* 2G settings */
1581 swing = (ha->fw_seriallink_options[2] &
1582 (BIT_7 | BIT_6 | BIT_5)) >> 5;
1583 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
1584 tx_sens = ha->fw_seriallink_options[1] &
1585 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1586 rx_sens = (ha->fw_seriallink_options[1] &
1587 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1588 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
1589 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1590 if (rx_sens == 0x0)
1591 rx_sens = 0x3;
1592 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
1593 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1594 ha->fw_options[11] |= BIT_5 |
1595 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1596 (tx_sens & (BIT_1 | BIT_0));
1597 }
1598
1599 /* FCP2 options. */
1600 /* Return command IOCBs without waiting for an ABTS to complete. */
1601 ha->fw_options[3] |= BIT_13;
1602
1603 /* LED scheme. */
1604 if (ha->flags.enable_led_scheme)
1605 ha->fw_options[2] |= BIT_12;
1606
1607 /* Detect ISP6312. */
1608 if (IS_QLA6312(ha))
1609 ha->fw_options[2] |= BIT_13;
1610
1611 /* Update firmware options. */
1612 qla2x00_set_fw_options(vha, ha->fw_options);
1613 }
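/*
 * Editor's note on the serial-link words built above (layout as implied
 * by the shifts, not taken from separate documentation): fw_options[10]
 * holds the 1 Gb/s settings and fw_options[11] the 2 Gb/s settings,
 * with emphasis in bits 15:14, swing in bits 10:8 and, on
 * ISP2300/2312/6312, transmit sensitivity in bits 7:4 and receive
 * sensitivity in bits 3:0; ISP2322/6322 instead set BIT_5 and pack the
 * two-bit rx and tx values into bits 3:2 and 1:0.
 */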
1614
1615 void
1616 qla24xx_update_fw_options(scsi_qla_host_t *vha)
1617 {
1618 int rval;
1619 struct qla_hw_data *ha = vha->hw;
1620
1621 if (IS_QLA82XX(ha))
1622 return;
1623
1624 /* Update Serial Link options. */
1625 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
1626 return;
1627
1628 rval = qla2x00_set_serdes_params(vha,
1629 le16_to_cpu(ha->fw_seriallink_options24[1]),
1630 le16_to_cpu(ha->fw_seriallink_options24[2]),
1631 le16_to_cpu(ha->fw_seriallink_options24[3]));
1632 if (rval != QLA_SUCCESS) {
1633 qla_printk(KERN_WARNING, ha,
1634 "Unable to update Serial Link options (%x).\n", rval);
1635 }
1636 }
1637
1638 void
1639 qla2x00_config_rings(struct scsi_qla_host *vha)
1640 {
1641 struct qla_hw_data *ha = vha->hw;
1642 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1643 struct req_que *req = ha->req_q_map[0];
1644 struct rsp_que *rsp = ha->rsp_q_map[0];
1645
1646 /* Setup ring parameters in initialization control block. */
1647 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
1648 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
1649 ha->init_cb->request_q_length = cpu_to_le16(req->length);
1650 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
1651 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1652 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1653 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1654 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1655
1656 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
1657 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
1658 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
1659 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
1660 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
1661 }
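/*
 * Editor's note: LSD()/MSD() above split each 64-bit ring DMA address
 * into its low and high 32-bit halves before the little-endian
 * conversion; qla24xx_config_rings() below applies the same pattern to
 * the ISP24xx initialization control block.
 */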
1662
1663 void
1664 qla24xx_config_rings(struct scsi_qla_host *vha)
1665 {
1666 struct qla_hw_data *ha = vha->hw;
1667 device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
1668 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1669 struct qla_msix_entry *msix;
1670 struct init_cb_24xx *icb;
1671 uint16_t rid = 0;
1672 struct req_que *req = ha->req_q_map[0];
1673 struct rsp_que *rsp = ha->rsp_q_map[0];
1674
1675 /* Setup ring parameters in initialization control block. */
1676 icb = (struct init_cb_24xx *)ha->init_cb;
1677 icb->request_q_outpointer = __constant_cpu_to_le16(0);
1678 icb->response_q_inpointer = __constant_cpu_to_le16(0);
1679 icb->request_q_length = cpu_to_le16(req->length);
1680 icb->response_q_length = cpu_to_le16(rsp->length);
1681 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1682 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1683 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1684 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1685
1686 if (ha->mqenable) {
1687 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1688 icb->rid = __constant_cpu_to_le16(rid);
1689 if (ha->flags.msix_enabled) {
1690 msix = &ha->msix_entries[1];
1691 DEBUG2_17(printk(KERN_INFO
1692 "Registering vector 0x%x for base que\n", msix->entry));
1693 icb->msix = cpu_to_le16(msix->entry);
1694 }
1695 /* Use alternate PCI bus number */
1696 if (MSB(rid))
1697 icb->firmware_options_2 |=
1698 __constant_cpu_to_le32(BIT_19);
1699 /* Use alternate PCI devfn */
1700 if (LSB(rid))
1701 icb->firmware_options_2 |=
1702 __constant_cpu_to_le32(BIT_18);
1703
1704 /* Use Disable MSIX Handshake mode for capable adapters */
1705 if (IS_MSIX_NACK_CAPABLE(ha)) {
1706 icb->firmware_options_2 &=
1707 __constant_cpu_to_le32(~BIT_22);
1708 ha->flags.disable_msix_handshake = 1;
1709 qla_printk(KERN_INFO, ha,
1710 "MSIX Handshake Disable Mode turned on\n");
1711 } else {
1712 icb->firmware_options_2 |=
1713 __constant_cpu_to_le32(BIT_22);
1714 }
1715 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
1716
1717 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
1718 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
1719 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
1720 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
1721 } else {
1722 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
1723 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
1724 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
1725 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
1726 }
1727 /* PCI posting */
1728 RD_REG_DWORD(&ioreg->hccr);
1729 }
1730
1731 /**
1732 * qla2x00_init_rings() - Initializes firmware.
1733 * @ha: HA context
1734 *
1735 * Beginning of request ring has initialization control block already built
1736 * by nvram config routine.
1737 *
1738 * Returns 0 on success.
1739 */
1740 static int
1741 qla2x00_init_rings(scsi_qla_host_t *vha)
1742 {
1743 int rval;
1744 unsigned long flags = 0;
1745 int cnt, que;
1746 struct qla_hw_data *ha = vha->hw;
1747 struct req_que *req;
1748 struct rsp_que *rsp;
1749 struct scsi_qla_host *vp;
1750 struct mid_init_cb_24xx *mid_init_cb =
1751 (struct mid_init_cb_24xx *) ha->init_cb;
1752
1753 spin_lock_irqsave(&ha->hardware_lock, flags);
1754
1755 /* Clear outstanding commands array. */
1756 for (que = 0; que < ha->max_req_queues; que++) {
1757 req = ha->req_q_map[que];
1758 if (!req)
1759 continue;
1760 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1761 req->outstanding_cmds[cnt] = NULL;
1762
1763 req->current_outstanding_cmd = 1;
1764
1765 /* Initialize firmware. */
1766 req->ring_ptr = req->ring;
1767 req->ring_index = 0;
1768 req->cnt = req->length;
1769 }
1770
1771 for (que = 0; que < ha->max_rsp_queues; que++) {
1772 rsp = ha->rsp_q_map[que];
1773 if (!rsp)
1774 continue;
1775 /* Initialize response queue entries */
1776 qla2x00_init_response_q_entries(rsp);
1777 }
1778
1779 /* Clear RSCN queue. */
1780 list_for_each_entry(vp, &ha->vp_list, list) {
1781 vp->rscn_in_ptr = 0;
1782 vp->rscn_out_ptr = 0;
1783 }
1784 ha->isp_ops->config_rings(vha);
1785
1786 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1787
1788 /* Update any ISP specific firmware options before initialization. */
1789 ha->isp_ops->update_fw_options(vha);
1790
1791 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
1792
1793 if (ha->flags.npiv_supported) {
1794 if (ha->operating_mode == LOOP)
1795 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
1796 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1797 }
1798
1799 if (IS_FWI2_CAPABLE(ha)) {
1800 mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
1801 mid_init_cb->init_cb.execution_throttle =
1802 cpu_to_le16(ha->fw_xcb_count);
1803 }
1804
1805 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1806 if (rval) {
1807 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
1808 vha->host_no));
1809 } else {
1810 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
1811 vha->host_no));
1812 }
1813
1814 return (rval);
1815 }
1816
1817 /**
1818 * qla2x00_fw_ready() - Waits for firmware ready.
1819 * @ha: HA context
1820 *
1821 * Returns 0 on success.
1822 */
1823 static int
1824 qla2x00_fw_ready(scsi_qla_host_t *vha)
1825 {
1826 int rval;
1827 unsigned long wtime, mtime, cs84xx_time;
1828 uint16_t min_wait; /* Minimum wait time if loop is down */
1829 uint16_t wait_time; /* Wait time if loop is coming ready */
1830 uint16_t state[5];
1831 struct qla_hw_data *ha = vha->hw;
1832
1833 rval = QLA_SUCCESS;
1834
1835 /* 20 seconds for loop down. */
1836 min_wait = 20;
1837
1838 /*
1839 * Firmware should take at most one RATOV to login, plus 5 seconds for
1840 * our own processing.
1841 */
1842 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
1843 wait_time = min_wait;
1844 }
1845
1846 /* Min wait time if loop down */
1847 mtime = jiffies + (min_wait * HZ);
1848
1849 /* wait time before firmware ready */
1850 wtime = jiffies + (wait_time * HZ);
1851
1852 /* Wait for ISP to finish LIP */
1853 if (!vha->flags.init_done)
1854 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
1855
1856 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1857 vha->host_no));
1858
1859 do {
1860 rval = qla2x00_get_firmware_state(vha, state);
1861 if (rval == QLA_SUCCESS) {
1862 if (state[0] < FSTATE_LOSS_OF_SYNC) {
1863 vha->device_flags &= ~DFLG_NO_CABLE;
1864 }
1865 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1866 DEBUG16(printk("scsi(%ld): fw_state=%x "
1867 "84xx=%x.\n", vha->host_no, state[0],
1868 state[2]));
1869 if ((state[2] & FSTATE_LOGGED_IN) &&
1870 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1871 DEBUG16(printk("scsi(%ld): Sending "
1872 "verify iocb.\n", vha->host_no));
1873
1874 cs84xx_time = jiffies;
1875 rval = qla84xx_init_chip(vha);
1876 if (rval != QLA_SUCCESS)
1877 break;
1878
1879 /* Add time taken to initialize. */
1880 cs84xx_time = jiffies - cs84xx_time;
1881 wtime += cs84xx_time;
1882 mtime += cs84xx_time;
1883 DEBUG16(printk("scsi(%ld): Increasing "
1884 "wait time by %ld. New time %ld\n",
1885 vha->host_no, cs84xx_time, wtime));
1886 }
1887 } else if (state[0] == FSTATE_READY) {
1888 DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
1889 vha->host_no));
1890
1891 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1892 &ha->login_timeout, &ha->r_a_tov);
1893
1894 rval = QLA_SUCCESS;
1895 break;
1896 }
1897
1898 rval = QLA_FUNCTION_FAILED;
1899
1900 if (atomic_read(&vha->loop_down_timer) &&
1901 state[0] != FSTATE_READY) {
1902 /* Loop down. Timeout on min_wait for states
1903 * other than Wait for Login.
1904 */
1905 if (time_after_eq(jiffies, mtime)) {
1906 qla_printk(KERN_INFO, ha,
1907 "Cable is unplugged...\n");
1908
1909 vha->device_flags |= DFLG_NO_CABLE;
1910 break;
1911 }
1912 }
1913 } else {
1914 /* Mailbox cmd failed. Timeout on min_wait. */
1915 if (time_after_eq(jiffies, mtime) ||
1916 (IS_QLA82XX(ha) && ha->flags.fw_hung))
1917 break;
1918 }
1919
1920 if (time_after_eq(jiffies, wtime))
1921 break;
1922
1923 /* Delay for a while */
1924 msleep(500);
1925
1926 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1927 vha->host_no, state[0], jiffies));
1928 } while (1);
1929
1930 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
1931 vha->host_no, state[0], state[1], state[2], state[3], state[4],
1932 jiffies));
1933
1934 if (rval) {
1935 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
1936 vha->host_no));
1937 }
1938
1939 return (rval);
1940 }
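/*
 * Editor's note (worked example with illustrative values): with a
 * retry_count of 8 and a login_timeout of 4 seconds, wait_time in
 * qla2x00_fw_ready() above becomes 8 * 4 + 5 = 37 seconds, while a link
 * that stays down with no cable still gives up after min_wait = 20
 * seconds via the mtime deadline.
 */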
1941
1942 /*
1943 * qla2x00_configure_hba
1944 * Setup adapter context.
1945 *
1946 * Input:
1947 * ha = adapter state pointer.
1948 *
1949 * Returns:
1950 * 0 = success
1951 *
1952 * Context:
1953 * Kernel context.
1954 */
1955 static int
1956 qla2x00_configure_hba(scsi_qla_host_t *vha)
1957 {
1958 int rval;
1959 uint16_t loop_id;
1960 uint16_t topo;
1961 uint16_t sw_cap;
1962 uint8_t al_pa;
1963 uint8_t area;
1964 uint8_t domain;
1965 char connect_type[22];
1966 struct qla_hw_data *ha = vha->hw;
1967
1968 /* Get host addresses. */
1969 rval = qla2x00_get_adapter_id(vha,
1970 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1971 if (rval != QLA_SUCCESS) {
1972 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
1973 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
1974 DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
1975 __func__, vha->host_no));
1976 } else {
1977 qla_printk(KERN_WARNING, ha,
1978 "ERROR -- Unable to get host loop ID.\n");
1979 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1980 }
1981 return (rval);
1982 }
1983
1984 if (topo == 4) {
1985 qla_printk(KERN_INFO, ha,
1986 "Cannot get topology - retrying.\n");
1987 return (QLA_FUNCTION_FAILED);
1988 }
1989
1990 vha->loop_id = loop_id;
1991
1992 /* initialize */
1993 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
1994 ha->operating_mode = LOOP;
1995 ha->switch_cap = 0;
1996
1997 switch (topo) {
1998 case 0:
1999 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
2000 vha->host_no));
2001 ha->current_topology = ISP_CFG_NL;
2002 strcpy(connect_type, "(Loop)");
2003 break;
2004
2005 case 1:
2006 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
2007 vha->host_no));
2008 ha->switch_cap = sw_cap;
2009 ha->current_topology = ISP_CFG_FL;
2010 strcpy(connect_type, "(FL_Port)");
2011 break;
2012
2013 case 2:
2014 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
2015 vha->host_no));
2016 ha->operating_mode = P2P;
2017 ha->current_topology = ISP_CFG_N;
2018 strcpy(connect_type, "(N_Port-to-N_Port)");
2019 break;
2020
2021 case 3:
2022 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
2023 vha->host_no));
2024 ha->switch_cap = sw_cap;
2025 ha->operating_mode = P2P;
2026 ha->current_topology = ISP_CFG_F;
2027 strcpy(connect_type, "(F_Port)");
2028 break;
2029
2030 default:
2031 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
2032 "Using NL.\n",
2033 vha->host_no, topo));
2034 ha->current_topology = ISP_CFG_NL;
2035 strcpy(connect_type, "(Loop)");
2036 break;
2037 }
2038
2039 /* Save Host port and loop ID. */
2040 /* byte order - Big Endian */
2041 vha->d_id.b.domain = domain;
2042 vha->d_id.b.area = area;
2043 vha->d_id.b.al_pa = al_pa;
2044
2045 if (!vha->flags.init_done)
2046 qla_printk(KERN_INFO, ha,
2047 "Topology - %s, Host Loop address 0x%x\n",
2048 connect_type, vha->loop_id);
2049
2050 if (rval) {
2051 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
2052 } else {
2053 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
2054 }
2055
2056 return(rval);
2057 }
2058
2059 inline void
2060 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
2061 char *def)
2062 {
2063 char *st, *en;
2064 uint16_t index;
2065 struct qla_hw_data *ha = vha->hw;
2066 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
2067 !IS_QLA8XXX_TYPE(ha);
2068
2069 if (memcmp(model, BINZERO, len) != 0) {
2070 strncpy(ha->model_number, model, len);
2071 st = en = ha->model_number;
2072 en += len - 1;
2073 while (en > st) {
2074 if (*en != 0x20 && *en != 0x00)
2075 break;
2076 *en-- = '\0';
2077 }
2078
2079 index = (ha->pdev->subsystem_device & 0xff);
2080 if (use_tbl &&
2081 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2082 index < QLA_MODEL_NAMES)
2083 strncpy(ha->model_desc,
2084 qla2x00_model_name[index * 2 + 1],
2085 sizeof(ha->model_desc) - 1);
2086 } else {
2087 index = (ha->pdev->subsystem_device & 0xff);
2088 if (use_tbl &&
2089 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2090 index < QLA_MODEL_NAMES) {
2091 strcpy(ha->model_number,
2092 qla2x00_model_name[index * 2]);
2093 strncpy(ha->model_desc,
2094 qla2x00_model_name[index * 2 + 1],
2095 sizeof(ha->model_desc) - 1);
2096 } else {
2097 strcpy(ha->model_number, def);
2098 }
2099 }
2100 if (IS_FWI2_CAPABLE(ha))
2101 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
2102 sizeof(ha->model_desc));
2103 }
2104
2105 /* On sparc systems, obtain port and node WWN from firmware
2106 * properties.
2107 */
2108 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
2109 {
2110 #ifdef CONFIG_SPARC
2111 struct qla_hw_data *ha = vha->hw;
2112 struct pci_dev *pdev = ha->pdev;
2113 struct device_node *dp = pci_device_to_OF_node(pdev);
2114 const u8 *val;
2115 int len;
2116
2117 val = of_get_property(dp, "port-wwn", &len);
2118 if (val && len >= WWN_SIZE)
2119 memcpy(nv->port_name, val, WWN_SIZE);
2120
2121 val = of_get_property(dp, "node-wwn", &len);
2122 if (val && len >= WWN_SIZE)
2123 memcpy(nv->node_name, val, WWN_SIZE);
2124 #endif
2125 }
2126
2127 /*
2128 * NVRAM configuration for ISP 2xxx
2129 *
2130 * Input:
2131 * ha = adapter block pointer.
2132 *
2133 * Output:
2134 * initialization control block in response_ring
2135 * host adapter parameters in host adapter block
2136 *
2137 * Returns:
2138 * 0 = success.
2139 */
2140 int
2141 qla2x00_nvram_config(scsi_qla_host_t *vha)
2142 {
2143 int rval;
2144 uint8_t chksum = 0;
2145 uint16_t cnt;
2146 uint8_t *dptr1, *dptr2;
2147 struct qla_hw_data *ha = vha->hw;
2148 init_cb_t *icb = ha->init_cb;
2149 nvram_t *nv = ha->nvram;
2150 uint8_t *ptr = ha->nvram;
2151 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2152
2153 rval = QLA_SUCCESS;
2154
2155 /* Determine NVRAM starting address. */
2156 ha->nvram_size = sizeof(nvram_t);
2157 ha->nvram_base = 0;
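/* Boards other than the 2100/2200/2300 may keep a second NVRAM image at offset 0x80; the ctrl_status test below selects it, presumably for the second function of dual-port adapters. */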
2158 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
2159 if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
2160 ha->nvram_base = 0x80;
2161
2162 /* Get NVRAM data and calculate checksum. */
2163 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
2164 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
2165 chksum += *ptr++;
2166
2167 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
2168 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
2169
2170 /* Bad NVRAM data, set default parameters. */
2171 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2172 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2173 /* Reset NVRAM data. */
2174 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
2175 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
2176 nv->nvram_version);
2177 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
2178 "invalid -- WWPN) defaults.\n");
2179
2180 /*
2181 * Set default initialization control block.
2182 */
2183 memset(nv, 0, ha->nvram_size);
2184 nv->parameter_block_version = ICB_VERSION;
2185
2186 if (IS_QLA23XX(ha)) {
2187 nv->firmware_options[0] = BIT_2 | BIT_1;
2188 nv->firmware_options[1] = BIT_7 | BIT_5;
2189 nv->add_firmware_options[0] = BIT_5;
2190 nv->add_firmware_options[1] = BIT_5 | BIT_4;
2191 nv->frame_payload_size = __constant_cpu_to_le16(2048);
2192 nv->special_options[1] = BIT_7;
2193 } else if (IS_QLA2200(ha)) {
2194 nv->firmware_options[0] = BIT_2 | BIT_1;
2195 nv->firmware_options[1] = BIT_7 | BIT_5;
2196 nv->add_firmware_options[0] = BIT_5;
2197 nv->add_firmware_options[1] = BIT_5 | BIT_4;
2198 nv->frame_payload_size = __constant_cpu_to_le16(1024);
2199 } else if (IS_QLA2100(ha)) {
2200 nv->firmware_options[0] = BIT_3 | BIT_1;
2201 nv->firmware_options[1] = BIT_5;
2202 nv->frame_payload_size = __constant_cpu_to_le16(1024);
2203 }
2204
2205 nv->max_iocb_allocation = __constant_cpu_to_le16(256);
2206 nv->execution_throttle = __constant_cpu_to_le16(16);
2207 nv->retry_count = 8;
2208 nv->retry_delay = 1;
2209
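/* Default WWPN 21:00:00:e0:8b:00:00:00: NAA format 2 (0x21 = 33) with the QLogic OUI 00:e0:8b (0xe0 = 224, 0x8b = 139); the remaining bytes stay zero from the memset above. */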
2210 nv->port_name[0] = 33;
2211 nv->port_name[3] = 224;
2212 nv->port_name[4] = 139;
2213
2214 qla2xxx_nvram_wwn_from_ofw(vha, nv);
2215
2216 nv->login_timeout = 4;
2217
2218 /*
2219 * Set default host adapter parameters
2220 */
2221 nv->host_p[1] = BIT_2;
2222 nv->reset_delay = 5;
2223 nv->port_down_retry_count = 8;
2224 nv->max_luns_per_target = __constant_cpu_to_le16(8);
2225 nv->link_down_timeout = 60;
2226
2227 rval = 1;
2228 }
2229
2230 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2231 /*
2232 * The SN2 does not provide BIOS emulation which means you can't change
2233 * potentially bogus BIOS settings. Force the use of default settings
2234 * for link rate and frame size. Hope that the rest of the settings
2235 * are valid.
2236 */
2237 if (ia64_platform_is("sn2")) {
2238 nv->frame_payload_size = __constant_cpu_to_le16(2048);
2239 if (IS_QLA23XX(ha))
2240 nv->special_options[1] = BIT_7;
2241 }
2242 #endif
2243
2244 /* Reset Initialization control block */
2245 memset(icb, 0, ha->init_cb_size);
2246
2247 /*
2248 * Setup driver NVRAM options.
2249 */
2250 nv->firmware_options[0] |= (BIT_6 | BIT_1);
2251 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
2252 nv->firmware_options[1] |= (BIT_5 | BIT_0);
2253 nv->firmware_options[1] &= ~BIT_4;
2254
2255 if (IS_QLA23XX(ha)) {
2256 nv->firmware_options[0] |= BIT_2;
2257 nv->firmware_options[0] &= ~BIT_3;
2258 nv->firmware_options[0] &= ~BIT_6;
2259 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
2260
2261 if (IS_QLA2300(ha)) {
2262 if (ha->fb_rev == FPM_2310) {
2263 strcpy(ha->model_number, "QLA2310");
2264 } else {
2265 strcpy(ha->model_number, "QLA2300");
2266 }
2267 } else {
2268 qla2x00_set_model_info(vha, nv->model_number,
2269 sizeof(nv->model_number), "QLA23xx");
2270 }
2271 } else if (IS_QLA2200(ha)) {
2272 nv->firmware_options[0] |= BIT_2;
2273 /*
2274 * 'Point-to-point preferred, else loop' is not a safe
2275 * connection mode setting.
2276 */
2277 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
2278 (BIT_5 | BIT_4)) {
2279 /* Force 'loop preferred, else point-to-point'. */
2280 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
2281 nv->add_firmware_options[0] |= BIT_5;
2282 }
2283 strcpy(ha->model_number, "QLA22xx");
2284 } else /*if (IS_QLA2100(ha))*/ {
2285 strcpy(ha->model_number, "QLA2100");
2286 }
2287
2288 /*
2289 * Copy over NVRAM RISC parameter block to initialization control block.
2290 */
2291 dptr1 = (uint8_t *)icb;
2292 dptr2 = (uint8_t *)&nv->parameter_block_version;
2293 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
2294 while (cnt--)
2295 *dptr1++ = *dptr2++;
2296
2297 /* Copy 2nd half. */
2298 dptr1 = (uint8_t *)icb->add_firmware_options;
2299 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
2300 while (cnt--)
2301 *dptr1++ = *dptr2++;
2302
2303 /* Use alternate WWN? */
2304 if (nv->host_p[1] & BIT_7) {
2305 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
2306 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
2307 }
2308
2309 /* Prepare nodename */
2310 if ((icb->firmware_options[1] & BIT_6) == 0) {
2311 /*
2312 * Firmware will apply the following mask if the nodename was
2313 * not provided.
2314 */
2315 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
2316 icb->node_name[0] &= 0xF0;
2317 }
2318
2319 /*
2320 * Set host adapter parameters.
2321 */
2322 if (nv->host_p[0] & BIT_7)
2323 ql2xextended_error_logging = 1;
2324 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
2325 /* Always load RISC code on non ISP2[12]00 chips. */
2326 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
2327 ha->flags.disable_risc_code_load = 0;
2328 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
2329 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
2330 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
2331 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
2332 ha->flags.disable_serdes = 0;
2333
2334 ha->operating_mode =
2335 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
2336
2337 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
2338 sizeof(ha->fw_seriallink_options));
2339
2340 /* save HBA serial number */
2341 ha->serial0 = icb->port_name[5];
2342 ha->serial1 = icb->port_name[6];
2343 ha->serial2 = icb->port_name[7];
2344 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
2345 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
2346
2347 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
2348
2349 ha->retry_count = nv->retry_count;
2350
2351 /* Set minimum login_timeout to 4 seconds. */
2352 if (nv->login_timeout != ql2xlogintimeout)
2353 nv->login_timeout = ql2xlogintimeout;
2354 if (nv->login_timeout < 4)
2355 nv->login_timeout = 4;
2356 ha->login_timeout = nv->login_timeout;
2357 icb->login_timeout = nv->login_timeout;
2358
2359 /* Set minimum RATOV to 100 tenths of a second. */
2360 ha->r_a_tov = 100;
2361
2362 ha->loop_reset_delay = nv->reset_delay;
2363
2364 /* Link Down Timeout = 0:
2365 *
2366 * When Port Down timer expires we will start returning
2367 * I/O's to OS with "DID_NO_CONNECT".
2368 *
2369 * Link Down Timeout != 0:
2370 *
2371 * The driver waits for the link to come up after link down
2372 * before returning I/Os to OS with "DID_NO_CONNECT".
2373 */
2374 if (nv->link_down_timeout == 0) {
2375 ha->loop_down_abort_time =
2376 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
2377 } else {
2378 ha->link_down_timeout = nv->link_down_timeout;
2379 ha->loop_down_abort_time =
2380 (LOOP_DOWN_TIME - ha->link_down_timeout);
2381 }
2382
2383 /*
2384 * Need enough time to try and get the port back.
2385 */
2386 ha->port_down_retry_count = nv->port_down_retry_count;
2387 if (qlport_down_retry)
2388 ha->port_down_retry_count = qlport_down_retry;
2389 /* Set login_retry_count */
2390 ha->login_retry_count = nv->retry_count;
2391 if (ha->port_down_retry_count == nv->port_down_retry_count &&
2392 ha->port_down_retry_count > 3)
2393 ha->login_retry_count = ha->port_down_retry_count;
2394 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
2395 ha->login_retry_count = ha->port_down_retry_count;
2396 if (ql2xloginretrycount)
2397 ha->login_retry_count = ql2xloginretrycount;
2398
2399 icb->lun_enables = __constant_cpu_to_le16(0);
2400 icb->command_resource_count = 0;
2401 icb->immediate_notify_resource_count = 0;
2402 icb->timeout = __constant_cpu_to_le16(0);
2403
2404 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2405 /* Enable RIO */
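/* RIO (Reduced Interrupt Operation) batches response completions; the accumulation and delay timers below control how long the firmware coalesces them before interrupting. */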
2406 icb->firmware_options[0] &= ~BIT_3;
2407 icb->add_firmware_options[0] &=
2408 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2409 icb->add_firmware_options[0] |= BIT_2;
2410 icb->response_accumulation_timer = 3;
2411 icb->interrupt_delay_timer = 5;
2412
2413 vha->flags.process_response_queue = 1;
2414 } else {
2415 /* Enable ZIO. */
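/* ZIO (Zero Interrupt Operation) defers completion interrupts. The delay timer is programmed in 100-microsecond units, hence the '* 100' in the messages below. */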
2416 if (!vha->flags.init_done) {
2417 ha->zio_mode = icb->add_firmware_options[0] &
2418 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
2419 ha->zio_timer = icb->interrupt_delay_timer ?
2420 icb->interrupt_delay_timer: 2;
2421 }
2422 icb->add_firmware_options[0] &=
2423 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2424 vha->flags.process_response_queue = 0;
2425 if (ha->zio_mode != QLA_ZIO_DISABLED) {
2426 ha->zio_mode = QLA_ZIO_MODE_6;
2427
2428 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
2429 "delay (%d us).\n", vha->host_no, ha->zio_mode,
2430 ha->zio_timer * 100));
2431 qla_printk(KERN_INFO, ha,
2432 "ZIO mode %d enabled; timer delay (%d us).\n",
2433 ha->zio_mode, ha->zio_timer * 100);
2434
2435 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
2436 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
2437 vha->flags.process_response_queue = 1;
2438 }
2439 }
2440
2441 if (rval) {
2442 DEBUG2_3(printk(KERN_WARNING
2443 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
2444 }
2445 return (rval);
2446 }
2447
2448 static void
2449 qla2x00_rport_del(void *data)
2450 {
2451 fc_port_t *fcport = data;
2452 struct fc_rport *rport;
2453
2454 spin_lock_irq(fcport->vha->host->host_lock);
2455 rport = fcport->drport ? fcport->drport: fcport->rport;
2456 fcport->drport = NULL;
2457 spin_unlock_irq(fcport->vha->host->host_lock);
2458 if (rport)
2459 fc_remote_port_delete(rport);
2460 }
2461
2462 /**
2463 * qla2x00_alloc_fcport() - Allocate a generic fcport.
2464 * @vha: HA context
2465 * @flags: allocation flags
2466 *
2467 * Returns a pointer to the allocated fcport, or NULL, if none available.
2468 */
2469 fc_port_t *
2470 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2471 {
2472 fc_port_t *fcport;
2473
2474 fcport = kzalloc(sizeof(fc_port_t), flags);
2475 if (!fcport)
2476 return NULL;
2477
2478 /* Setup fcport template structure. */
2479 fcport->vha = vha;
2480 fcport->vp_idx = vha->vp_idx;
2481 fcport->port_type = FCT_UNKNOWN;
2482 fcport->loop_id = FC_NO_LOOP_ID;
2483 atomic_set(&fcport->state, FCS_UNCONFIGURED);
2484 fcport->supported_classes = FC_COS_UNSPECIFIED;
2485
2486 return fcport;
2487 }
2488
2489 /*
2490 * qla2x00_configure_loop
2491 * Updates Fibre Channel Device Database with what is actually on loop.
2492 *
2493 * Input:
2494 * ha = adapter block pointer.
2495 *
2496 * Returns:
2497 * 0 = success.
2498 * 1 = error.
2499 * 2 = database was full and device was not configured.
2500 */
2501 static int
2502 qla2x00_configure_loop(scsi_qla_host_t *vha)
2503 {
2504 int rval;
2505 unsigned long flags, save_flags;
2506 struct qla_hw_data *ha = vha->hw;
2507 rval = QLA_SUCCESS;
2508
2509 /* Get Initiator ID */
2510 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2511 rval = qla2x00_configure_hba(vha);
2512 if (rval != QLA_SUCCESS) {
2513 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
2514 vha->host_no));
2515 return (rval);
2516 }
2517 }
2518
2519 save_flags = flags = vha->dpc_flags;
2520 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
2521 vha->host_no, flags));
2522
2523 /*
2524 * If we have both an RSCN and PORT UPDATE pending then handle them
2525 * both at the same time.
2526 */
2527 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2528 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
2529
2530 qla2x00_get_data_rate(vha);
2531
2532 /* Determine what we need to do */
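/* In switched topologies (FL/F) a local-loop change also forces a fabric rescan via RSCN_UPDATE; a pure N_Port point-to-point link has no fabric to rescan; if we are not yet online, schedule both a local and a fabric scan. */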
2533 if (ha->current_topology == ISP_CFG_FL &&
2534 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2535
2536 vha->flags.rscn_queue_overflow = 1;
2537 set_bit(RSCN_UPDATE, &flags);
2538
2539 } else if (ha->current_topology == ISP_CFG_F &&
2540 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2541
2542 vha->flags.rscn_queue_overflow = 1;
2543 set_bit(RSCN_UPDATE, &flags);
2544 clear_bit(LOCAL_LOOP_UPDATE, &flags);
2545
2546 } else if (ha->current_topology == ISP_CFG_N) {
2547 clear_bit(RSCN_UPDATE, &flags);
2548
2549 } else if (!vha->flags.online ||
2550 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
2551
2552 vha->flags.rscn_queue_overflow = 1;
2553 set_bit(RSCN_UPDATE, &flags);
2554 set_bit(LOCAL_LOOP_UPDATE, &flags);
2555 }
2556
2557 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
2558 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2559 rval = QLA_FUNCTION_FAILED;
2560 else
2561 rval = qla2x00_configure_local_loop(vha);
2562 }
2563
2564 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
2565 if (LOOP_TRANSITION(vha))
2566 rval = QLA_FUNCTION_FAILED;
2567 else
2568 rval = qla2x00_configure_fabric(vha);
2569 }
2570
2571 if (rval == QLA_SUCCESS) {
2572 if (atomic_read(&vha->loop_down_timer) ||
2573 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2574 rval = QLA_FUNCTION_FAILED;
2575 } else {
2576 atomic_set(&vha->loop_state, LOOP_READY);
2577
2578 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
2579 }
2580 }
2581
2582 if (rval) {
2583 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
2584 __func__, vha->host_no));
2585 } else {
2586 DEBUG3(printk("%s: exiting normally\n", __func__));
2587 }
2588
2589 /* Restore state if a resync event occurred during processing */
2590 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2591 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2592 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2593 if (test_bit(RSCN_UPDATE, &save_flags)) {
2594 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2595 if (!IS_ALOGIO_CAPABLE(ha))
2596 vha->flags.rscn_queue_overflow = 1;
2597 }
2598 }
2599
2600 return (rval);
2601 }
2602
2603
2604
2605 /*
2606 * qla2x00_configure_local_loop
2607 * Updates Fibre Channel Device Database with local loop devices.
2608 *
2609 * Input:
2610 * ha = adapter block pointer.
2611 *
2612 * Returns:
2613 * 0 = success.
2614 */
2615 static int
2616 qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2617 {
2618 int rval, rval2;
2619 int found_devs;
2620 int found;
2621 fc_port_t *fcport, *new_fcport;
2622
2623 uint16_t index;
2624 uint16_t entries;
2625 char *id_iter;
2626 uint16_t loop_id;
2627 uint8_t domain, area, al_pa;
2628 struct qla_hw_data *ha = vha->hw;
2629
2630 found_devs = 0;
2631 new_fcport = NULL;
2632 entries = MAX_FIBRE_DEVICES;
2633
2634 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
2635 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
2636
2637 /* Get list of logged in devices. */
2638 memset(ha->gid_list, 0, GID_LIST_SIZE);
2639 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
2640 &entries);
2641 if (rval != QLA_SUCCESS)
2642 goto cleanup_allocation;
2643
2644 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
2645 vha->host_no, entries));
2646 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
2647 entries * sizeof(struct gid_list_info)));
2648
2649 /* Allocate temporary fcport for any new fcports discovered. */
2650 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2651 if (new_fcport == NULL) {
2652 rval = QLA_MEMORY_ALLOC_FAILED;
2653 goto cleanup_allocation;
2654 }
2655 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2656
2657 /*
2658 * Mark local devices that were present with FCS_DEVICE_LOST for now.
2659 */
2660 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2661 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2662 fcport->port_type != FCT_BROADCAST &&
2663 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2664
2665 DEBUG(printk("scsi(%ld): Marking port lost, "
2666 "loop_id=0x%04x\n",
2667 vha->host_no, fcport->loop_id));
2668
2669 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2670 }
2671 }
2672
2673 /* Add devices to port list. */
2674 id_iter = (char *)ha->gid_list;
2675 for (index = 0; index < entries; index++) {
2676 domain = ((struct gid_list_info *)id_iter)->domain;
2677 area = ((struct gid_list_info *)id_iter)->area;
2678 al_pa = ((struct gid_list_info *)id_iter)->al_pa;
2679 if (IS_QLA2100(ha) || IS_QLA2200(ha))
2680 loop_id = (uint16_t)
2681 ((struct gid_list_info *)id_iter)->loop_id_2100;
2682 else
2683 loop_id = le16_to_cpu(
2684 ((struct gid_list_info *)id_iter)->loop_id);
2685 id_iter += ha->gid_list_info_size;
2686
2687 /* Bypass reserved domain fields. */
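/* (Domain values 0xf0-0xff are reserved for well-known and special FC addresses rather than attached devices.) */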
2688 if ((domain & 0xf0) == 0xf0)
2689 continue;
2690
2691 /* Bypass if not same domain and area of adapter. */
2692 if (area && domain &&
2693 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
2694 continue;
2695
2696 /* Bypass invalid local loop ID. */
2697 if (loop_id > LAST_LOCAL_LOOP_ID)
2698 continue;
2699
2700 /* Fill in member data. */
2701 new_fcport->d_id.b.domain = domain;
2702 new_fcport->d_id.b.area = area;
2703 new_fcport->d_id.b.al_pa = al_pa;
2704 new_fcport->loop_id = loop_id;
2705 new_fcport->vp_idx = vha->vp_idx;
2706 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2707 if (rval2 != QLA_SUCCESS) {
2708 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
2709 "information -- get_port_database=%x, "
2710 "loop_id=0x%04x\n",
2711 vha->host_no, rval2, new_fcport->loop_id));
2712 DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
2713 vha->host_no));
2714 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2715 continue;
2716 }
2717
2718 /* Check for matching device in port list. */
2719 found = 0;
2720 fcport = NULL;
2721 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2722 if (memcmp(new_fcport->port_name, fcport->port_name,
2723 WWN_SIZE))
2724 continue;
2725
2726 fcport->flags &= ~FCF_FABRIC_DEVICE;
2727 fcport->loop_id = new_fcport->loop_id;
2728 fcport->port_type = new_fcport->port_type;
2729 fcport->d_id.b24 = new_fcport->d_id.b24;
2730 memcpy(fcport->node_name, new_fcport->node_name,
2731 WWN_SIZE);
2732
2733 found++;
2734 break;
2735 }
2736
2737 if (!found) {
2738 /* New device, add to fcports list. */
2739 if (vha->vp_idx) {
2740 new_fcport->vha = vha;
2741 new_fcport->vp_idx = vha->vp_idx;
2742 }
2743 list_add_tail(&new_fcport->list, &vha->vp_fcports);
2744
2745 /* Allocate a new replacement fcport. */
2746 fcport = new_fcport;
2747 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2748 if (new_fcport == NULL) {
2749 rval = QLA_MEMORY_ALLOC_FAILED;
2750 goto cleanup_allocation;
2751 }
2752 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2753 }
2754
2755 /* Base iIDMA settings on HBA port speed. */
2756 fcport->fp_speed = ha->link_data_rate;
2757
2758 qla2x00_update_fcport(vha, fcport);
2759
2760 found_devs++;
2761 }
2762
2763 cleanup_allocation:
2764 kfree(new_fcport);
2765
2766 if (rval != QLA_SUCCESS) {
2767 DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
2768 "rval=%x\n", vha->host_no, rval));
2769 }
2770
2771 return (rval);
2772 }
2773
2774 static void
2775 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2776 {
2777 #define LS_UNKNOWN 2
2778 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2779 char *link_speed;
2780 int rval;
2781 uint16_t mb[4];
2782 struct qla_hw_data *ha = vha->hw;
2783
2784 if (!IS_IIDMA_CAPABLE(ha))
2785 return;
2786
2787 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
2788 fcport->fp_speed > ha->link_data_rate)
2789 return;
2790
2791 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2792 mb);
2793 if (rval != QLA_SUCCESS) {
2794 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
2795 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
2796 vha->host_no, fcport->port_name[0], fcport->port_name[1],
2797 fcport->port_name[2], fcport->port_name[3],
2798 fcport->port_name[4], fcport->port_name[5],
2799 fcport->port_name[6], fcport->port_name[7], rval,
2800 fcport->fp_speed, mb[0], mb[1]));
2801 } else {
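/* Map the firmware port-speed code to a printable rate: codes 0-4 index the table above directly, and the 10 Gb/s code (0x13) maps to the last entry. */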
2802 link_speed = link_speeds[LS_UNKNOWN];
2803 if (fcport->fp_speed < 5)
2804 link_speed = link_speeds[fcport->fp_speed];
2805 else if (fcport->fp_speed == 0x13)
2806 link_speed = link_speeds[5];
2807 DEBUG2(qla_printk(KERN_INFO, ha,
2808 "iIDMA adjusted to %s GB/s on "
2809 "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2810 link_speed, fcport->port_name[0],
2811 fcport->port_name[1], fcport->port_name[2],
2812 fcport->port_name[3], fcport->port_name[4],
2813 fcport->port_name[5], fcport->port_name[6],
2814 fcport->port_name[7]));
2815 }
2816 }
2817
2818 static void
2819 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2820 {
2821 struct fc_rport_identifiers rport_ids;
2822 struct fc_rport *rport;
2823 struct qla_hw_data *ha = vha->hw;
2824
2825 qla2x00_rport_del(fcport);
2826
2827 rport_ids.node_name = wwn_to_u64(fcport->node_name);
2828 rport_ids.port_name = wwn_to_u64(fcport->port_name);
2829 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2830 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2831 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2832 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2833 if (!rport) {
2834 qla_printk(KERN_WARNING, ha,
2835 "Unable to allocate fc remote port!\n");
2836 return;
2837 }
2838 spin_lock_irq(fcport->vha->host->host_lock);
2839 *((fc_port_t **)rport->dd_data) = fcport;
2840 spin_unlock_irq(fcport->vha->host->host_lock);
2841
2842 rport->supported_classes = fcport->supported_classes;
2843
2844 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2845 if (fcport->port_type == FCT_INITIATOR)
2846 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
2847 if (fcport->port_type == FCT_TARGET)
2848 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
2849 fc_remote_port_rolechg(rport, rport_ids.roles);
2850 }
2851
2852 /*
2853 * qla2x00_update_fcport
2854 * Updates device on list.
2855 *
2856 * Input:
2857 * ha = adapter block pointer.
2858 * fcport = port structure pointer.
2859 *
2860 * Return:
2861 * None; the fcport is updated in place and registered
2862 * with the FC transport.
2863 *
2864 * Context:
2865 * Kernel context.
2866 */
2867 void
2868 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2869 {
2870 struct qla_hw_data *ha = vha->hw;
2871
2872 fcport->vha = vha;
2873 fcport->login_retry = 0;
2874 fcport->port_login_retry_count = ha->port_down_retry_count *
2875 PORT_RETRY_TIME;
2876 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
2877 PORT_RETRY_TIME);
2878 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
2879
2880 qla2x00_iidma_fcport(vha, fcport);
2881
2882 atomic_set(&fcport->state, FCS_ONLINE);
2883
2884 qla2x00_reg_remote_port(vha, fcport);
2885 }
2886
2887 /*
2888 * qla2x00_configure_fabric
2889 * Setup SNS devices with loop IDs.
2890 *
2891 * Input:
2892 * ha = adapter block pointer.
2893 *
2894 * Returns:
2895 * 0 = success.
2896 * BIT_0 = error
2897 */
2898 static int
2899 qla2x00_configure_fabric(scsi_qla_host_t *vha)
2900 {
2901 int rval, rval2;
2902 fc_port_t *fcport, *fcptemp;
2903 uint16_t next_loopid;
2904 uint16_t mb[MAILBOX_REGISTER_COUNT];
2905 uint16_t loop_id;
2906 LIST_HEAD(new_fcports);
2907 struct qla_hw_data *ha = vha->hw;
2908 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2909
2910 /* If FL port exists, then SNS is present */
2911 if (IS_FWI2_CAPABLE(ha))
2912 loop_id = NPH_F_PORT;
2913 else
2914 loop_id = SNS_FL_PORT;
2915 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2916 if (rval != QLA_SUCCESS) {
2917 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
2918 "Port\n", vha->host_no));
2919
2920 vha->device_flags &= ~SWITCH_FOUND;
2921 return (QLA_SUCCESS);
2922 }
2923 vha->device_flags |= SWITCH_FOUND;
2924
2925 /* Mark devices that need re-synchronization. */
2926 rval2 = qla2x00_device_resync(vha);
2927 if (rval2 == QLA_RSCNS_HANDLED) {
2928 /* No point doing the scan, just continue. */
2929 return (QLA_SUCCESS);
2930 }
2931 do {
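/* Single-pass block: the error paths below 'break' out to the common cleanup at the end of the function. */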
2932 /* FDMI support. */
2933 if (ql2xfdmienable &&
2934 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
2935 qla2x00_fdmi_register(vha);
2936
2937 /* Ensure we are logged into the SNS. */
2938 if (IS_FWI2_CAPABLE(ha))
2939 loop_id = NPH_SNS;
2940 else
2941 loop_id = SIMPLE_NAME_SERVER;
2942 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
2943 0xfc, mb, BIT_1 | BIT_0);
2944 if (mb[0] != MBS_COMMAND_COMPLETE) {
2945 DEBUG2(qla_printk(KERN_INFO, ha,
2946 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
2947 "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id,
2948 mb[0], mb[1], mb[2], mb[6], mb[7]));
2949 return (QLA_SUCCESS);
2950 }
2951
2952 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
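/* (Re)register with the fabric name server: RFT_ID advertises our FC-4 types, RFF_ID the FC-4 features, RNN_ID the node name and RSNN_NN the symbolic node name. */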
2953 if (qla2x00_rft_id(vha)) {
2954 /* EMPTY */
2955 DEBUG2(printk("scsi(%ld): Register FC-4 "
2956 "TYPE failed.\n", vha->host_no));
2957 }
2958 if (qla2x00_rff_id(vha)) {
2959 /* EMPTY */
2960 DEBUG2(printk("scsi(%ld): Register FC-4 "
2961 "Features failed.\n", vha->host_no));
2962 }
2963 if (qla2x00_rnn_id(vha)) {
2964 /* EMPTY */
2965 DEBUG2(printk("scsi(%ld): Register Node Name "
2966 "failed.\n", vha->host_no));
2967 } else if (qla2x00_rsnn_nn(vha)) {
2968 /* EMPTY */
2969 DEBUG2(printk("scsi(%ld): Register Symbolic "
2970 "Node Name failed.\n", vha->host_no));
2971 }
2972 }
2973
2974 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
2975 if (rval != QLA_SUCCESS)
2976 break;
2977
2978 /*
2979 * Logout all previous fabric devices marked lost, except
2980 * FCP2 devices.
2981 */
2982 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2983 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2984 break;
2985
2986 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
2987 continue;
2988
2989 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
2990 qla2x00_mark_device_lost(vha, fcport,
2991 ql2xplogiabsentdevice, 0);
2992 if (fcport->loop_id != FC_NO_LOOP_ID &&
2993 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
2994 fcport->port_type != FCT_INITIATOR &&
2995 fcport->port_type != FCT_BROADCAST) {
2996 ha->isp_ops->fabric_logout(vha,
2997 fcport->loop_id,
2998 fcport->d_id.b.domain,
2999 fcport->d_id.b.area,
3000 fcport->d_id.b.al_pa);
3001 fcport->loop_id = FC_NO_LOOP_ID;
3002 }
3003 }
3004 }
3005
3006 /* Starting free loop ID. */
3007 next_loopid = ha->min_external_loopid;
3008
3009 /*
3010 * Scan through our port list and login entries that need to be
3011 * logged in.
3012 */
3013 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3014 if (atomic_read(&vha->loop_down_timer) ||
3015 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3016 break;
3017
3018 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3019 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3020 continue;
3021
3022 if (fcport->loop_id == FC_NO_LOOP_ID) {
3023 fcport->loop_id = next_loopid;
3024 rval = qla2x00_find_new_loop_id(
3025 base_vha, fcport);
3026 if (rval != QLA_SUCCESS) {
3027 /* Ran out of IDs to use */
3028 break;
3029 }
3030 }
3031 /* Login and update database */
3032 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3033 }
3034
3035 /* Exit if out of loop IDs. */
3036 if (rval != QLA_SUCCESS) {
3037 break;
3038 }
3039
3040 /*
3041 * Login and add the new devices to our port list.
3042 */
3043 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3044 if (atomic_read(&vha->loop_down_timer) ||
3045 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3046 break;
3047
3048 /* Find a new loop ID to use. */
3049 fcport->loop_id = next_loopid;
3050 rval = qla2x00_find_new_loop_id(base_vha, fcport);
3051 if (rval != QLA_SUCCESS) {
3052 /* Ran out of IDs to use */
3053 break;
3054 }
3055
3056 /* Login and update database */
3057 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3058
3059 if (vha->vp_idx) {
3060 fcport->vha = vha;
3061 fcport->vp_idx = vha->vp_idx;
3062 }
3063 list_move_tail(&fcport->list, &vha->vp_fcports);
3064 }
3065 } while (0);
3066
3067 /* Free all new device structures not processed. */
3068 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3069 list_del(&fcport->list);
3070 kfree(fcport);
3071 }
3072
3073 if (rval) {
3074 DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
3075 "rval=%d\n", vha->host_no, rval));
3076 }
3077
3078 return (rval);
3079 }
3080
3081 /*
3082 * qla2x00_find_all_fabric_devs
3083 *
3084 * Input:
3085 * ha = adapter block pointer.
3086 * new_fcports = list on which newly discovered fabric devices are returned.
3087 *
3088 * Returns:
3089 * 0 = success.
3090 *
3091 * Context:
3092 * Kernel context.
3093 */
3094 static int
3095 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3096 struct list_head *new_fcports)
3097 {
3098 int rval;
3099 uint16_t loop_id;
3100 fc_port_t *fcport, *new_fcport, *fcptemp;
3101 int found;
3102
3103 sw_info_t *swl;
3104 int swl_idx;
3105 int first_dev, last_dev;
3106 port_id_t wrap = {}, nxt_d_id;
3107 struct qla_hw_data *ha = vha->hw;
3108 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3109 struct scsi_qla_host *tvp;
3110
3111 rval = QLA_SUCCESS;
3112
3113 /* Try GID_PT to get device list, else GA_NXT. */
3114 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
3115 if (!swl) {
3116 /*EMPTY*/
3117 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
3118 "on GA_NXT\n", vha->host_no));
3119 } else {
3120 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3121 kfree(swl);
3122 swl = NULL;
3123 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
3124 kfree(swl);
3125 swl = NULL;
3126 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
3127 kfree(swl);
3128 swl = NULL;
3129 } else if (ql2xiidmaenable &&
3130 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
3131 qla2x00_gpsc(vha, swl);
3132 }
3133
3134 /* If the other queries succeeded, probe each port's FC-4 type via GFF_ID so non-FCP ports can be skipped below. */
3135 if (swl)
3136 qla2x00_gff_id(vha, swl);
3137 }
3138 swl_idx = 0;
3139
3140 /* Allocate temporary fcport for any new fcports discovered. */
3141 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3142 if (new_fcport == NULL) {
3143 kfree(swl);
3144 return (QLA_MEMORY_ALLOC_FAILED);
3145 }
3146 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3147 /* Set start port ID scan at adapter ID. */
3148 first_dev = 1;
3149 last_dev = 0;
3150
3151 /* Starting free loop ID. */
3152 loop_id = ha->min_external_loopid;
3153 for (; loop_id <= ha->max_loop_id; loop_id++) {
3154 if (qla2x00_is_reserved_id(vha, loop_id))
3155 continue;
3156
3157 if (ha->current_topology == ISP_CFG_FL &&
3158 (atomic_read(&vha->loop_down_timer) ||
3159 LOOP_TRANSITION(vha))) {
3160 atomic_set(&vha->loop_down_timer, 0);
3161 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3162 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3163 break;
3164 }
3165
3166 if (swl != NULL) {
3167 if (last_dev) {
3168 wrap.b24 = new_fcport->d_id.b24;
3169 } else {
3170 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
3171 memcpy(new_fcport->node_name,
3172 swl[swl_idx].node_name, WWN_SIZE);
3173 memcpy(new_fcport->port_name,
3174 swl[swl_idx].port_name, WWN_SIZE);
3175 memcpy(new_fcport->fabric_port_name,
3176 swl[swl_idx].fabric_port_name, WWN_SIZE);
3177 new_fcport->fp_speed = swl[swl_idx].fp_speed;
3178 new_fcport->fc4_type = swl[swl_idx].fc4_type;
3179
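/* The final GID_PT entry is flagged by a non-zero control value that the GID_PT query stashes in d_id.b.rsvd_1. */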
3180 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
3181 last_dev = 1;
3182 }
3183 swl_idx++;
3184 }
3185 } else {
3186 /* Send GA_NXT to the switch */
3187 rval = qla2x00_ga_nxt(vha, new_fcport);
3188 if (rval != QLA_SUCCESS) {
3189 qla_printk(KERN_WARNING, ha,
3190 "SNS scan failed -- assuming zero-entry "
3191 "result...\n");
3192 list_for_each_entry_safe(fcport, fcptemp,
3193 new_fcports, list) {
3194 list_del(&fcport->list);
3195 kfree(fcport);
3196 }
3197 rval = QLA_SUCCESS;
3198 break;
3199 }
3200 }
3201
3202 /* If wrap on switch device list, exit. */
3203 if (first_dev) {
3204 wrap.b24 = new_fcport->d_id.b24;
3205 first_dev = 0;
3206 } else if (new_fcport->d_id.b24 == wrap.b24) {
3207 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
3208 vha->host_no, new_fcport->d_id.b.domain,
3209 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
3210 break;
3211 }
3212
3213 /* Bypass if same physical adapter. */
3214 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
3215 continue;
3216
3217 /* Bypass virtual ports of the same host. */
3218 found = 0;
3219 if (ha->num_vhosts) {
3220 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3221 if (new_fcport->d_id.b24 == vp->d_id.b24) {
3222 found = 1;
3223 break;
3224 }
3225 }
3226 if (found)
3227 continue;
3228 }
3229
3230 /* Bypass if same domain and area of adapter. */
3231 if (((new_fcport->d_id.b24 & 0xffff00) ==
3232 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
3233 ISP_CFG_FL)
3234 continue;
3235
3236 /* Bypass reserved domain fields. */
3237 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
3238 continue;
3239
3240 /* Bypass ports whose FC-4 type is not FCP-SCSI */
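/* FC4_TYPE_UNKNOWN is deliberately let through so ports whose type could not be determined (e.g. a switch without GFF_ID support) are still logged into. */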
3241 if (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
3242 new_fcport->fc4_type != FC4_TYPE_UNKNOWN)
3243 continue;
3244
3245 /* Locate matching device in database. */
3246 found = 0;
3247 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3248 if (memcmp(new_fcport->port_name, fcport->port_name,
3249 WWN_SIZE))
3250 continue;
3251
3252 found++;
3253
3254 /* Update port state. */
3255 memcpy(fcport->fabric_port_name,
3256 new_fcport->fabric_port_name, WWN_SIZE);
3257 fcport->fp_speed = new_fcport->fp_speed;
3258
3259 /*
3260 * If address the same and state FCS_ONLINE, nothing
3261 * changed.
3262 */
3263 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3264 atomic_read(&fcport->state) == FCS_ONLINE) {
3265 break;
3266 }
3267
3268 /*
3269 * If device was not a fabric device before.
3270 */
3271 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3272 fcport->d_id.b24 = new_fcport->d_id.b24;
3273 fcport->loop_id = FC_NO_LOOP_ID;
3274 fcport->flags |= (FCF_FABRIC_DEVICE |
3275 FCF_LOGIN_NEEDED);
3276 break;
3277 }
3278
3279 /*
3280 * Port ID changed or device was marked to be updated;
3281 * Log it out if still logged in and mark it for
3282 * relogin later.
3283 */
3284 fcport->d_id.b24 = new_fcport->d_id.b24;
3285 fcport->flags |= FCF_LOGIN_NEEDED;
3286 if (fcport->loop_id != FC_NO_LOOP_ID &&
3287 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3288 fcport->port_type != FCT_INITIATOR &&
3289 fcport->port_type != FCT_BROADCAST) {
3290 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3291 fcport->d_id.b.domain, fcport->d_id.b.area,
3292 fcport->d_id.b.al_pa);
3293 fcport->loop_id = FC_NO_LOOP_ID;
3294 }
3295
3296 break;
3297 }
3298
3299 if (found)
3300 continue;
3301 /* If device was not in our fcports list, then add it. */
3302 list_add_tail(&new_fcport->list, new_fcports);
3303
3304 /* Allocate a new replacement fcport. */
3305 nxt_d_id.b24 = new_fcport->d_id.b24;
3306 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3307 if (new_fcport == NULL) {
3308 kfree(swl);
3309 return (QLA_MEMORY_ALLOC_FAILED);
3310 }
3311 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3312 new_fcport->d_id.b24 = nxt_d_id.b24;
3313 }
3314
3315 kfree(swl);
3316 kfree(new_fcport);
3317
3318 return (rval);
3319 }
3320
3321 /*
3322 * qla2x00_find_new_loop_id
3323 * Scan through our port list and find a new usable loop ID.
3324 *
3325 * Input:
3326 * ha: adapter state pointer.
3327 * dev: port structure pointer.
3328 *
3329 * Returns:
3330 * qla2x00 local function return status code.
3331 *
3332 * Context:
3333 * Kernel context.
3334 */
3335 static int
3336 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3337 {
3338 int rval;
3339 int found;
3340 fc_port_t *fcport;
3341 uint16_t first_loop_id;
3342 struct qla_hw_data *ha = vha->hw;
3343 struct scsi_qla_host *vp;
3344 struct scsi_qla_host *tvp;
3345
3346 rval = QLA_SUCCESS;
3347
3348 /* Save starting loop ID. */
3349 first_loop_id = dev->loop_id;
3350
3351 for (;;) {
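/* Walk the loop-ID space from the caller's starting value, skipping our own ID and reserved IDs, until an unused ID is found or we wrap back to where we started. */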
3352 /* Skip loop ID if already used by adapter. */
3353 if (dev->loop_id == vha->loop_id)
3354 dev->loop_id++;
3355
3356 /* Skip reserved loop IDs. */
3357 while (qla2x00_is_reserved_id(vha, dev->loop_id))
3358 dev->loop_id++;
3359
3360 /* Reset loop ID if passed the end. */
3361 if (dev->loop_id > ha->max_loop_id) {
3362 /* first loop ID. */
3363 dev->loop_id = ha->min_external_loopid;
3364 }
3365
3366 /* Check for loop ID being already in use. */
3367 found = 0;
3368 fcport = NULL;
3369 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3370 list_for_each_entry(fcport, &vp->vp_fcports, list) {
3371 if (fcport->loop_id == dev->loop_id &&
3372 fcport != dev) {
3373 /* ID possibly in use */
3374 found++;
3375 break;
3376 }
3377 }
3378 if (found)
3379 break;
3380 }
3381
3382 /* If not in use then it is free to use. */
3383 if (!found) {
3384 break;
3385 }
3386
3387 /* ID in use. Try next value. */
3388 dev->loop_id++;
3389
3390 /* Wrapped around; no free loop ID is available. */
3391 if (dev->loop_id == first_loop_id) {
3392 dev->loop_id = FC_NO_LOOP_ID;
3393 rval = QLA_FUNCTION_FAILED;
3394 break;
3395 }
3396 }
3397
3398 return (rval);
3399 }
3400
3401 /*
3402 * qla2x00_device_resync
3403 * Marks devices in the database that need resynchronization.
3404 *
3405 * Input:
3406 * ha = adapter block pointer.
3407 *
3408 * Context:
3409 * Kernel context.
3410 */
3411 static int
3412 qla2x00_device_resync(scsi_qla_host_t *vha)
3413 {
3414 int rval;
3415 uint32_t mask;
3416 fc_port_t *fcport;
3417 uint32_t rscn_entry;
3418 uint8_t rscn_out_iter;
3419 uint8_t format;
3420 port_id_t d_id = {};
3421
3422 rval = QLA_RSCNS_HANDLED;
3423
3424 while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
3425 vha->flags.rscn_queue_overflow) {
3426
3427 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
3428 format = MSB(MSW(rscn_entry));
3429 d_id.b.domain = LSB(MSW(rscn_entry));
3430 d_id.b.area = MSB(LSW(rscn_entry));
3431 d_id.b.al_pa = LSB(LSW(rscn_entry));
3432
3433 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
3434 "[%02x/%02x%02x%02x].\n",
3435 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
3436 d_id.b.area, d_id.b.al_pa));
3437
3438 vha->rscn_out_ptr++;
3439 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
3440 vha->rscn_out_ptr = 0;
3441
3442 /* Skip duplicate entries. */
3443 for (rscn_out_iter = vha->rscn_out_ptr;
3444 !vha->flags.rscn_queue_overflow &&
3445 rscn_out_iter != vha->rscn_in_ptr;
3446 rscn_out_iter = (rscn_out_iter ==
3447 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
3448
3449 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
3450 break;
3451
3452 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
3453 "entry found at [%d].\n", vha->host_no,
3454 rscn_out_iter));
3455
3456 vha->rscn_out_ptr = rscn_out_iter;
3457 }
3458
3459 /* Queue overflow, set switch default case. */
3460 if (vha->flags.rscn_queue_overflow) {
3461 DEBUG(printk("scsi(%ld): device_resync: rscn "
3462 "overflow.\n", vha->host_no));
3463
3464 format = 3;
3465 vha->flags.rscn_queue_overflow = 0;
3466 }
3467
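/* RSCN address format: 0 affects a single port, 1 an area, 2 a whole domain; anything else (including the overflow case above) is treated as fabric-wide. The mask selects how much of the 24-bit port ID must match below. */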
3468 switch (format) {
3469 case 0:
3470 mask = 0xffffff;
3471 break;
3472 case 1:
3473 mask = 0xffff00;
3474 break;
3475 case 2:
3476 mask = 0xff0000;
3477 break;
3478 default:
3479 mask = 0x0;
3480 d_id.b24 = 0;
3481 vha->rscn_out_ptr = vha->rscn_in_ptr;
3482 break;
3483 }
3484
3485 rval = QLA_SUCCESS;
3486
3487 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3488 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3489 (fcport->d_id.b24 & mask) != d_id.b24 ||
3490 fcport->port_type == FCT_BROADCAST)
3491 continue;
3492
3493 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3494 if (format != 3 ||
3495 fcport->port_type != FCT_INITIATOR) {
3496 qla2x00_mark_device_lost(vha, fcport,
3497 0, 0);
3498 }
3499 }
3500 }
3501 }
3502 return (rval);
3503 }
3504
3505 /*
3506 * qla2x00_fabric_dev_login
3507 * Login fabric target device and update FC port database.
3508 *
3509 * Input:
3510 * ha: adapter state pointer.
3511 * fcport: port structure list pointer.
3512 * next_loopid: contains value of a new loop ID that can be used
3513 * by the next login attempt.
3514 *
3515 * Returns:
3516 * qla2x00 local function return status code.
3517 *
3518 * Context:
3519 * Kernel context.
3520 */
3521 static int
3522 qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3523 uint16_t *next_loopid)
3524 {
3525 int rval;
3526 int retry;
3527 uint8_t opts;
3528 struct qla_hw_data *ha = vha->hw;
3529
3530 rval = QLA_SUCCESS;
3531 retry = 0;
3532
3533 if (IS_ALOGIO_CAPABLE(ha)) {
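/* Prefer the asynchronous (IOCB-based) login when the ISP supports it; if posting the work fails, fall through to the mailbox-driven fabric login below. */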
3534 if (fcport->flags & FCF_ASYNC_SENT)
3535 return rval;
3536 fcport->flags |= FCF_ASYNC_SENT;
3537 rval = qla2x00_post_async_login_work(vha, fcport, NULL);
3538 if (!rval)
3539 return rval;
3540 }
3541
3542 fcport->flags &= ~FCF_ASYNC_SENT;
3543 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
3544 if (rval == QLA_SUCCESS) {
3545 /* Send an ADISC to FCP-2 devices. */
3546 opts = 0;
3547 if (fcport->flags & FCF_FCP2_DEVICE)
3548 opts |= BIT_1;
3549 rval = qla2x00_get_port_database(vha, fcport, opts);
3550 if (rval != QLA_SUCCESS) {
3551 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3552 fcport->d_id.b.domain, fcport->d_id.b.area,
3553 fcport->d_id.b.al_pa);
3554 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3555 } else {
3556 qla2x00_update_fcport(vha, fcport);
3557 }
3558 }
3559
3560 return (rval);
3561 }
3562
3563 /*
3564 * qla2x00_fabric_login
3565 * Issue fabric login command.
3566 *
3567 * Input:
3568 * ha = adapter block pointer.
3569 * device = pointer to FC device type structure.
3570 *
3571 * Returns:
3572 * 0 - Login succeeded
3573 * 1 - Login failed
3574 * 2 - Initiator device
3575 * 3 - Fatal error
3576 */
3577 int
3578 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3579 uint16_t *next_loopid)
3580 {
3581 int rval;
3582 int retry;
3583 uint16_t tmp_loopid;
3584 uint16_t mb[MAILBOX_REGISTER_COUNT];
3585 struct qla_hw_data *ha = vha->hw;
3586
3587 retry = 0;
3588 tmp_loopid = 0;
3589
3590 for (;;) {
3591 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
3592 "for port %02x%02x%02x.\n",
3593 vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
3594 fcport->d_id.b.area, fcport->d_id.b.al_pa));
3595
3596 /* Login fcport on switch. */
3597 ha->isp_ops->fabric_login(vha, fcport->loop_id,
3598 fcport->d_id.b.domain, fcport->d_id.b.area,
3599 fcport->d_id.b.al_pa, mb, BIT_0);
3600 if (mb[0] == MBS_PORT_ID_USED) {
3601 /*
3602 * Device has another loop ID. The firmware team
3603 * recommends the driver perform an implicit login with
3604 * the specified ID again. The ID we just used is saved
3605 * here so we return with an ID that can be tried by
3606 * the next login.
3607 */
3608 retry++;
3609 tmp_loopid = fcport->loop_id;
3610 fcport->loop_id = mb[1];
3611
3612 DEBUG(printk("Fabric Login: port in use - next "
3613 "loop id=0x%04x, port Id=%02x%02x%02x.\n",
3614 fcport->loop_id, fcport->d_id.b.domain,
3615 fcport->d_id.b.area, fcport->d_id.b.al_pa));
3616
3617 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
3618 /*
3619 * Login succeeded.
3620 */
3621 if (retry) {
3622 /* A retry occurred before. */
3623 *next_loopid = tmp_loopid;
3624 } else {
3625 /*
3626 * No retry occurred before. Just increment the
3627 * ID value for next login.
3628 */
3629 *next_loopid = (fcport->loop_id + 1);
3630 }
3631
3632 if (mb[1] & BIT_0) {
3633 fcport->port_type = FCT_INITIATOR;
3634 } else {
3635 fcport->port_type = FCT_TARGET;
3636 if (mb[1] & BIT_1) {
3637 fcport->flags |= FCF_FCP2_DEVICE;
3638 }
3639 }
3640
3641 if (mb[10] & BIT_0)
3642 fcport->supported_classes |= FC_COS_CLASS2;
3643 if (mb[10] & BIT_1)
3644 fcport->supported_classes |= FC_COS_CLASS3;
3645
3646 rval = QLA_SUCCESS;
3647 break;
3648 } else if (mb[0] == MBS_LOOP_ID_USED) {
3649 /*
3650 * Loop ID already used, try next loop ID.
3651 */
3652 fcport->loop_id++;
3653 rval = qla2x00_find_new_loop_id(vha, fcport);
3654 if (rval != QLA_SUCCESS) {
3655 /* Ran out of loop IDs to use */
3656 break;
3657 }
3658 } else if (mb[0] == MBS_COMMAND_ERROR) {
3659 /*
3660 * Firmware possibly timed out during login. If no
3661 * retries are left, the device is declared
3662 * dead.
3663 */
3664 *next_loopid = fcport->loop_id;
3665 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3666 fcport->d_id.b.domain, fcport->d_id.b.area,
3667 fcport->d_id.b.al_pa);
3668 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3669
3670 rval = 1;
3671 break;
3672 } else {
3673 /*
3674 * unrecoverable / not handled error
3675 */
3676 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
3677 "loop_id=%x jiffies=%lx.\n",
3678 __func__, vha->host_no, mb[0],
3679 fcport->d_id.b.domain, fcport->d_id.b.area,
3680 fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
3681
3682 *next_loopid = fcport->loop_id;
3683 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3684 fcport->d_id.b.domain, fcport->d_id.b.area,
3685 fcport->d_id.b.al_pa);
3686 fcport->loop_id = FC_NO_LOOP_ID;
3687 fcport->login_retry = 0;
3688
3689 rval = 3;
3690 break;
3691 }
3692 }
3693
3694 return (rval);
3695 }
3696
3697 /*
3698 * qla2x00_local_device_login
3699 * Issue local device login command.
3700 *
3701 * Input:
3702 * ha = adapter block pointer.
3703 * loop_id = loop id of device to login to.
3704 *
3705 * Returns (Where's the #define!!!!):
3706 * 0 - Login succeeded
3707 * 1 - Login failed
3708 * 3 - Fatal error
3709 */
3710 int
3711 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
3712 {
3713 int rval;
3714 uint16_t mb[MAILBOX_REGISTER_COUNT];
3715
3716 memset(mb, 0, sizeof(mb));
3717 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
3718 if (rval == QLA_SUCCESS) {
3719 /* Interrogate mailbox registers for any errors */
3720 if (mb[0] == MBS_COMMAND_ERROR)
3721 rval = 1;
3722 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
3723 /* device not in PCB table */
3724 rval = 3;
3725 }
3726
3727 return (rval);
3728 }
3729
3730 /*
3731 * qla2x00_loop_resync
3732 * Resync with fibre channel devices.
3733 *
3734 * Input:
3735 * ha = adapter block pointer.
3736 *
3737 * Returns:
3738 * 0 = success
3739 */
3740 int
3741 qla2x00_loop_resync(scsi_qla_host_t *vha)
3742 {
3743 int rval = QLA_SUCCESS;
3744 uint32_t wait_time;
3745 struct req_que *req;
3746 struct rsp_que *rsp;
3747
3748 if (vha->hw->flags.cpu_affinity_enabled)
3749 req = vha->hw->req_q_map[0];
3750 else
3751 req = vha->req;
3752 rsp = req->rsp;
3753
3754 atomic_set(&vha->loop_state, LOOP_UPDATE);
3755 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3756 if (vha->flags.online) {
3757 if (!(rval = qla2x00_fw_ready(vha))) {
3758 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3759 wait_time = 256;
3760 do {
3761 atomic_set(&vha->loop_state, LOOP_UPDATE);
3762
3763 /* Issue a marker after FW becomes ready. */
3764 qla2x00_marker(vha, req, rsp, 0, 0,
3765 MK_SYNC_ALL);
3766 vha->marker_needed = 0;
3767
3768 /* Remap devices on Loop. */
3769 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3770
3771 qla2x00_configure_loop(vha);
3772 wait_time--;
3773 } while (!atomic_read(&vha->loop_down_timer) &&
3774 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3775 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3776 &vha->dpc_flags)));
3777 }
3778 }
3779
3780 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3781 return (QLA_FUNCTION_FAILED);
3782
3783 if (rval)
3784 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
3785
3786 return (rval);
3787 }
3788
3789 void
3790 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3791 {
3792 fc_port_t *fcport;
3793 struct scsi_qla_host *tvp, *vha;
3794
3795 /* Go with deferred removal of rport references. */
3796 list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
3797 list_for_each_entry(fcport, &vha->vp_fcports, list)
3798 if (fcport && fcport->drport &&
3799 atomic_read(&fcport->state) != FCS_UNCONFIGURED)
3800 qla2x00_rport_del(fcport);
3801 }
3802
3803 void
3804 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3805 {
3806 struct qla_hw_data *ha = vha->hw;
3807 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3808 struct scsi_qla_host *tvp;
3809
3810 vha->flags.online = 0;
3811 ha->flags.chip_reset_done = 0;
3812 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3813 ha->qla_stats.total_isp_aborts++;
3814
3815 qla_printk(KERN_INFO, ha,
3816 "Performing ISP error recovery - ha= %p.\n", ha);
3817
3818 /* Chip reset does not apply to 82XX */
3819 if (!IS_QLA82XX(ha))
3820 ha->isp_ops->reset_chip(vha);
3821
3822 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
3823 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3824 atomic_set(&vha->loop_state, LOOP_DOWN);
3825 qla2x00_mark_all_devices_lost(vha, 0);
3826 list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
3827 qla2x00_mark_all_devices_lost(vp, 0);
3828 } else {
3829 if (!atomic_read(&vha->loop_down_timer))
3830 atomic_set(&vha->loop_down_timer,
3831 LOOP_DOWN_TIME);
3832 }
3833
3834 /* For ISP82XX, make sure all pending I/O DMA has completed. */
3835 if (IS_QLA82XX(ha))
3836 qla82xx_wait_for_pending_commands(vha);
3837
3838 /* Requeue all commands in outstanding command list. */
3839 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3840 }
3841
3842 /*
3843 * qla2x00_abort_isp
3844 * Resets ISP and aborts all outstanding commands.
3845 *
3846 * Input:
3847 * ha = adapter block pointer.
3848 *
3849 * Returns:
3850 * 0 = success
3851 */
3852 int
3853 qla2x00_abort_isp(scsi_qla_host_t *vha)
3854 {
3855 int rval;
3856 uint8_t status = 0;
3857 struct qla_hw_data *ha = vha->hw;
3858 struct scsi_qla_host *vp;
3859 struct scsi_qla_host *tvp;
3860 struct req_que *req = ha->req_q_map[0];
3861
3862 if (vha->flags.online) {
3863 qla2x00_abort_isp_cleanup(vha);
3864
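/* If the PCI channel has failed permanently there is nothing left to recover; return without scheduling further retries. */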
3865 if (unlikely(pci_channel_offline(ha->pdev) &&
3866 ha->flags.pci_channel_io_perm_failure)) {
3867 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3868 status = 0;
3869 return status;
3870 }
3871
3872 ha->isp_ops->get_flash_version(vha, req->ring);
3873
3874 ha->isp_ops->nvram_config(vha);
3875
3876 if (!qla2x00_restart_isp(vha)) {
3877 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3878
3879 if (!atomic_read(&vha->loop_down_timer)) {
3880 /*
3881 * Issue marker command only when we are going
3882 * to start the I/O.
3883 */
3884 vha->marker_needed = 1;
3885 }
3886
3887 vha->flags.online = 1;
3888
3889 ha->isp_ops->enable_intrs(ha);
3890
3891 ha->isp_abort_cnt = 0;
3892 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3893
3894 if (IS_QLA81XX(ha))
3895 qla2x00_get_fw_version(vha,
3896 &ha->fw_major_version,
3897 &ha->fw_minor_version,
3898 &ha->fw_subminor_version,
3899 &ha->fw_attributes, &ha->fw_memory_size,
3900 ha->mpi_version, &ha->mpi_capabilities,
3901 ha->phy_version);
3902
3903 if (ha->fce) {
3904 ha->flags.fce_enabled = 1;
3905 memset(ha->fce, 0,
3906 fce_calc_size(ha->fce_bufs));
3907 rval = qla2x00_enable_fce_trace(vha,
3908 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
3909 &ha->fce_bufs);
3910 if (rval) {
3911 qla_printk(KERN_WARNING, ha,
3912 "Unable to reinitialize FCE "
3913 "(%d).\n", rval);
3914 ha->flags.fce_enabled = 0;
3915 }
3916 }
3917
3918 if (ha->eft) {
3919 memset(ha->eft, 0, EFT_SIZE);
3920 rval = qla2x00_enable_eft_trace(vha,
3921 ha->eft_dma, EFT_NUM_BUFFERS);
3922 if (rval) {
3923 qla_printk(KERN_WARNING, ha,
3924 "Unable to reinitialize EFT "
3925 "(%d).\n", rval);
3926 }
3927 }
3928 } else { /* failed the ISP abort */
3929 vha->flags.online = 1;
3930 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3931 if (ha->isp_abort_cnt == 0) {
3932 qla_printk(KERN_WARNING, ha,
3933 "ISP error recovery failed - "
3934 "board disabled\n");
3935 /*
3936 * The next call disables the board
3937 * completely.
3938 */
3939 ha->isp_ops->reset_adapter(vha);
3940 vha->flags.online = 0;
3941 clear_bit(ISP_ABORT_RETRY,
3942 &vha->dpc_flags);
3943 status = 0;
3944 } else { /* schedule another ISP abort */
3945 ha->isp_abort_cnt--;
3946 DEBUG(printk("qla%ld: ISP abort - "
3947 "retry remaining %d\n",
3948 vha->host_no, ha->isp_abort_cnt));
3949 status = 1;
3950 }
3951 } else {
3952 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3953 DEBUG(printk("qla2x00(%ld): ISP error recovery "
3954 "- retrying (%d) more times\n",
3955 vha->host_no, ha->isp_abort_cnt));
3956 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3957 status = 1;
3958 }
3959 }
3960
3961 }
3962
3963 if (!status) {
3964 DEBUG(printk(KERN_INFO
3965 "qla2x00_abort_isp(%ld): succeeded.\n",
3966 vha->host_no));
3967 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3968 if (vp->vp_idx)
3969 qla2x00_vp_abort_isp(vp);
3970 }
3971 } else {
3972 qla_printk(KERN_INFO, ha,
3973 "qla2x00_abort_isp: **** FAILED ****\n");
3974 }
3975
3976 return(status);
3977 }
3978
3979 /*
3980 * qla2x00_restart_isp
3981 * restarts the ISP after a reset
3982 *
3983 * Input:
3984 * ha = adapter block pointer.
3985 *
3986 * Returns:
3987 * 0 = success
3988 */
3989 static int
3990 qla2x00_restart_isp(scsi_qla_host_t *vha)
3991 {
3992 int status = 0;
3993 uint32_t wait_time;
3994 struct qla_hw_data *ha = vha->hw;
3995 struct req_que *req = ha->req_q_map[0];
3996 struct rsp_que *rsp = ha->rsp_q_map[0];
3997
3998 /* If firmware needs to be loaded */
3999 if (qla2x00_isp_firmware(vha)) {
4000 vha->flags.online = 0;
4001 status = ha->isp_ops->chip_diag(vha);
4002 if (!status)
4003 status = qla2x00_setup_chip(vha);
4004 }
4005
4006 if (!status && !(status = qla2x00_init_rings(vha))) {
4007 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4008 ha->flags.chip_reset_done = 1;
4009 /* Initialize the queues in use */
4010 qla25xx_init_queues(ha);
4011
4012 status = qla2x00_fw_ready(vha);
4013 if (!status) {
4014 DEBUG(printk("%s(): Start configure loop, "
4015 "status = %d\n", __func__, status));
4016
4017 /* Issue a marker after FW becomes ready. */
4018 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4019
4020 vha->flags.online = 1;
4021 /* Wait at most MAX_TARGET RSCNs for a stable link. */
4022 wait_time = 256;
4023 do {
4024 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4025 qla2x00_configure_loop(vha);
4026 wait_time--;
4027 } while (!atomic_read(&vha->loop_down_timer) &&
4028 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
4029 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
4030 &vha->dpc_flags)));
4031 }
4032
4033 /* If no cable is attached, treat the configuration as successful. */
4034 if ((vha->device_flags & DFLG_NO_CABLE))
4035 status = 0;
4036
4037 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
4038 __func__,
4039 status));
4040 }
4041 return (status);
4042 }
4043
4044 static int
4045 qla25xx_init_queues(struct qla_hw_data *ha)
4046 {
4047 struct rsp_que *rsp = NULL;
4048 struct req_que *req = NULL;
4049 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4050 int ret = -1;
4051 int i;
4052
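/*
 * Queue 0 is the base request/response queue pair and is brought up
 * by the normal initialization path; only the additional queues
 * created for multi-queue operation are (re)initialized here.
 */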
4053 for (i = 1; i < ha->max_rsp_queues; i++) {
4054 rsp = ha->rsp_q_map[i];
4055 if (rsp) {
4056 rsp->options &= ~BIT_0;
4057 ret = qla25xx_init_rsp_que(base_vha, rsp);
4058 if (ret != QLA_SUCCESS)
4059 DEBUG2_17(printk(KERN_WARNING
4060 "%s Rsp que:%d init failed\n", __func__,
4061 rsp->id));
4062 else
4063 DEBUG2_17(printk(KERN_INFO
4064 "%s Rsp que:%d inited\n", __func__,
4065 rsp->id));
4066 }
4067 }
4068 for (i = 1; i < ha->max_req_queues; i++) {
4069 req = ha->req_q_map[i];
4070 if (req) {
4071 /* Clear outstanding commands array. */
4072 req->options &= ~BIT_0;
4073 ret = qla25xx_init_req_que(base_vha, req);
4074 if (ret != QLA_SUCCESS)
4075 DEBUG2_17(printk(KERN_WARNING
4076 "%s Req que:%d init failed\n", __func__,
4077 req->id));
4078 else
4079 DEBUG2_17(printk(KERN_INFO
4080 "%s Req que:%d inited\n", __func__,
4081 req->id));
4082 }
4083 }
4084 return ret;
4085 }
4086
4087 /*
4088 * qla2x00_reset_adapter
4089 * Reset adapter.
4090 *
4091 * Input:
4092 * ha = adapter block pointer.
4093 */
4094 void
4095 qla2x00_reset_adapter(scsi_qla_host_t *vha)
4096 {
4097 unsigned long flags = 0;
4098 struct qla_hw_data *ha = vha->hw;
4099 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4100
4101 vha->flags.online = 0;
4102 ha->isp_ops->disable_intrs(ha);
4103
4104 spin_lock_irqsave(&ha->hardware_lock, flags);
4105 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
4106 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
4107 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
4108 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
4109 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4110 }
4111
4112 void
4113 qla24xx_reset_adapter(scsi_qla_host_t *vha)
4114 {
4115 unsigned long flags = 0;
4116 struct qla_hw_data *ha = vha->hw;
4117 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4118
4119 if (IS_QLA82XX(ha))
4120 return;
4121
4122 vha->flags.online = 0;
4123 ha->isp_ops->disable_intrs(ha);
4124
4125 spin_lock_irqsave(&ha->hardware_lock, flags);
4126 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
4127 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
4128 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
4129 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
4130 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4131
4132 if (IS_NOPOLLING_TYPE(ha))
4133 ha->isp_ops->enable_intrs(ha);
4134 }
4135
4136 /* On sparc systems, obtain port and node WWN from firmware
4137 * properties.
4138 */
4139 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
4140 struct nvram_24xx *nv)
4141 {
4142 #ifdef CONFIG_SPARC
4143 struct qla_hw_data *ha = vha->hw;
4144 struct pci_dev *pdev = ha->pdev;
4145 struct device_node *dp = pci_device_to_OF_node(pdev);
4146 const u8 *val;
4147 int len;
4148
4149 val = of_get_property(dp, "port-wwn", &len);
4150 if (val && len >= WWN_SIZE)
4151 memcpy(nv->port_name, val, WWN_SIZE);
4152
4153 val = of_get_property(dp, "node-wwn", &len);
4154 if (val && len >= WWN_SIZE)
4155 memcpy(nv->node_name, val, WWN_SIZE);
4156 #endif
4157 }
4158
4159 int
4160 qla24xx_nvram_config(scsi_qla_host_t *vha)
4161 {
4162 int rval;
4163 struct init_cb_24xx *icb;
4164 struct nvram_24xx *nv;
4165 uint32_t *dptr;
4166 uint8_t *dptr1, *dptr2;
4167 uint32_t chksum;
4168 uint16_t cnt;
4169 struct qla_hw_data *ha = vha->hw;
4170
4171 rval = QLA_SUCCESS;
4172 icb = (struct init_cb_24xx *)ha->init_cb;
4173 nv = ha->nvram;
4174
4175 /* Determine NVRAM starting address. */
4176 if (ha->flags.port0) {
4177 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
4178 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
4179 } else {
4180 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
4181 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
4182 }
4183 ha->nvram_size = sizeof(struct nvram_24xx);
4184 ha->vpd_size = FA_NVRAM_VPD_SIZE;
4185 if (IS_QLA82XX(ha))
4186 ha->vpd_size = FA_VPD_SIZE_82XX;
4187
4188 /* Get VPD data into cache */
4189 ha->vpd = ha->nvram + VPD_OFFSET;
4190 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
4191 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
4192
4193 /* Get NVRAM data into cache and calculate checksum. */
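/* A valid NVRAM image checksums to zero over all of its 32-bit words. */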
4194 dptr = (uint32_t *)nv;
4195 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
4196 ha->nvram_size);
4197 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4198 chksum += le32_to_cpu(*dptr++);
4199
4200 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4201 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4202
4203 /* Bad NVRAM data, set default parameters. */
4204 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4205 || nv->id[3] != ' ' ||
4206 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4207 /* Reset NVRAM data. */
4208 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
4209 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
4210 le16_to_cpu(nv->nvram_version));
4211 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
4212 "invalid -- WWPN) defaults.\n");
4213
4214 /*
4215 * Set default initialization control block.
4216 */
4217 memset(nv, 0, ha->nvram_size);
4218 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4219 nv->version = __constant_cpu_to_le16(ICB_VERSION);
4220 nv->frame_payload_size = __constant_cpu_to_le16(2048);
4221 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4222 nv->exchange_count = __constant_cpu_to_le16(0);
4223 nv->hard_address = __constant_cpu_to_le16(124);
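/*
 * Default WWPN/WWNN built on the QLogic OUI (00:e0:8b): sufficient to
 * bring the port up but, as the message above notes, not valid unique
 * names.
 */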
4224 nv->port_name[0] = 0x21;
4225 nv->port_name[1] = 0x00 + ha->port_no;
4226 nv->port_name[2] = 0x00;
4227 nv->port_name[3] = 0xe0;
4228 nv->port_name[4] = 0x8b;
4229 nv->port_name[5] = 0x1c;
4230 nv->port_name[6] = 0x55;
4231 nv->port_name[7] = 0x86;
4232 nv->node_name[0] = 0x20;
4233 nv->node_name[1] = 0x00;
4234 nv->node_name[2] = 0x00;
4235 nv->node_name[3] = 0xe0;
4236 nv->node_name[4] = 0x8b;
4237 nv->node_name[5] = 0x1c;
4238 nv->node_name[6] = 0x55;
4239 nv->node_name[7] = 0x86;
4240 qla24xx_nvram_wwn_from_ofw(vha, nv);
4241 nv->login_retry_count = __constant_cpu_to_le16(8);
4242 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4243 nv->login_timeout = __constant_cpu_to_le16(0);
4244 nv->firmware_options_1 =
4245 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4246 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4247 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4248 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4249 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4250 nv->efi_parameters = __constant_cpu_to_le32(0);
4251 nv->reset_delay = 5;
4252 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4253 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4254 nv->link_down_timeout = __constant_cpu_to_le16(30);
4255
4256 rval = 1;
4257 }
4258
4259 /* Reset Initialization control block */
4260 memset(icb, 0, ha->init_cb_size);
4261
4262 /* Copy 1st segment. */
4263 dptr1 = (uint8_t *)icb;
4264 dptr2 = (uint8_t *)&nv->version;
4265 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
4266 while (cnt--)
4267 *dptr1++ = *dptr2++;
4268
4269 icb->login_retry_count = nv->login_retry_count;
4270 icb->link_down_on_nos = nv->link_down_on_nos;
4271
4272 /* Copy 2nd segment. */
4273 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
4274 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
4275 cnt = (uint8_t *)&icb->reserved_3 -
4276 (uint8_t *)&icb->interrupt_delay_timer;
4277 while (cnt--)
4278 *dptr1++ = *dptr2++;
4279
4280 /*
4281 * Setup driver NVRAM options.
4282 */
4283 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4284 "QLA2462");
4285
4286 /* Use alternate WWN? */
4287 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4288 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4289 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4290 }
4291
4292 /* Prepare nodename */
4293 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
4294 /*
4295 * Firmware will apply the following mask if the nodename was
4296 * not provided.
4297 */
4298 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4299 icb->node_name[0] &= 0xF0;
4300 }
4301
4302 /* Set host adapter parameters. */
4303 ha->flags.disable_risc_code_load = 0;
4304 ha->flags.enable_lip_reset = 0;
4305 ha->flags.enable_lip_full_login =
4306 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
4307 ha->flags.enable_target_reset =
4308 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
4309 ha->flags.enable_led_scheme = 0;
4310 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
4311
4312 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
4313 (BIT_6 | BIT_5 | BIT_4)) >> 4;
4314
4315 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
4316 sizeof(ha->fw_seriallink_options24));
4317
4318 /* save HBA serial number */
4319 ha->serial0 = icb->port_name[5];
4320 ha->serial1 = icb->port_name[6];
4321 ha->serial2 = icb->port_name[7];
4322 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4323 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
4324
4325 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4326
4327 ha->retry_count = le16_to_cpu(nv->login_retry_count);
4328
4329 /* Set minimum login_timeout to 4 seconds. */
4330 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
4331 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
4332 if (le16_to_cpu(nv->login_timeout) < 4)
4333 nv->login_timeout = __constant_cpu_to_le16(4);
4334 ha->login_timeout = le16_to_cpu(nv->login_timeout);
4335 icb->login_timeout = nv->login_timeout;
4336
4337 /* Set minimum RATOV to 100 tenths of a second. */
4338 ha->r_a_tov = 100;
4339
4340 ha->loop_reset_delay = nv->reset_delay;
4341
4342 /* Link Down Timeout = 0:
4343 *
4344 * When Port Down timer expires we will start returning
4345 * I/Os to OS with "DID_NO_CONNECT".
4346 *
4347 * Link Down Timeout != 0:
4348 *
4349 * The driver waits for the link to come up after link down
4350 * before returning I/Os to OS with "DID_NO_CONNECT".
4351 */
4352 if (le16_to_cpu(nv->link_down_timeout) == 0) {
4353 ha->loop_down_abort_time =
4354 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4355 } else {
4356 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
4357 ha->loop_down_abort_time =
4358 (LOOP_DOWN_TIME - ha->link_down_timeout);
4359 }
4360
4361 /* Need enough time to try and get the port back. */
4362 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
4363 if (qlport_down_retry)
4364 ha->port_down_retry_count = qlport_down_retry;
4365
4366 /* Set login_retry_count */
4367 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
4368 if (ha->port_down_retry_count ==
4369 le16_to_cpu(nv->port_down_retry_count) &&
4370 ha->port_down_retry_count > 3)
4371 ha->login_retry_count = ha->port_down_retry_count;
4372 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4373 ha->login_retry_count = ha->port_down_retry_count;
4374 if (ql2xloginretrycount)
4375 ha->login_retry_count = ql2xloginretrycount;
4376
4377 /* Enable ZIO. */
4378 if (!vha->flags.init_done) {
4379 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
4380 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4381 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
4382 le16_to_cpu(icb->interrupt_delay_timer): 2;
4383 }
4384 icb->firmware_options_2 &= __constant_cpu_to_le32(
4385 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
4386 vha->flags.process_response_queue = 0;
4387 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4388 ha->zio_mode = QLA_ZIO_MODE_6;
4389
4390 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
4391 "(%d us).\n", vha->host_no, ha->zio_mode,
4392 ha->zio_timer * 100));
4393 qla_printk(KERN_INFO, ha,
4394 "ZIO mode %d enabled; timer delay (%d us).\n",
4395 ha->zio_mode, ha->zio_timer * 100);
4396
4397 icb->firmware_options_2 |= cpu_to_le32(
4398 (uint32_t)ha->zio_mode);
4399 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
4400 vha->flags.process_response_queue = 1;
4401 }
4402
4403 if (rval) {
4404 DEBUG2_3(printk(KERN_WARNING
4405 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
4406 }
4407 return (rval);
4408 }
4409
4410 static int
4411 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4412 uint32_t faddr)
4413 {
4414 int rval = QLA_SUCCESS;
4415 int segments, fragment;
4416 uint32_t *dcode, dlen;
4417 uint32_t risc_addr;
4418 uint32_t risc_size;
4419 uint32_t i;
4420 struct qla_hw_data *ha = vha->hw;
4421 struct req_que *req = ha->req_q_map[0];
4422
4423 qla_printk(KERN_INFO, ha,
4424 "FW: Loading from flash (%x)...\n", faddr);
4425
4426 rval = QLA_SUCCESS;
4427
4428 segments = FA_RISC_CODE_SEGMENTS;
4429 dcode = (uint32_t *)req->ring;
4430 *srisc_addr = 0;
4431
4432 /* Validate firmware image by checking version. */
4433 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
4434 for (i = 0; i < 4; i++)
4435 dcode[i] = be32_to_cpu(dcode[i]);
4436 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
4437 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4438 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4439 dcode[3] == 0)) {
4440 qla_printk(KERN_WARNING, ha,
4441 "Unable to verify integrity of flash firmware image!\n");
4442 qla_printk(KERN_WARNING, ha,
4443 "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
4444 dcode[1], dcode[2], dcode[3]);
4445
4446 return QLA_FUNCTION_FAILED;
4447 }
4448
4449 while (segments && rval == QLA_SUCCESS) {
4450 /* Read segment's load information. */
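/* dcode[2] holds the RISC load address, dcode[3] the segment length in 32-bit words. */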
4451 qla24xx_read_flash_data(vha, dcode, faddr, 4);
4452
4453 risc_addr = be32_to_cpu(dcode[2]);
4454 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
4455 risc_size = be32_to_cpu(dcode[3]);
4456
4457 fragment = 0;
4458 while (risc_size > 0 && rval == QLA_SUCCESS) {
4459 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
4460 if (dlen > risc_size)
4461 dlen = risc_size;
4462
4463 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
4464 "addr %x, number of dwords 0x%x, offset 0x%x.\n",
4465 vha->host_no, risc_addr, dlen, faddr));
4466
4467 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
4468 for (i = 0; i < dlen; i++)
4469 dcode[i] = swab32(dcode[i]);
4470
4471 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4472 dlen);
4473 if (rval) {
4474 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4475 "segment %d of firmware\n", vha->host_no,
4476 fragment));
4477 qla_printk(KERN_WARNING, ha,
4478 "[ERROR] Failed to load segment %d of "
4479 "firmware\n", fragment);
4480 break;
4481 }
4482
4483 faddr += dlen;
4484 risc_addr += dlen;
4485 risc_size -= dlen;
4486 fragment++;
4487 }
4488
4489 /* Next segment. */
4490 segments--;
4491 }
4492
4493 return rval;
4494 }
4495
4496 #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
4497
4498 int
4499 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4500 {
4501 int rval;
4502 int i, fragment;
4503 uint16_t *wcode, *fwcode;
4504 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
4505 struct fw_blob *blob;
4506 struct qla_hw_data *ha = vha->hw;
4507 struct req_que *req = ha->req_q_map[0];
4508
4509 /* Load firmware blob. */
4510 blob = qla2x00_request_firmware(vha);
4511 if (!blob) {
4512 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
4513 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
4514 "from: " QLA_FW_URL ".\n");
4515 return QLA_FUNCTION_FAILED;
4516 }
4517
4518 rval = QLA_SUCCESS;
4519
4520 wcode = (uint16_t *)req->ring;
4521 *srisc_addr = 0;
4522 fwcode = (uint16_t *)blob->fw->data;
4523 fwclen = 0;
4524
4525 /* Validate firmware image by checking version. */
4526 if (blob->fw->size < 8 * sizeof(uint16_t)) {
4527 qla_printk(KERN_WARNING, ha,
4528 "Unable to verify integrity of firmware image (%Zd)!\n",
4529 blob->fw->size);
4530 goto fail_fw_integrity;
4531 }
4532 for (i = 0; i < 4; i++)
4533 wcode[i] = be16_to_cpu(fwcode[i + 4]);
4534 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
4535 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
4536 wcode[2] == 0 && wcode[3] == 0)) {
4537 qla_printk(KERN_WARNING, ha,
4538 "Unable to verify integrity of firmware image!\n");
4539 qla_printk(KERN_WARNING, ha,
4540 "Firmware data: %04x %04x %04x %04x!\n", wcode[0],
4541 wcode[1], wcode[2], wcode[3]);
4542 goto fail_fw_integrity;
4543 }
4544
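/*
 * blob->segs is a zero-terminated table of RISC load addresses; each
 * segment's length in 16-bit words is taken from the image data at
 * the current position (fwcode[3]).
 */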
4545 seg = blob->segs;
4546 while (*seg && rval == QLA_SUCCESS) {
4547 risc_addr = *seg;
4548 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
4549 risc_size = be16_to_cpu(fwcode[3]);
4550
4551 /* Validate firmware image size. */
4552 fwclen += risc_size * sizeof(uint16_t);
4553 if (blob->fw->size < fwclen) {
4554 qla_printk(KERN_WARNING, ha,
4555 "Unable to verify integrity of firmware image "
4556 "(%Zd)!\n", blob->fw->size);
4557 goto fail_fw_integrity;
4558 }
4559
4560 fragment = 0;
4561 while (risc_size > 0 && rval == QLA_SUCCESS) {
4562 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
4563 if (wlen > risc_size)
4564 wlen = risc_size;
4565
4566 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
4567 "addr %x, number of words 0x%x.\n", vha->host_no,
4568 risc_addr, wlen));
4569
4570 for (i = 0; i < wlen; i++)
4571 wcode[i] = swab16(fwcode[i]);
4572
4573 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4574 wlen);
4575 if (rval) {
4576 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4577 "segment %d of firmware\n", vha->host_no,
4578 fragment));
4579 qla_printk(KERN_WARNING, ha,
4580 "[ERROR] Failed to load segment %d of "
4581 "firmware\n", fragment);
4582 break;
4583 }
4584
4585 fwcode += wlen;
4586 risc_addr += wlen;
4587 risc_size -= wlen;
4588 fragment++;
4589 }
4590
4591 /* Next segment. */
4592 seg++;
4593 }
4594 return rval;
4595
4596 fail_fw_integrity:
4597 return QLA_FUNCTION_FAILED;
4598 }
4599
4600 static int
4601 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4602 {
4603 int rval;
4604 int segments, fragment;
4605 uint32_t *dcode, dlen;
4606 uint32_t risc_addr;
4607 uint32_t risc_size;
4608 uint32_t i;
4609 struct fw_blob *blob;
4610 uint32_t *fwcode, fwclen;
4611 struct qla_hw_data *ha = vha->hw;
4612 struct req_que *req = ha->req_q_map[0];
4613
4614 /* Load firmware blob. */
4615 blob = qla2x00_request_firmware(vha);
4616 if (!blob) {
4617 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
4618 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
4619 "from: " QLA_FW_URL ".\n");
4620
4621 return QLA_FUNCTION_FAILED;
4622 }
4623
4624 qla_printk(KERN_INFO, ha,
4625 "FW: Loading via request-firmware...\n");
4626
4627 rval = QLA_SUCCESS;
4628
4629 segments = FA_RISC_CODE_SEGMENTS;
4630 dcode = (uint32_t *)req->ring;
4631 *srisc_addr = 0;
4632 fwcode = (uint32_t *)blob->fw->data;
4633 fwclen = 0;
4634
4635 /* Validate firmware image by checking version. */
4636 if (blob->fw->size < 8 * sizeof(uint32_t)) {
4637 qla_printk(KERN_WARNING, ha,
4638 "Unable to verify integrity of firmware image (%Zd)!\n",
4639 blob->fw->size);
4640 goto fail_fw_integrity;
4641 }
4642 for (i = 0; i < 4; i++)
4643 dcode[i] = be32_to_cpu(fwcode[i + 4]);
4644 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
4645 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4646 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4647 dcode[3] == 0)) {
4648 qla_printk(KERN_WARNING, ha,
4649 "Unable to verify integrity of firmware image!\n");
4650 qla_printk(KERN_WARNING, ha,
4651 "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
4652 dcode[1], dcode[2], dcode[3]);
4653 goto fail_fw_integrity;
4654 }
4655
4656 while (segments && rval == QLA_SUCCESS) {
4657 risc_addr = be32_to_cpu(fwcode[2]);
4658 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
4659 risc_size = be32_to_cpu(fwcode[3]);
4660
4661 /* Validate firmware image size. */
4662 fwclen += risc_size * sizeof(uint32_t);
4663 if (blob->fw->size < fwclen) {
4664 qla_printk(KERN_WARNING, ha,
4665 "Unable to verify integrity of firmware image "
4666 "(%Zd)!\n", blob->fw->size);
4667
4668 goto fail_fw_integrity;
4669 }
4670
4671 fragment = 0;
4672 while (risc_size > 0 && rval == QLA_SUCCESS) {
4673 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
4674 if (dlen > risc_size)
4675 dlen = risc_size;
4676
4677 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
4678 "addr %x, number of dwords 0x%x.\n", vha->host_no,
4679 risc_addr, dlen));
4680
4681 for (i = 0; i < dlen; i++)
4682 dcode[i] = swab32(fwcode[i]);
4683
4684 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4685 dlen);
4686 if (rval) {
4687 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4688 "segment %d of firmware\n", vha->host_no,
4689 fragment));
4690 qla_printk(KERN_WARNING, ha,
4691 "[ERROR] Failed to load segment %d of "
4692 "firmware\n", fragment);
4693 break;
4694 }
4695
4696 fwcode += dlen;
4697 risc_addr += dlen;
4698 risc_size -= dlen;
4699 fragment++;
4700 }
4701
4702 /* Next segment. */
4703 segments--;
4704 }
4705 return rval;
4706
4707 fail_fw_integrity:
4708 return QLA_FUNCTION_FAILED;
4709 }
4710
4711 int
4712 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4713 {
4714 int rval;
4715
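/*
 * ql2xfwloadbin == 1 selects flash-first loading; reuse the 81xx
 * loader, which tries flash before the request-firmware blob.
 */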
4716 if (ql2xfwloadbin == 1)
4717 return qla81xx_load_risc(vha, srisc_addr);
4718
4719 /*
4720 * FW Load priority:
4721 * 1) Firmware via request-firmware interface (.bin file).
4722 * 2) Firmware residing in flash.
4723 */
4724 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4725 if (rval == QLA_SUCCESS)
4726 return rval;
4727
4728 return qla24xx_load_risc_flash(vha, srisc_addr,
4729 vha->hw->flt_region_fw);
4730 }
4731
4732 int
4733 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4734 {
4735 int rval;
4736 struct qla_hw_data *ha = vha->hw;
4737
4738 if (ql2xfwloadbin == 2)
4739 goto try_blob_fw;
4740
4741 /*
4742 * FW Load priority:
4743 * 1) Firmware residing in flash.
4744 * 2) Firmware via request-firmware interface (.bin file).
4745 * 3) Golden-Firmware residing in flash -- limited operation.
4746 */
4747 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4748 if (rval == QLA_SUCCESS)
4749 return rval;
4750
4751 try_blob_fw:
4752 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4753 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4754 return rval;
4755
4756 qla_printk(KERN_ERR, ha,
4757 "FW: Attempting to fall back to golden firmware...\n");
4758 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4759 if (rval != QLA_SUCCESS)
4760 return rval;
4761
4762 qla_printk(KERN_ERR, ha,
4763 "FW: Please update operational firmware...\n");
4764 ha->flags.running_gold_fw = 1;
4765
4766 return rval;
4767 }
4768
4769 void
4770 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4771 {
4772 int ret, retries;
4773 struct qla_hw_data *ha = vha->hw;
4774
4775 if (ha->flags.pci_channel_io_perm_failure)
4776 return;
4777 if (!IS_FWI2_CAPABLE(ha))
4778 return;
4779 if (!ha->fw_major_version)
4780 return;
4781
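/*
 * Retry the stop-firmware command up to 5 times, resetting and
 * re-verifying the chip between attempts; a timeout or an
 * invalid-command status ends the retries immediately.
 */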
4782 ret = qla2x00_stop_firmware(vha);
4783 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4784 ret != QLA_INVALID_COMMAND && retries ; retries--) {
4785 ha->isp_ops->reset_chip(vha);
4786 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4787 continue;
4788 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4789 continue;
4790 qla_printk(KERN_INFO, ha,
4791 "Attempting retry of stop-firmware command...\n");
4792 ret = qla2x00_stop_firmware(vha);
4793 }
4794 }
4795
4796 int
4797 qla24xx_configure_vhba(scsi_qla_host_t *vha)
4798 {
4799 int rval = QLA_SUCCESS;
4800 uint16_t mb[MAILBOX_REGISTER_COUNT];
4801 struct qla_hw_data *ha = vha->hw;
4802 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4803 struct req_que *req;
4804 struct rsp_que *rsp;
4805
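/* Only NPIV virtual ports (nonzero vp_idx) are configured here. */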
4806 if (!vha->vp_idx)
4807 return -EINVAL;
4808
4809 rval = qla2x00_fw_ready(base_vha);
4810 if (ha->flags.cpu_affinity_enabled)
4811 req = ha->req_q_map[0];
4812 else
4813 req = vha->req;
4814 rsp = req->rsp;
4815
4816 if (rval == QLA_SUCCESS) {
4817 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4818 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4819 }
4820
4821 vha->flags.management_server_logged_in = 0;
4822
4823 /* Login to SNS first */
4824 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
4825 if (mb[0] != MBS_COMMAND_COMPLETE) {
4826 DEBUG15(qla_printk(KERN_INFO, ha,
4827 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
4828 "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
4829 mb[0], mb[1], mb[2], mb[6], mb[7]));
4830 return (QLA_FUNCTION_FAILED);
4831 }
4832
4833 atomic_set(&vha->loop_down_timer, 0);
4834 atomic_set(&vha->loop_state, LOOP_UP);
4835 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4836 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4837 rval = qla2x00_loop_resync(base_vha);
4838
4839 return rval;
4840 }
4841
4842 /* 84XX Support **************************************************************/
4843
4844 static LIST_HEAD(qla_cs84xx_list);
4845 static DEFINE_MUTEX(qla_cs84xx_mutex);
4846
4847 static struct qla_chip_state_84xx *
4848 qla84xx_get_chip(struct scsi_qla_host *vha)
4849 {
4850 struct qla_chip_state_84xx *cs84xx;
4851 struct qla_hw_data *ha = vha->hw;
4852
4853 mutex_lock(&qla_cs84xx_mutex);
4854
4855 /* Find any shared 84xx chip. */
4856 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
4857 if (cs84xx->bus == ha->pdev->bus) {
4858 kref_get(&cs84xx->kref);
4859 goto done;
4860 }
4861 }
4862
4863 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
4864 if (!cs84xx)
4865 goto done;
4866
4867 kref_init(&cs84xx->kref);
4868 spin_lock_init(&cs84xx->access_lock);
4869 mutex_init(&cs84xx->fw_update_mutex);
4870 cs84xx->bus = ha->pdev->bus;
4871
4872 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
4873 done:
4874 mutex_unlock(&qla_cs84xx_mutex);
4875 return cs84xx;
4876 }
4877
4878 static void
4879 __qla84xx_chip_release(struct kref *kref)
4880 {
4881 struct qla_chip_state_84xx *cs84xx =
4882 container_of(kref, struct qla_chip_state_84xx, kref);
4883
4884 mutex_lock(&qla_cs84xx_mutex);
4885 list_del(&cs84xx->list);
4886 mutex_unlock(&qla_cs84xx_mutex);
4887 kfree(cs84xx);
4888 }
4889
4890 void
4891 qla84xx_put_chip(struct scsi_qla_host *vha)
4892 {
4893 struct qla_hw_data *ha = vha->hw;
4894 if (ha->cs84xx)
4895 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
4896 }
4897
4898 static int
4899 qla84xx_init_chip(scsi_qla_host_t *vha)
4900 {
4901 int rval;
4902 uint16_t status[2];
4903 struct qla_hw_data *ha = vha->hw;
4904
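/*
 * Hold the shared chip state's fw_update_mutex so verification does
 * not race with a firmware update issued through another function.
 */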
4905 mutex_lock(&ha->cs84xx->fw_update_mutex);
4906
4907 rval = qla84xx_verify_chip(vha, status);
4908
4909 mutex_unlock(&ha->cs84xx->fw_update_mutex);
4910
4911 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
4912 QLA_SUCCESS;
4913 }
4914
4915 /* 81XX Support **************************************************************/
4916
4917 int
4918 qla81xx_nvram_config(scsi_qla_host_t *vha)
4919 {
4920 int rval;
4921 struct init_cb_81xx *icb;
4922 struct nvram_81xx *nv;
4923 uint32_t *dptr;
4924 uint8_t *dptr1, *dptr2;
4925 uint32_t chksum;
4926 uint16_t cnt;
4927 struct qla_hw_data *ha = vha->hw;
4928
4929 rval = QLA_SUCCESS;
4930 icb = (struct init_cb_81xx *)ha->init_cb;
4931 nv = ha->nvram;
4932
4933 /* Determine NVRAM starting address. */
4934 ha->nvram_size = sizeof(struct nvram_81xx);
4935 ha->vpd_size = FA_NVRAM_VPD_SIZE;
4936
4937 /* Get VPD data into cache */
4938 ha->vpd = ha->nvram + VPD_OFFSET;
4939 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
4940 ha->vpd_size);
4941
4942 /* Get NVRAM data into cache and calculate checksum. */
4943 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
4944 ha->nvram_size);
4945 dptr = (uint32_t *)nv;
4946 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4947 chksum += le32_to_cpu(*dptr++);
4948
4949 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4950 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4951
4952 /* Bad NVRAM data, set default parameters. */
4953 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4954 || nv->id[3] != ' ' ||
4955 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4956 /* Reset NVRAM data. */
4957 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
4958 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
4959 le16_to_cpu(nv->nvram_version));
4960 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
4961 "invalid -- WWPN) defaults.\n");
4962
4963 /*
4964 * Set default initialization control block.
4965 */
4966 memset(nv, 0, ha->nvram_size);
4967 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4968 nv->version = __constant_cpu_to_le16(ICB_VERSION);
4969 nv->frame_payload_size = __constant_cpu_to_le16(2048);
4970 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4971 nv->exchange_count = __constant_cpu_to_le16(0);
4972 nv->port_name[0] = 0x21;
4973 nv->port_name[1] = 0x00 + ha->port_no;
4974 nv->port_name[2] = 0x00;
4975 nv->port_name[3] = 0xe0;
4976 nv->port_name[4] = 0x8b;
4977 nv->port_name[5] = 0x1c;
4978 nv->port_name[6] = 0x55;
4979 nv->port_name[7] = 0x86;
4980 nv->node_name[0] = 0x20;
4981 nv->node_name[1] = 0x00;
4982 nv->node_name[2] = 0x00;
4983 nv->node_name[3] = 0xe0;
4984 nv->node_name[4] = 0x8b;
4985 nv->node_name[5] = 0x1c;
4986 nv->node_name[6] = 0x55;
4987 nv->node_name[7] = 0x86;
4988 nv->login_retry_count = __constant_cpu_to_le16(8);
4989 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4990 nv->login_timeout = __constant_cpu_to_le16(0);
4991 nv->firmware_options_1 =
4992 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4993 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4994 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4995 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4996 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4997 nv->efi_parameters = __constant_cpu_to_le32(0);
4998 nv->reset_delay = 5;
4999 nv->max_luns_per_target = __constant_cpu_to_le16(128);
5000 nv->port_down_retry_count = __constant_cpu_to_le16(30);
5001 nv->link_down_timeout = __constant_cpu_to_le16(30);
5002 nv->enode_mac[0] = 0x00;
5003 nv->enode_mac[1] = 0x02;
5004 nv->enode_mac[2] = 0x03;
5005 nv->enode_mac[3] = 0x04;
5006 nv->enode_mac[4] = 0x05;
5007 nv->enode_mac[5] = 0x06 + ha->port_no;
5008
5009 rval = 1;
5010 }
5011
5012 /* Reset Initialization control block */
5013 memset(icb, 0, sizeof(struct init_cb_81xx));
5014
5015 /* Copy 1st segment. */
5016 dptr1 = (uint8_t *)icb;
5017 dptr2 = (uint8_t *)&nv->version;
5018 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
5019 while (cnt--)
5020 *dptr1++ = *dptr2++;
5021
5022 icb->login_retry_count = nv->login_retry_count;
5023
5024 /* Copy 2nd segment. */
5025 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
5026 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
5027 cnt = (uint8_t *)&icb->reserved_5 -
5028 (uint8_t *)&icb->interrupt_delay_timer;
5029 while (cnt--)
5030 *dptr1++ = *dptr2++;
5031
5032 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
5033 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
5034 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
5035 icb->enode_mac[0] = 0x01;
5036 icb->enode_mac[1] = 0x02;
5037 icb->enode_mac[2] = 0x03;
5038 icb->enode_mac[3] = 0x04;
5039 icb->enode_mac[4] = 0x05;
5040 icb->enode_mac[5] = 0x06 + ha->port_no;
5041 }
5042
5043 /* Use extended-initialization control block. */
5044 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
5045
5046 /*
5047 * Setup driver NVRAM options.
5048 */
5049 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
5050 "QLE8XXX");
5051
5052 /* Use alternate WWN? */
5053 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
5054 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5055 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5056 }
5057
5058 /* Prepare nodename */
5059 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
5060 /*
5061 * Firmware will apply the following mask if the nodename was
5062 * not provided.
5063 */
5064 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5065 icb->node_name[0] &= 0xF0;
5066 }
5067
5068 /* Set host adapter parameters. */
5069 ha->flags.disable_risc_code_load = 0;
5070 ha->flags.enable_lip_reset = 0;
5071 ha->flags.enable_lip_full_login =
5072 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
5073 ha->flags.enable_target_reset =
5074 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
5075 ha->flags.enable_led_scheme = 0;
5076 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
5077
5078 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
5079 (BIT_6 | BIT_5 | BIT_4)) >> 4;
5080
5081 /* save HBA serial number */
5082 ha->serial0 = icb->port_name[5];
5083 ha->serial1 = icb->port_name[6];
5084 ha->serial2 = icb->port_name[7];
5085 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5086 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5087
5088 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5089
5090 ha->retry_count = le16_to_cpu(nv->login_retry_count);
5091
5092 /* Set minimum login_timeout to 4 seconds. */
5093 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
5094 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
5095 if (le16_to_cpu(nv->login_timeout) < 4)
5096 nv->login_timeout = __constant_cpu_to_le16(4);
5097 ha->login_timeout = le16_to_cpu(nv->login_timeout);
5098 icb->login_timeout = nv->login_timeout;
5099
5100 /* Set minimum RATOV to 100 tenths of a second. */
5101 ha->r_a_tov = 100;
5102
5103 ha->loop_reset_delay = nv->reset_delay;
5104
5105 /* Link Down Timeout = 0:
5106 *
5107 * When Port Down timer expires we will start returning
5108 * I/Os to OS with "DID_NO_CONNECT".
5109 *
5110 * Link Down Timeout != 0:
5111 *
5112 * The driver waits for the link to come up after link down
5113 * before returning I/Os to OS with "DID_NO_CONNECT".
5114 */
5115 if (le16_to_cpu(nv->link_down_timeout) == 0) {
5116 ha->loop_down_abort_time =
5117 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
5118 } else {
5119 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
5120 ha->loop_down_abort_time =
5121 (LOOP_DOWN_TIME - ha->link_down_timeout);
5122 }
5123
5124 /* Need enough time to try and get the port back. */
5125 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
5126 if (qlport_down_retry)
5127 ha->port_down_retry_count = qlport_down_retry;
5128
5129 /* Set login_retry_count */
5130 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
5131 if (ha->port_down_retry_count ==
5132 le16_to_cpu(nv->port_down_retry_count) &&
5133 ha->port_down_retry_count > 3)
5134 ha->login_retry_count = ha->port_down_retry_count;
5135 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5136 ha->login_retry_count = ha->port_down_retry_count;
5137 if (ql2xloginretrycount)
5138 ha->login_retry_count = ql2xloginretrycount;
5139
5140 /* Enable ZIO. */
5141 if (!vha->flags.init_done) {
5142 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
5143 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5144 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
5145 le16_to_cpu(icb->interrupt_delay_timer): 2;
5146 }
5147 icb->firmware_options_2 &= __constant_cpu_to_le32(
5148 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
5149 vha->flags.process_response_queue = 0;
5150 if (ha->zio_mode != QLA_ZIO_DISABLED) {
5151 ha->zio_mode = QLA_ZIO_MODE_6;
5152
5153 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
5154 "(%d us).\n", vha->host_no, ha->zio_mode,
5155 ha->zio_timer * 100));
5156 qla_printk(KERN_INFO, ha,
5157 "ZIO mode %d enabled; timer delay (%d us).\n",
5158 ha->zio_mode, ha->zio_timer * 100);
5159
5160 icb->firmware_options_2 |= cpu_to_le32(
5161 (uint32_t)ha->zio_mode);
5162 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
5163 vha->flags.process_response_queue = 1;
5164 }
5165
5166 if (rval) {
5167 DEBUG2_3(printk(KERN_WARNING
5168 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
5169 }
5170 return (rval);
5171 }
5172
5173 int
5174 qla82xx_restart_isp(scsi_qla_host_t *vha)
5175 {
5176 int status, rval;
5177 uint32_t wait_time;
5178 struct qla_hw_data *ha = vha->hw;
5179 struct req_que *req = ha->req_q_map[0];
5180 struct rsp_que *rsp = ha->rsp_q_map[0];
5181 struct scsi_qla_host *vp;
5182 struct scsi_qla_host *tvp;
5183
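/*
 * The 82xx firmware is already running at this point, so restart
 * begins directly with ring initialization and waiting for the
 * firmware-ready state.
 */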
5184 status = qla2x00_init_rings(vha);
5185 if (!status) {
5186 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5187 ha->flags.chip_reset_done = 1;
5188
5189 status = qla2x00_fw_ready(vha);
5190 if (!status) {
5191 qla_printk(KERN_INFO, ha,
5192 "%s(): Start configure loop, "
5193 "status = %d\n", __func__, status);
5194
5195 /* Issue a marker after FW becomes ready. */
5196 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
5197
5198 vha->flags.online = 1;
5199 /* Wait at most MAX_TARGET RSCNs for a stable link. */
5200 wait_time = 256;
5201 do {
5202 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5203 qla2x00_configure_loop(vha);
5204 wait_time--;
5205 } while (!atomic_read(&vha->loop_down_timer) &&
5206 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
5207 wait_time &&
5208 (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
5209 }
5210
5211 /* If no cable is attached, treat the configuration as successful. */
5212 if ((vha->device_flags & DFLG_NO_CABLE))
5213 status = 0;
5214
5215 qla_printk(KERN_INFO, ha,
5216 "%s(): Configure loop done, status = 0x%x\n",
5217 __func__, status);
5218 }
5219
5220 if (!status) {
5221 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5222
5223 if (!atomic_read(&vha->loop_down_timer)) {
5224 /*
5225 * Issue marker command only when we are going
5226 * to start the I/O.
5227 */
5228 vha->marker_needed = 1;
5229 }
5230
5231 vha->flags.online = 1;
5232
5233 ha->isp_ops->enable_intrs(ha);
5234
5235 ha->isp_abort_cnt = 0;
5236 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5237
5238 if (ha->fce) {
5239 ha->flags.fce_enabled = 1;
5240 memset(ha->fce, 0,
5241 fce_calc_size(ha->fce_bufs));
5242 rval = qla2x00_enable_fce_trace(vha,
5243 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5244 &ha->fce_bufs);
5245 if (rval) {
5246 qla_printk(KERN_WARNING, ha,
5247 "Unable to reinitialize FCE "
5248 "(%d).\n", rval);
5249 ha->flags.fce_enabled = 0;
5250 }
5251 }
5252
5253 if (ha->eft) {
5254 memset(ha->eft, 0, EFT_SIZE);
5255 rval = qla2x00_enable_eft_trace(vha,
5256 ha->eft_dma, EFT_NUM_BUFFERS);
5257 if (rval) {
5258 qla_printk(KERN_WARNING, ha,
5259 "Unable to reinitialize EFT "
5260 "(%d).\n", rval);
5261 }
5262 }
5263 }
5264
5265 if (!status) {
5266 DEBUG(printk(KERN_INFO
5267 "qla82xx_restart_isp(%ld): succeeded.\n",
5268 vha->host_no));
5269 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
5270 if (vp->vp_idx)
5271 qla2x00_vp_abort_isp(vp);
5272 }
5273 } else {
5274 qla_printk(KERN_INFO, ha,
5275 "qla82xx_restart_isp: **** FAILED ****\n");
5276 }
5277
5278 return status;
5279 }
5280
5281 void
5282 qla81xx_update_fw_options(scsi_qla_host_t *vha)
5283 {
5284 struct qla_hw_data *ha = vha->hw;
5285
5286 if (!ql2xetsenable)
5287 return;
5288
5289 /* Enable ETS Burst. */
5290 memset(ha->fw_options, 0, sizeof(ha->fw_options));
5291 ha->fw_options[2] |= BIT_9;
5292 qla2x00_set_fw_options(vha, ha->fw_options);
5293 }
5294
5295 /*
5296 * qla24xx_get_fcp_prio
5297 * Gets the fcp cmd priority value for the logged in port.
5298 * Looks for a match of the port descriptors within
5299 * each of the fcp prio config entries. If a match is found,
5300 * the tag (priority) value is returned.
5301 *
5302 * Input:
5303 * ha = adapter block pointer.
5304 * fcport = port structure pointer.
5305 *
5306 * Return:
5307 * non-zero (if found)
5308 * 0 (if not found)
5309 *
5310 * Context:
5311 * Kernel context
5312 */
5313 uint8_t
5314 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5315 {
5316 int i, entries;
5317 uint8_t pid_match, wwn_match;
5318 uint8_t priority;
5319 uint32_t pid1, pid2;
5320 uint64_t wwn1, wwn2;
5321 struct qla_fcp_prio_entry *pri_entry;
5322 struct qla_hw_data *ha = vha->hw;
5323
5324 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
5325 return 0;
5326
5327 priority = 0;
5328 entries = ha->fcp_prio_cfg->num_entries;
5329 pri_entry = &ha->fcp_prio_cfg->entry[0];
5330
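/*
 * An entry matches when both of its port-ID checks pass or both of
 * its WWPN checks pass; a source/destination left at INVALID_PORT_ID,
 * or a WWPN of all ones, acts as a wildcard.
 */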
5331 for (i = 0; i < entries; i++) {
5332 pid_match = wwn_match = 0;
5333
5334 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
5335 pri_entry++;
5336 continue;
5337 }
5338
5339 /* check source pid for a match */
5340 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
5341 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
5342 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
5343 if (pid1 == INVALID_PORT_ID)
5344 pid_match++;
5345 else if (pid1 == pid2)
5346 pid_match++;
5347 }
5348
5349 /* check destination pid for a match */
5350 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
5351 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
5352 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
5353 if (pid1 == INVALID_PORT_ID)
5354 pid_match++;
5355 else if (pid1 == pid2)
5356 pid_match++;
5357 }
5358
5359 /* check source WWN for a match */
5360 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
5361 wwn1 = wwn_to_u64(vha->port_name);
5362 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
5363 if (wwn2 == (uint64_t)-1)
5364 wwn_match++;
5365 else if (wwn1 == wwn2)
5366 wwn_match++;
5367 }
5368
5369 /* check destination WWN for a match */
5370 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
5371 wwn1 = wwn_to_u64(fcport->port_name);
5372 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
5373 if (wwn2 == (uint64_t)-1)
5374 wwn_match++;
5375 else if (wwn1 == wwn2)
5376 wwn_match++;
5377 }
5378
5379 if (pid_match == 2 || wwn_match == 2) {
5380 /* Found a matching entry */
5381 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
5382 priority = pri_entry->tag;
5383 break;
5384 }
5385
5386 pri_entry++;
5387 }
5388
5389 return priority;
5390 }
5391
5392 /*
5393 * qla24xx_update_fcport_fcp_prio
5394 * Activates fcp priority for the logged in fc port
5395 *
5396 * Input:
5397 * ha = adapter block pointer.
5398 * fcport = port structure pointer.
5399 *
5400 * Return:
5401 * QLA_SUCCESS or QLA_FUNCTION_FAILED
5402 *
5403 * Context:
5404 * Kernel context.
5405 */
5406 int
5407 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *ha, fc_port_t *fcport)
5408 {
5409 int ret;
5410 uint8_t priority;
5411 uint16_t mb[5];
5412
5413 if (atomic_read(&fcport->state) == FCS_UNCONFIGURED ||
5414 fcport->port_type != FCT_TARGET ||
5415 fcport->loop_id == FC_NO_LOOP_ID)
5416 return QLA_FUNCTION_FAILED;
5417
5418 priority = qla24xx_get_fcp_prio(ha, fcport);
5419 ret = qla24xx_set_fcp_prio(ha, fcport->loop_id, priority, mb);
5420 if (ret == QLA_SUCCESS)
5421 fcport->fcp_prio = priority;
5422 else
5423 DEBUG2(printk(KERN_WARNING
5424 "scsi(%ld): Unable to activate fcp priority, "
5425 " ret=0x%x\n", ha->host_no, ret));
5426
5427 return ret;
5428 }
5429
5430 /*
5431 * qla24xx_update_all_fcp_prio
5432 * Activates fcp priority for all the logged in ports
5433 *
5434 * Input:
5435 * ha = adapter block pointer.
5436 *
5437 * Return:
5438 * QLA_SUCCESS or QLA_FUNCTION_FAILED
5439 *
5440 * Context:
5441 * Kernel context.
5442 */
5443 int
5444 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
5445 {
5446 int ret;
5447 fc_port_t *fcport;
5448
5449 ret = QLA_FUNCTION_FAILED;
5450 /* We need to set priority for all logged in ports */
5451 list_for_each_entry(fcport, &vha->vp_fcports, list)
5452 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
5453
5454 return ret;
5455 }