/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still using the FCF; otherwise it
 * returns 0 when no remote node is still using the FCF at the time devloss
 * timeout happened to this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int put_node;
	int put_rport;
	int warn_on = 0;
	int fcf_inuse = 0;

	rport = ndlp->rport;

	if (!rport)
		return fcf_inuse;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}
/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for an SLI4 host. For the devloss
 * timeout of the last remote node which had been using the FCF, when this
 * routine is invoked it is guaranteed that none of the remote nodes are
 * still using the FCF. On devloss timeout of the last remote node using the
 * FCF, if the FIP engine is neither in the FCF table scan process nor the
 * roundrobin failover process, the in-use FCF shall be unregistered. If the
 * FIP engine is in the FCF discovery process, the devloss timeout state
 * shall be set for either the FCF table scan process or the roundrobin
 * failover process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}
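/*
 * Editorial note (not part of the original source): this post-handler is
 * only meaningful on SLI4 ports.  The pairing can be seen in the
 * LPFC_EVT_DEV_LOSS case of lpfc_work_list_done() below: the worker first
 * calls lpfc_dev_loss_tmo_handler() to learn whether the FCF was still in
 * use (fcf_inuse) and which DID timed out (nlp_did), drops its node
 * reference, and then, for LPFC_SLI_REV4 only, calls this routine with
 * that pair so the in-use FCF can be unregistered or the devloss state
 * recorded for a later scan/failover to act on.
 */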
/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when there
 * are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are a lot of fast events, do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}
/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
					? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
		}
		if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok:     cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, -20);
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
/*
 * This is only called to handle FC worker events. Since this a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt      = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
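/*
 * Editorial usage sketch (not from the original source): a typical caller
 * hands two opaque arguments and an event code to lpfc_workq_post_event()
 * and lets the worker thread dispatch it in lpfc_work_list_done().  For
 * example, an online request could be posted roughly as:
 *
 *	struct completion online_compl;
 *	int status = 0;
 *
 *	init_completion(&online_compl);
 *	lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE);
 *	wait_for_completion(&online_compl);
 *
 * which matches the LPFC_EVT_ONLINE handling above: evt_arg1 receives the
 * return status and evt_arg2 is completed by the worker.
 */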
static void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
			((vport->port_type == LPFC_NPIV_PORT) &&
			(ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t      *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->vport = vport;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
		(vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	lpfc_cleanup_wt_rrqs(phba);
	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (phba->sli_rev < LPFC_SLI_REV4))
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI)
		lpfc_initial_flogi(vport);
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}
/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				&phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
		lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}
static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}
/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. This routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}
/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
		       uint16_t vlan_id, uint32_t flag)
{
	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
				 new_fcf_record);
}
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with the HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery, else it returns
 * zero. If this FCF record can be used for SAN discovery, boot_flag will
 * indicate whether this FCF is used by the boot bios and addr_mode will
 * indicate the addressing mode to be used for this FCF when the function
 * returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in vlan_id on return from the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			struct fcf_record *new_fcf_record,
			uint32_t *boot_flag, uint32_t *addr_mode,
			uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* If FCF not available return 0 */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
		!bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, driver connects to
	 * all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record reports a vlan id use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					     new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
			!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					    new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for the
			 * vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA required but FCF not support this continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
					new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA required but FCF not support this continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				FCFCNCT_AM_SPMA) ?
				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if FCF support the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * of the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}
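/*
 * Editorial note (not from the original source): the vlan_bitmap walked at
 * the top of lpfc_match_fcf_conn_list() is 512 bytes covering 4096 VLAN
 * ids, least-significant bit of byte 0 first.  A minimal equivalent sketch
 * of the "lowest VLAN id" scan it performs:
 *
 *	int i, j, fcf_vlan_id = 0;
 *	for (i = 0; i < 512; i++) {
 *		if (!new_fcf_record->vlan_bitmap[i])
 *			continue;
 *		j = 0;
 *		while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1))
 *			j++;
 *		fcf_vlan_id = i * 8 + j;	// e.g. byte 12, bit 2 -> VLAN 98
 *		break;
 *	}
 */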
/**
 * lpfc_check_pending_fcoe_event - Check if there is a pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else it returns 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}
/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * This function makes a running random selection decision on which FCF record
 * to use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 * from random32() are taken as the generated random number.
 *
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; otherwise, returns false when the outcome is to keep the previously
 * chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = (0xFFFF & random32());

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}
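/*
 * Editorial sketch (not part of the original driver): the check above is
 * reservoir sampling with a reservoir of one.  A caller walking the FCF
 * table would use it roughly as below, where read_next_fcf() is a
 * hypothetical stand-in for the table-read path:
 *
 *	uint32_t cnt = 0;
 *	while ((rec = read_next_fcf(phba)) != NULL) {
 *		cnt++;
 *		if (lpfc_sli4_new_fcf_random_select(phba, cnt))
 *			chosen = rec;	// keep new record with prob 1/cnt
 *	}
 *
 * Each of the cnt eligible records ends up chosen with equal probability
 * 1/cnt; e.g. with three records the third is kept with probability 1/3,
 * and the first or second survives with probability (1/2) * (2/3) = 1/3.
 */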
/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 **/
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
				offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}
/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if the LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}
1860 lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
1861 * @phba: pointer to lpfc hba data structure.
1862 * @fcf_rec: pointer to an existing FCF record.
1863 * @new_fcf_record: pointer to a new FCF record.
1864 * @new_vlan_id: vlan id from the new FCF record.
1866 * This function performs matching test of a new FCF record against an existing
1867 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
1868 * will not be used as part of the FCF record matching criteria.
1870 * Returns true if all the fields matching, otherwise returns false.
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}
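
/*
 * Callers in this file use lpfc_sli4_fcf_record_match() both with a real
 * vlan id (full comparison against the in-use record) and with
 * LPFC_FCOE_IGNORE_VID when the vlan id should not factor into the match.
 */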
/**
 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
 * @vport: Pointer to vport object.
 * @fcf_index: index to next fcf.
 *
 * This function processes the roundrobin fcf failover to the next fcf index.
 * When this function is invoked, there will be a current fcf registered
 * for flogi.
 *
 * Return: 0 to continue retrying flogi on the currently registered fcf;
 *         1 to stop flogi on the currently registered fcf.
 **/
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	struct lpfc_hba *phba = vport->phba;
	int rc;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		goto stop_flogi_current_fcf;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
				"2794 Try FLOGI roundrobin FCF failover to "
				"(x%x)\n", fcf_index);
		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
		if (rc)
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2761 FLOGI roundrobin FCF failover "
					"failed (rc:x%x) to read FCF (x%x)\n",
					rc, phba->fcf.current_rec.fcf_indx);
		else
			goto stop_flogi_current_fcf;
	}
	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}
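
/*
 * Usage sketch (illustrative only): a FLOGI completion path that drives the
 * roundrobin failover would typically pick the next candidate with
 * lpfc_sli4_fcf_rr_next_index_get() and then act on the return value:
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
 *
 * rc == 1 means stop FLOGI on the currently registered FCF; rc == 0 means
 * keep retrying FLOGI on the currently registered FCF.
 */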
/**
 * lpfc_sli4_fcf_pri_list_del
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete
 *
 * This routine checks the on-list flag of the fcf_index to be deleted.
 * If it is on the list then it is removed from the list, and the flag
 * is cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
 **/
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3058 deleting idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_pri->fcf_rec.priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
		    new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}
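
/*
 * lpfc_sli4_fcf_pri_list_del() takes and releases hbalock itself, so it is
 * expected to be called without hbalock held.
 */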
/**
 * lpfc_sli4_set_fcf_flogi_fail
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 *
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so that the roundrobin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
 **/
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_fcf_pri_list_add
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 * @new_fcf_record: pointer to the fcf record being added
 *
 * This routine checks the priority of the fcf_index to be added.
 * If it is a lower priority than the current head of the fcf_pri list
 * then it is added to the list in the right order.
 * If it is the same priority as the current head of the list then it
 * is added to the head of the list and its bit in the rr_bmask is set.
 * If the fcf_index to be added is of a higher priority than the current
 * head of the list then the rr_bmask is cleared, its bit is set in the
 * rr_bmask and it is added to the head of the list.
 *
 * Returns 0 on success, 1 on failure.
 **/
int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
			       struct fcf_record *new_fcf_record)
{
	uint16_t current_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	struct lpfc_fcf_pri *new_fcf_pri;
	int ret;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3059 adding idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_record->fip_priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
		list_del_init(&new_fcf_pri->list);
	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		ret = lpfc_sli4_fcf_rr_index_set(phba,
						 new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				    LPFC_SLI4_FCF_TBL_INDX_MAX);
	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		ret = 0; /* Empty rr list */
		goto out;
	}
	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
			memset(phba->fcf.fcf_rr_bmask, 0,
			       sizeof(*phba->fcf.fcf_rr_bmask));
			/* fcfs_at_this_priority_level = 1; */
			phba->fcf.eligible_fcf_cnt = 1;
		} else
			/* fcfs_at_this_priority_level++; */
			phba->fcf.eligible_fcf_cnt++;
		ret = lpfc_sli4_fcf_rr_index_set(phba,
						 new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		if (new_fcf_pri->fcf_rec.priority <=
		    fcf_pri->fcf_rec.priority) {
			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
				list_add(&new_fcf_pri->list,
					 &phba->fcf.fcf_pri_list);
			else
				list_add(&new_fcf_pri->list,
					 &((struct lpfc_fcf_pri *)
					 fcf_pri->list.prev)->list);
			ret = 0;
			goto out;
		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
			   || new_fcf_pri->fcf_rec.priority <
			      next_fcf_pri->fcf_rec.priority) {
			list_add(&new_fcf_pri->list, &fcf_pri->list);
			ret = 0;
			goto out;
		}
		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
			continue;
	}
	ret = 1;
out:
	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
	spin_unlock_irq(&phba->hbalock);
	return ret;
}
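
/*
 * The fcf_pri list is kept ordered by ascending FIP priority value and the
 * roundrobin bitmask only carries entries at the best priority level seen so
 * far: when a record with a strictly better priority is added, the bitmask
 * is cleared and reseeded with that record, as implemented above.
 */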
/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kicks off
 * discovery.
 * If FCF_IN_USE flag is set in the currently used FCF, the routine tries to
 * use an FCF record which matches the fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 **/
2113 lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
2115 struct fcf_record
*new_fcf_record
;
2116 uint32_t boot_flag
, addr_mode
;
2117 uint16_t fcf_index
, next_fcf_index
;
2118 struct lpfc_fcf_rec
*fcf_rec
= NULL
;
2121 bool select_new_fcf
;
2124 /* If there is pending FCoE event restart FCF table scan */
2125 if (lpfc_check_pending_fcoe_event(phba
, LPFC_SKIP_UNREG_FCF
)) {
2126 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
2130 /* Parse the FCF record from the non-embedded mailbox command */
2131 new_fcf_record
= lpfc_sli4_fcf_rec_mbox_parse(phba
, mboxq
,
2133 if (!new_fcf_record
) {
2134 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
,
2135 "2765 Mailbox command READ_FCF_RECORD "
2136 "failed to retrieve a FCF record.\n");
2137 /* Let next new FCF event trigger fast failover */
2138 spin_lock_irq(&phba
->hbalock
);
2139 phba
->hba_flag
&= ~FCF_TS_INPROG
;
2140 spin_unlock_irq(&phba
->hbalock
);
2141 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
2145 /* Check the FCF record against the connection list */
2146 rc
= lpfc_match_fcf_conn_list(phba
, new_fcf_record
, &boot_flag
,
2147 &addr_mode
, &vlan_id
);
2149 /* Log the FCF record information if turned on */
2150 lpfc_sli4_log_fcf_record_info(phba
, new_fcf_record
, vlan_id
,
2154 * If the fcf record does not match with connect list entries
2155 * read the next entry; otherwise, this is an eligible FCF
2156 * record for roundrobin FCF failover.
2159 lpfc_sli4_fcf_pri_list_del(phba
,
2160 bf_get(lpfc_fcf_record_fcf_index
,
2162 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
2163 "2781 FCF (x%x) failed connection "
2164 "list check: (x%x/x%x)\n",
2165 bf_get(lpfc_fcf_record_fcf_index
,
2167 bf_get(lpfc_fcf_record_fcf_avail
,
2169 bf_get(lpfc_fcf_record_fcf_valid
,
2171 if ((phba
->fcf
.fcf_flag
& FCF_IN_USE
) &&
2172 lpfc_sli4_fcf_record_match(phba
, &phba
->fcf
.current_rec
,
2173 new_fcf_record
, LPFC_FCOE_IGNORE_VID
)) {
2174 if (bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
) !=
2175 phba
->fcf
.current_rec
.fcf_indx
) {
2176 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
,
2177 "2862 FCF (x%x) matches property "
2178 "of in-use FCF (x%x)\n",
2179 bf_get(lpfc_fcf_record_fcf_index
,
2181 phba
->fcf
.current_rec
.fcf_indx
);
2185 * In case the current in-use FCF record becomes
2186 * invalid/unavailable during FCF discovery that
2187 * was not triggered by fast FCF failover process,
2188 * treat it as fast FCF failover.
2190 if (!(phba
->fcf
.fcf_flag
& FCF_REDISC_PEND
) &&
2191 !(phba
->fcf
.fcf_flag
& FCF_REDISC_FOV
)) {
2192 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
2193 "2835 Invalid in-use FCF "
2194 "(x%x), enter FCF failover "
2196 phba
->fcf
.current_rec
.fcf_indx
);
2197 spin_lock_irq(&phba
->hbalock
);
2198 phba
->fcf
.fcf_flag
|= FCF_REDISC_FOV
;
2199 spin_unlock_irq(&phba
->hbalock
);
2200 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
2201 lpfc_sli4_fcf_scan_read_fcf_rec(phba
,
2202 LPFC_FCOE_FCF_GET_FIRST
);
2208 fcf_index
= bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
);
2209 rc
= lpfc_sli4_fcf_pri_list_add(phba
, fcf_index
,
2216 * If this is not the first FCF discovery of the HBA, use last
2217 * FCF record for the discovery. The condition that a rescan
2218 * matches the in-use FCF record: fabric name, switch name, mac
2219 * address, and vlan_id.
2221 spin_lock_irq(&phba
->hbalock
);
2222 if (phba
->fcf
.fcf_flag
& FCF_IN_USE
) {
2223 if (phba
->cfg_fcf_failover_policy
== LPFC_FCF_FOV
&&
2224 lpfc_sli4_fcf_record_match(phba
, &phba
->fcf
.current_rec
,
2225 new_fcf_record
, vlan_id
)) {
2226 if (bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
) ==
2227 phba
->fcf
.current_rec
.fcf_indx
) {
2228 phba
->fcf
.fcf_flag
|= FCF_AVAILABLE
;
2229 if (phba
->fcf
.fcf_flag
& FCF_REDISC_PEND
)
2230 /* Stop FCF redisc wait timer */
2231 __lpfc_sli4_stop_fcf_redisc_wait_timer(
2233 else if (phba
->fcf
.fcf_flag
& FCF_REDISC_FOV
)
2234 /* Fast failover, mark completed */
2235 phba
->fcf
.fcf_flag
&= ~FCF_REDISC_FOV
;
2236 spin_unlock_irq(&phba
->hbalock
);
2237 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2238 "2836 New FCF matches in-use "
2240 phba
->fcf
.current_rec
.fcf_indx
);
2243 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
,
2244 "2863 New FCF (x%x) matches "
2245 "property of in-use FCF (x%x)\n",
2246 bf_get(lpfc_fcf_record_fcf_index
,
2248 phba
->fcf
.current_rec
.fcf_indx
);
2251 * Read next FCF record from HBA searching for the matching
2252 * with in-use record only if not during the fast failover
2253 * period. In case of fast failover period, it shall try to
2254 * determine whether the FCF record just read should be the
2257 if (!(phba
->fcf
.fcf_flag
& FCF_REDISC_FOV
)) {
2258 spin_unlock_irq(&phba
->hbalock
);
2263 * Update on failover FCF record only if it's in FCF fast-failover
2264 * period; otherwise, update on current FCF record.
2266 if (phba
->fcf
.fcf_flag
& FCF_REDISC_FOV
)
2267 fcf_rec
= &phba
->fcf
.failover_rec
;
2269 fcf_rec
= &phba
->fcf
.current_rec
;
2271 if (phba
->fcf
.fcf_flag
& FCF_AVAILABLE
) {
2273 * If the driver FCF record does not have boot flag
2274 * set and new hba fcf record has boot flag set, use
2275 * the new hba fcf record.
2277 if (boot_flag
&& !(fcf_rec
->flag
& BOOT_ENABLE
)) {
2278 /* Choose this FCF record */
2279 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2280 "2837 Update current FCF record "
2281 "(x%x) with new FCF record (x%x)\n",
2283 bf_get(lpfc_fcf_record_fcf_index
,
2285 __lpfc_update_fcf_record(phba
, fcf_rec
, new_fcf_record
,
2286 addr_mode
, vlan_id
, BOOT_ENABLE
);
2287 spin_unlock_irq(&phba
->hbalock
);
2291 * If the driver FCF record has boot flag set and the
2292 * new hba FCF record does not have boot flag, read
2293 * the next FCF record.
2295 if (!boot_flag
&& (fcf_rec
->flag
& BOOT_ENABLE
)) {
2296 spin_unlock_irq(&phba
->hbalock
);
2300 * If the new hba FCF record has lower priority value
2301 * than the driver FCF record, use the new record.
2303 if (new_fcf_record
->fip_priority
< fcf_rec
->priority
) {
2304 /* Choose the new FCF record with lower priority */
2305 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2306 "2838 Update current FCF record "
2307 "(x%x) with new FCF record (x%x)\n",
2309 bf_get(lpfc_fcf_record_fcf_index
,
2311 __lpfc_update_fcf_record(phba
, fcf_rec
, new_fcf_record
,
2312 addr_mode
, vlan_id
, 0);
2313 /* Reset running random FCF selection count */
2314 phba
->fcf
.eligible_fcf_cnt
= 1;
2315 } else if (new_fcf_record
->fip_priority
== fcf_rec
->priority
) {
2316 /* Update running random FCF selection count */
2317 phba
->fcf
.eligible_fcf_cnt
++;
2318 select_new_fcf
= lpfc_sli4_new_fcf_random_select(phba
,
2319 phba
->fcf
.eligible_fcf_cnt
);
2320 if (select_new_fcf
) {
2321 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2322 "2839 Update current FCF record "
2323 "(x%x) with new FCF record (x%x)\n",
2325 bf_get(lpfc_fcf_record_fcf_index
,
2327 /* Choose the new FCF by random selection */
2328 __lpfc_update_fcf_record(phba
, fcf_rec
,
2330 addr_mode
, vlan_id
, 0);
2333 spin_unlock_irq(&phba
->hbalock
);
2337 * This is the first suitable FCF record, choose this record for
2338 * initial best-fit FCF.
2341 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2342 "2840 Update initial FCF candidate "
2344 bf_get(lpfc_fcf_record_fcf_index
,
2346 __lpfc_update_fcf_record(phba
, fcf_rec
, new_fcf_record
,
2347 addr_mode
, vlan_id
, (boot_flag
?
2349 phba
->fcf
.fcf_flag
|= FCF_AVAILABLE
;
2350 /* Setup initial running random FCF selection count */
2351 phba
->fcf
.eligible_fcf_cnt
= 1;
2352 /* Seeding the random number generator for random selection */
2353 seed
= (uint32_t)(0xFFFFFFFF & jiffies
);
2356 spin_unlock_irq(&phba
->hbalock
);
2360 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
2361 if (next_fcf_index
== LPFC_FCOE_FCF_NEXT_NONE
|| next_fcf_index
== 0) {
2362 if (phba
->fcf
.fcf_flag
& FCF_REDISC_FOV
) {
2364 * Case of FCF fast failover scan
2368 * It has not found any suitable FCF record, cancel
2369 * FCF scan inprogress, and do nothing
2371 if (!(phba
->fcf
.failover_rec
.flag
& RECORD_VALID
)) {
2372 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
2373 "2782 No suitable FCF found: "
2375 phba
->fcoe_eventtag_at_fcf_scan
,
2376 bf_get(lpfc_fcf_record_fcf_index
,
2378 spin_lock_irq(&phba
->hbalock
);
2379 if (phba
->hba_flag
& HBA_DEVLOSS_TMO
) {
2380 phba
->hba_flag
&= ~FCF_TS_INPROG
;
2381 spin_unlock_irq(&phba
->hbalock
);
2382 /* Unregister in-use FCF and rescan */
2383 lpfc_printf_log(phba
, KERN_INFO
,
2385 "2864 On devloss tmo "
2386 "unreg in-use FCF and "
2387 "rescan FCF table\n");
2388 lpfc_unregister_fcf_rescan(phba
);
2392 * Let next new FCF event trigger fast failover
2394 phba
->hba_flag
&= ~FCF_TS_INPROG
;
2395 spin_unlock_irq(&phba
->hbalock
);
2399 * It has found a suitable FCF record that is not
2400 * the same as in-use FCF record, unregister the
2401 * in-use FCF record, replace the in-use FCF record
2402 * with the new FCF record, mark FCF fast failover
2403 * completed, and then start register the new FCF
2407 /* Unregister the current in-use FCF record */
2408 lpfc_unregister_fcf(phba
);
2410 /* Replace in-use record with the new record */
2411 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2412 "2842 Replace in-use FCF (x%x) "
2413 "with failover FCF (x%x)\n",
2414 phba
->fcf
.current_rec
.fcf_indx
,
2415 phba
->fcf
.failover_rec
.fcf_indx
);
2416 memcpy(&phba
->fcf
.current_rec
,
2417 &phba
->fcf
.failover_rec
,
2418 sizeof(struct lpfc_fcf_rec
));
2420 * Mark the fast FCF failover rediscovery completed
2421 * and the start of the first round of the roundrobin
2424 spin_lock_irq(&phba
->hbalock
);
2425 phba
->fcf
.fcf_flag
&= ~FCF_REDISC_FOV
;
2426 spin_unlock_irq(&phba
->hbalock
);
2427 /* Register to the new FCF record */
2428 lpfc_register_fcf(phba
);
2431 * In case of transaction period to fast FCF failover,
2432 * do nothing when search to the end of the FCF table.
2434 if ((phba
->fcf
.fcf_flag
& FCF_REDISC_EVT
) ||
2435 (phba
->fcf
.fcf_flag
& FCF_REDISC_PEND
))
2438 if (phba
->cfg_fcf_failover_policy
== LPFC_FCF_FOV
&&
2439 phba
->fcf
.fcf_flag
& FCF_IN_USE
) {
2441 * In case the current in-use FCF record no
2442 * longer existed during FCF discovery that
2443 * was not triggered by fast FCF failover
2444 * process, treat it as fast FCF failover.
2446 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2447 "2841 In-use FCF record (x%x) "
2448 "not reported, entering fast "
2449 "FCF failover mode scanning.\n",
2450 phba
->fcf
.current_rec
.fcf_indx
);
2451 spin_lock_irq(&phba
->hbalock
);
2452 phba
->fcf
.fcf_flag
|= FCF_REDISC_FOV
;
2453 spin_unlock_irq(&phba
->hbalock
);
2454 lpfc_sli4_fcf_scan_read_fcf_rec(phba
,
2455 LPFC_FCOE_FCF_GET_FIRST
);
2458 /* Register to the new FCF record */
2459 lpfc_register_fcf(phba
);
2462 lpfc_sli4_fcf_scan_read_fcf_rec(phba
, next_fcf_index
);
2466 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
2467 lpfc_register_fcf(phba
);
/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl handler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function for the FLOGI failure roundrobin FCF failover
 * read FCF record mailbox command from the eligible FCF record bmask for
 * performing the failover. If the FCF read back is not valid/available, it
 * falls through to retrying FLOGI to the currently registered FCF again.
 * Otherwise, if the FCF read back is valid and available, it will set the
 * newly read FCF record to the failover FCF record, unregister the currently
 * registered FCF record, copy the failover FCF record to the current
 * FCF record, and then register the current FCF record before proceeding
 * to trying FLOGI on the new failover FCF.
 **/
2488 lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
2490 struct fcf_record
*new_fcf_record
;
2491 uint32_t boot_flag
, addr_mode
;
2492 uint16_t next_fcf_index
, fcf_index
;
2493 uint16_t current_fcf_index
;
2497 /* If link state is not up, stop the roundrobin failover process */
2498 if (phba
->link_state
< LPFC_LINK_UP
) {
2499 spin_lock_irq(&phba
->hbalock
);
2500 phba
->fcf
.fcf_flag
&= ~FCF_DISCOVERY
;
2501 phba
->hba_flag
&= ~FCF_RR_INPROG
;
2502 spin_unlock_irq(&phba
->hbalock
);
2506 /* Parse the FCF record from the non-embedded mailbox command */
2507 new_fcf_record
= lpfc_sli4_fcf_rec_mbox_parse(phba
, mboxq
,
2509 if (!new_fcf_record
) {
2510 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
2511 "2766 Mailbox command READ_FCF_RECORD "
2512 "failed to retrieve a FCF record.\n");
2516 /* Get the needed parameters from FCF record */
2517 rc
= lpfc_match_fcf_conn_list(phba
, new_fcf_record
, &boot_flag
,
2518 &addr_mode
, &vlan_id
);
2520 /* Log the FCF record information if turned on */
2521 lpfc_sli4_log_fcf_record_info(phba
, new_fcf_record
, vlan_id
,
2524 fcf_index
= bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
);
2526 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2527 "2848 Remove ineligible FCF (x%x) from "
2528 "from roundrobin bmask\n", fcf_index
);
2529 /* Clear roundrobin bmask bit for ineligible FCF */
2530 lpfc_sli4_fcf_rr_index_clear(phba
, fcf_index
);
2531 /* Perform next round of roundrobin FCF failover */
2532 fcf_index
= lpfc_sli4_fcf_rr_next_index_get(phba
);
2533 rc
= lpfc_sli4_fcf_rr_next_proc(phba
->pport
, fcf_index
);
2539 if (fcf_index
== phba
->fcf
.current_rec
.fcf_indx
) {
2540 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2541 "2760 Perform FLOGI roundrobin FCF failover: "
2542 "FCF (x%x) back to FCF (x%x)\n",
2543 phba
->fcf
.current_rec
.fcf_indx
, fcf_index
);
2544 /* Wait 500 ms before retrying FLOGI to current FCF */
2546 lpfc_issue_init_vfi(phba
->pport
);
2550 /* Upload new FCF record to the failover FCF record */
2551 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2552 "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2553 phba
->fcf
.failover_rec
.fcf_indx
, fcf_index
);
2554 spin_lock_irq(&phba
->hbalock
);
2555 __lpfc_update_fcf_record(phba
, &phba
->fcf
.failover_rec
,
2556 new_fcf_record
, addr_mode
, vlan_id
,
2557 (boot_flag
? BOOT_ENABLE
: 0));
2558 spin_unlock_irq(&phba
->hbalock
);
2560 current_fcf_index
= phba
->fcf
.current_rec
.fcf_indx
;
2562 /* Unregister the current in-use FCF record */
2563 lpfc_unregister_fcf(phba
);
2565 /* Replace in-use record with the new record */
2566 memcpy(&phba
->fcf
.current_rec
, &phba
->fcf
.failover_rec
,
2567 sizeof(struct lpfc_fcf_rec
));
2569 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2570 "2783 Perform FLOGI roundrobin FCF failover: FCF "
2571 "(x%x) to FCF (x%x)\n", current_fcf_index
, fcf_index
);
2574 lpfc_register_fcf(phba
);
2576 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function of the read FCF record mailbox command for
 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
 * failover when a new FCF event happened. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask for the eligible FCF record for roundrobin failover.
 **/
2591 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
2593 struct fcf_record
*new_fcf_record
;
2594 uint32_t boot_flag
, addr_mode
;
2595 uint16_t fcf_index
, next_fcf_index
;
2599 /* If link state is not up, no need to proceed */
2600 if (phba
->link_state
< LPFC_LINK_UP
)
2603 /* If FCF discovery period is over, no need to proceed */
2604 if (!(phba
->fcf
.fcf_flag
& FCF_DISCOVERY
))
2607 /* Parse the FCF record from the non-embedded mailbox command */
2608 new_fcf_record
= lpfc_sli4_fcf_rec_mbox_parse(phba
, mboxq
,
2610 if (!new_fcf_record
) {
2611 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2612 "2767 Mailbox command READ_FCF_RECORD "
2613 "failed to retrieve a FCF record.\n");
2617 /* Check the connection list for eligibility */
2618 rc
= lpfc_match_fcf_conn_list(phba
, new_fcf_record
, &boot_flag
,
2619 &addr_mode
, &vlan_id
);
2621 /* Log the FCF record information if turned on */
2622 lpfc_sli4_log_fcf_record_info(phba
, new_fcf_record
, vlan_id
,
2628 /* Update the eligible FCF record index bmask */
2629 fcf_index
= bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
);
2631 rc
= lpfc_sli4_fcf_pri_list_add(phba
, fcf_index
, new_fcf_record
);
2634 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
/**
 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vfi mailbox command.
 **/
static void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	/* VFI not supported on interface type 0, just do the flogi */
	if (mboxq->u.mb.mbxStatus && (bf_get(lpfc_sli_intf_if_type,
	    &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2891 Init VFI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	lpfc_initial_flogi(vport);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vfi mailbox command to initialize the VFI and
 * VPI for the physical port.
 **/
void
lpfc_issue_init_vfi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX, "2892 Failed to allocate "
				 "init_vfi mailbox\n");
		return;
	}
	lpfc_init_vfi(mboxq, vport);
	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
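
/*
 * lpfc_issue_init_vfi() is also used by the roundrobin FCF failover path
 * above: when the next eligible FCF is the currently registered one, the
 * driver re-issues INIT_VFI on the physical port before retrying FLOGI.
 */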
/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 **/
static void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2609 Init VPI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "2731 Cannot find fabric "
					 "controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the
 * VPI for the vport.
 **/
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX, "2607 Failed to allocate "
				 "init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 **/
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2018 REG_VFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto fail_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto fail_free_mem;
	}
	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* For private loop just start discovery and we are done. */
		if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
		    (phba->alpa_map[0] == 0) &&
		    !(vport->fc_flag & FC_PUBLIC_LOOP)) {
			/* Use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
		} else {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		}
	}

fail_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	lpfc_update_vport_wwn(vport);
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
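
/*
 * lpfc_mbx_process_link_up - act on a link-up attention (descriptive
 * summary): capture the granted link speed and topology from READ_TOPOLOGY,
 * handle loop/ALPA specifics, issue READ_SPARAM, and then either configure
 * the link (non-FCoE) or, in FCoE mode, add the driver default FCF record
 * when FIP is not supported or start the FCF table scan when it is.
 */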
2922 lpfc_mbx_process_link_up(struct lpfc_hba
*phba
, struct lpfc_mbx_read_top
*la
)
2924 struct lpfc_vport
*vport
= phba
->pport
;
2925 LPFC_MBOXQ_t
*sparam_mbox
, *cfglink_mbox
= NULL
;
2927 struct lpfc_dmabuf
*mp
;
2929 struct fcf_record
*fcf_record
;
2931 spin_lock_irq(&phba
->hbalock
);
2932 switch (bf_get(lpfc_mbx_read_top_link_spd
, la
)) {
2933 case LPFC_LINK_SPEED_1GHZ
:
2934 case LPFC_LINK_SPEED_2GHZ
:
2935 case LPFC_LINK_SPEED_4GHZ
:
2936 case LPFC_LINK_SPEED_8GHZ
:
2937 case LPFC_LINK_SPEED_10GHZ
:
2938 case LPFC_LINK_SPEED_16GHZ
:
2939 phba
->fc_linkspeed
= bf_get(lpfc_mbx_read_top_link_spd
, la
);
2942 phba
->fc_linkspeed
= LPFC_LINK_SPEED_UNKNOWN
;
2946 phba
->fc_topology
= bf_get(lpfc_mbx_read_top_topology
, la
);
2947 phba
->link_flag
&= ~LS_NPIV_FAB_SUPPORTED
;
2949 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
2950 phba
->sli3_options
&= ~LPFC_SLI3_NPIV_ENABLED
;
2952 /* if npiv is enabled and this adapter supports npiv log
2953 * a message that npiv is not supported in this topology
2955 if (phba
->cfg_enable_npiv
&& phba
->max_vpi
)
2956 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2957 "1309 Link Up Event npiv not supported in loop "
2959 /* Get Loop Map information */
2960 if (bf_get(lpfc_mbx_read_top_il
, la
))
2961 vport
->fc_flag
|= FC_LBIT
;
2963 vport
->fc_myDID
= bf_get(lpfc_mbx_read_top_alpa_granted
, la
);
2964 i
= la
->lilpBde64
.tus
.f
.bdeSize
;
2967 phba
->alpa_map
[0] = 0;
2969 if (vport
->cfg_log_verbose
& LOG_LINK_EVENT
) {
2980 numalpa
= phba
->alpa_map
[0];
2982 while (j
< numalpa
) {
2983 memset(un
.pamap
, 0, 16);
2984 for (k
= 1; j
< numalpa
; k
++) {
2986 phba
->alpa_map
[j
+ 1];
2991 /* Link Up Event ALPA map */
2992 lpfc_printf_log(phba
,
2995 "1304 Link Up Event "
2996 "ALPA map Data: x%x "
2998 un
.pa
.wd1
, un
.pa
.wd2
,
2999 un
.pa
.wd3
, un
.pa
.wd4
);
3004 if (!(phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
)) {
3005 if (phba
->max_vpi
&& phba
->cfg_enable_npiv
&&
3006 (phba
->sli_rev
== 3))
3007 phba
->sli3_options
|= LPFC_SLI3_NPIV_ENABLED
;
3009 vport
->fc_myDID
= phba
->fc_pref_DID
;
3010 vport
->fc_flag
|= FC_LBIT
;
3012 spin_unlock_irq(&phba
->hbalock
);
3015 sparam_mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
3019 rc
= lpfc_read_sparam(phba
, sparam_mbox
, 0);
3021 mempool_free(sparam_mbox
, phba
->mbox_mem_pool
);
3024 sparam_mbox
->vport
= vport
;
3025 sparam_mbox
->mbox_cmpl
= lpfc_mbx_cmpl_read_sparam
;
3026 rc
= lpfc_sli_issue_mbox(phba
, sparam_mbox
, MBX_NOWAIT
);
3027 if (rc
== MBX_NOT_FINISHED
) {
3028 mp
= (struct lpfc_dmabuf
*) sparam_mbox
->context1
;
3029 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3031 mempool_free(sparam_mbox
, phba
->mbox_mem_pool
);
3035 if (!(phba
->hba_flag
& HBA_FCOE_MODE
)) {
3036 cfglink_mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
3039 vport
->port_state
= LPFC_LOCAL_CFG_LINK
;
3040 lpfc_config_link(phba
, cfglink_mbox
);
3041 cfglink_mbox
->vport
= vport
;
3042 cfglink_mbox
->mbox_cmpl
= lpfc_mbx_cmpl_local_config_link
;
3043 rc
= lpfc_sli_issue_mbox(phba
, cfglink_mbox
, MBX_NOWAIT
);
3044 if (rc
== MBX_NOT_FINISHED
) {
3045 mempool_free(cfglink_mbox
, phba
->mbox_mem_pool
);
3049 vport
->port_state
= LPFC_VPORT_UNKNOWN
;
3051 * Add the driver's default FCF record at FCF index 0 now. This
3052 * is phase 1 implementation that support FCF index 0 and driver
3055 if (!(phba
->hba_flag
& HBA_FIP_SUPPORT
)) {
3056 fcf_record
= kzalloc(sizeof(struct fcf_record
),
3058 if (unlikely(!fcf_record
)) {
3059 lpfc_printf_log(phba
, KERN_ERR
,
3061 "2554 Could not allocate memory for "
3067 lpfc_sli4_build_dflt_fcf_record(phba
, fcf_record
,
3068 LPFC_FCOE_FCF_DEF_INDEX
);
3069 rc
= lpfc_sli4_add_fcf_record(phba
, fcf_record
);
3071 lpfc_printf_log(phba
, KERN_ERR
,
3073 "2013 Could not manually add FCF "
3074 "record 0, status %d\n", rc
);
3082 * The driver is expected to do FIP/FCF. Call the port
3083 * and get the FCF Table.
3085 spin_lock_irq(&phba
->hbalock
);
3086 if (phba
->hba_flag
& FCF_TS_INPROG
) {
3087 spin_unlock_irq(&phba
->hbalock
);
3090 /* This is the initial FCF discovery scan */
3091 phba
->fcf
.fcf_flag
|= FCF_INIT_DISC
;
3092 spin_unlock_irq(&phba
->hbalock
);
3093 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
| LOG_DISCOVERY
,
3094 "2778 Start FCF table scan at linkup\n");
3095 rc
= lpfc_sli4_fcf_scan_read_fcf_rec(phba
,
3096 LPFC_FCOE_FCF_GET_FIRST
);
3098 spin_lock_irq(&phba
->hbalock
);
3099 phba
->fcf
.fcf_flag
&= ~FCF_INIT_DISC
;
3100 spin_unlock_irq(&phba
->hbalock
);
3103 /* Reset FCF roundrobin bmask for new discovery */
3104 lpfc_sli4_clear_fcf_rr_bmask(phba
);
3109 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
3110 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
3111 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
3112 vport
->port_state
, sparam_mbox
, cfglink_mbox
);
3113 lpfc_issue_clear_la(phba
, vport
);
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}
/*
 * This routine handles processing a READ_TOPOLOGY mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
3150 lpfc_mbx_cmpl_read_topology(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
3152 struct lpfc_vport
*vport
= pmb
->vport
;
3153 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3154 struct lpfc_mbx_read_top
*la
;
3155 MAILBOX_t
*mb
= &pmb
->u
.mb
;
3156 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
3158 /* Unblock ELS traffic */
3159 phba
->sli
.ring
[LPFC_ELS_RING
].flag
&= ~LPFC_STOP_IOCB_EVENT
;
3160 /* Check for error */
3161 if (mb
->mbxStatus
) {
3162 lpfc_printf_log(phba
, KERN_INFO
, LOG_LINK_EVENT
,
3163 "1307 READ_LA mbox error x%x state x%x\n",
3164 mb
->mbxStatus
, vport
->port_state
);
3165 lpfc_mbx_issue_link_down(phba
);
3166 phba
->link_state
= LPFC_HBA_ERROR
;
3167 goto lpfc_mbx_cmpl_read_topology_free_mbuf
;
3170 la
= (struct lpfc_mbx_read_top
*) &pmb
->u
.mb
.un
.varReadTop
;
3172 memcpy(&phba
->alpa_map
[0], mp
->virt
, 128);
3174 spin_lock_irq(shost
->host_lock
);
3175 if (bf_get(lpfc_mbx_read_top_pb
, la
))
3176 vport
->fc_flag
|= FC_BYPASSED_MODE
;
3178 vport
->fc_flag
&= ~FC_BYPASSED_MODE
;
3179 spin_unlock_irq(shost
->host_lock
);
3181 if ((phba
->fc_eventTag
< la
->eventTag
) ||
3182 (phba
->fc_eventTag
== la
->eventTag
)) {
3183 phba
->fc_stat
.LinkMultiEvent
++;
3184 if (bf_get(lpfc_mbx_read_top_att_type
, la
) == LPFC_ATT_LINK_UP
)
3185 if (phba
->fc_eventTag
!= 0)
3186 lpfc_linkdown(phba
);
3189 phba
->fc_eventTag
= la
->eventTag
;
3190 spin_lock_irq(&phba
->hbalock
);
3191 if (bf_get(lpfc_mbx_read_top_mm
, la
))
3192 phba
->sli
.sli_flag
|= LPFC_MENLO_MAINT
;
3194 phba
->sli
.sli_flag
&= ~LPFC_MENLO_MAINT
;
3195 spin_unlock_irq(&phba
->hbalock
);
3197 phba
->link_events
++;
3198 if ((bf_get(lpfc_mbx_read_top_att_type
, la
) == LPFC_ATT_LINK_UP
) &&
3199 (!bf_get(lpfc_mbx_read_top_mm
, la
))) {
3200 phba
->fc_stat
.LinkUp
++;
3201 if (phba
->link_flag
& LS_LOOPBACK_MODE
) {
3202 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3203 "1306 Link Up Event in loop back mode "
3204 "x%x received Data: x%x x%x x%x x%x\n",
3205 la
->eventTag
, phba
->fc_eventTag
,
3206 bf_get(lpfc_mbx_read_top_alpa_granted
,
3208 bf_get(lpfc_mbx_read_top_link_spd
, la
),
3211 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3212 "1303 Link Up Event x%x received "
3213 "Data: x%x x%x x%x x%x x%x x%x %d\n",
3214 la
->eventTag
, phba
->fc_eventTag
,
3215 bf_get(lpfc_mbx_read_top_alpa_granted
,
3217 bf_get(lpfc_mbx_read_top_link_spd
, la
),
3219 bf_get(lpfc_mbx_read_top_mm
, la
),
3220 bf_get(lpfc_mbx_read_top_fa
, la
),
3221 phba
->wait_4_mlo_maint_flg
);
3223 lpfc_mbx_process_link_up(phba
, la
);
3224 } else if (bf_get(lpfc_mbx_read_top_att_type
, la
) ==
3225 LPFC_ATT_LINK_DOWN
) {
3226 phba
->fc_stat
.LinkDown
++;
3227 if (phba
->link_flag
& LS_LOOPBACK_MODE
) {
3228 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3229 "1308 Link Down Event in loop back mode "
3231 "Data: x%x x%x x%x\n",
3232 la
->eventTag
, phba
->fc_eventTag
,
3233 phba
->pport
->port_state
, vport
->fc_flag
);
3236 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3237 "1305 Link Down Event x%x received "
3238 "Data: x%x x%x x%x x%x x%x\n",
3239 la
->eventTag
, phba
->fc_eventTag
,
3240 phba
->pport
->port_state
, vport
->fc_flag
,
3241 bf_get(lpfc_mbx_read_top_mm
, la
),
3242 bf_get(lpfc_mbx_read_top_fa
, la
));
3244 lpfc_mbx_issue_link_down(phba
);
3246 if ((bf_get(lpfc_mbx_read_top_mm
, la
)) &&
3247 (bf_get(lpfc_mbx_read_top_att_type
, la
) == LPFC_ATT_LINK_UP
)) {
3248 if (phba
->link_state
!= LPFC_LINK_DOWN
) {
3249 phba
->fc_stat
.LinkDown
++;
3250 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3251 "1312 Link Down Event x%x received "
3252 "Data: x%x x%x x%x\n",
3253 la
->eventTag
, phba
->fc_eventTag
,
3254 phba
->pport
->port_state
, vport
->fc_flag
);
3255 lpfc_mbx_issue_link_down(phba
);
3257 lpfc_enable_la(phba
);
3259 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3260 "1310 Menlo Maint Mode Link up Event x%x rcvd "
3261 "Data: x%x x%x x%x\n",
3262 la
->eventTag
, phba
->fc_eventTag
,
3263 phba
->pport
->port_state
, vport
->fc_flag
);
3265 * The cmnd that triggered this will be waiting for this
3268 /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
3269 if (phba
->wait_4_mlo_maint_flg
) {
3270 phba
->wait_4_mlo_maint_flg
= 0;
3271 wake_up_interruptible(&phba
->wait_4_mlo_m_q
);
3275 if (bf_get(lpfc_mbx_read_top_fa
, la
)) {
3276 if (bf_get(lpfc_mbx_read_top_mm
, la
))
3277 lpfc_issue_clear_la(phba
, vport
);
3278 lpfc_printf_log(phba
, KERN_INFO
, LOG_LINK_EVENT
,
3280 bf_get(lpfc_mbx_read_top_fa
, la
));
3283 lpfc_mbx_cmpl_read_topology_free_mbuf
:
3284 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3286 mempool_free(pmb
, phba
->mbox_mem_pool
);
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
3297 lpfc_mbx_cmpl_reg_login(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
3299 struct lpfc_vport
*vport
= pmb
->vport
;
3300 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
3301 struct lpfc_nodelist
*ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
3302 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3304 pmb
->context1
= NULL
;
3305 pmb
->context2
= NULL
;
3307 if (ndlp
->nlp_flag
& NLP_REG_LOGIN_SEND
)
3308 ndlp
->nlp_flag
&= ~NLP_REG_LOGIN_SEND
;
3310 if (ndlp
->nlp_flag
& NLP_IGNR_REG_CMPL
||
3311 ndlp
->nlp_state
!= NLP_STE_REG_LOGIN_ISSUE
) {
		/* We received an RSCN after issuing this
		 * mbox reg login; we may have cycled
		 * back through the state and be
		 * back at reg login state, so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * progress.
		 */
3320 spin_lock_irq(shost
->host_lock
);
3321 ndlp
->nlp_flag
&= ~NLP_IGNR_REG_CMPL
;
3322 spin_unlock_irq(shost
->host_lock
);
3324 /* Good status, call state machine */
3325 lpfc_disc_state_machine(vport
, ndlp
, pmb
,
3326 NLP_EVT_CMPL_REG_LOGIN
);
3328 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3330 mempool_free(pmb
, phba
->mbox_mem_pool
);
3331 /* decrement the node reference count held for this callback
3340 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
3342 MAILBOX_t
*mb
= &pmb
->u
.mb
;
3343 struct lpfc_vport
*vport
= pmb
->vport
;
3344 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3346 switch (mb
->mbxStatus
) {
3349 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
3350 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
3353 /* If VPI is busy, reset the HBA */
3355 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_NODE
,
3356 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
3357 vport
->vpi
, mb
->mbxStatus
);
3358 if (!(phba
->pport
->load_flag
& FC_UNLOADING
))
3359 lpfc_workq_post_event(phba
, NULL
, NULL
,
3360 LPFC_EVT_RESET_HBA
);
3362 spin_lock_irq(shost
->host_lock
);
3363 vport
->vpi_state
&= ~LPFC_VPI_REGISTERED
;
3364 vport
->fc_flag
|= FC_VPORT_NEEDS_REG_VPI
;
3365 spin_unlock_irq(shost
->host_lock
);
3366 vport
->unreg_vpi_cmpl
= VPORT_OK
;
3367 mempool_free(pmb
, phba
->mbox_mem_pool
);
3368 lpfc_cleanup_vports_rrqs(vport
, NULL
);
3370 * This shost reference might have been taken at the beginning of
3371 * lpfc_vport_delete()
3373 if ((vport
->load_flag
& FC_UNLOADING
) && (vport
!= phba
->pport
))
3374 scsi_host_put(shost
);
void
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
	}
}
3403 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
3405 struct lpfc_vport
*vport
= pmb
->vport
;
3406 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3407 MAILBOX_t
*mb
= &pmb
->u
.mb
;
3409 switch (mb
->mbxStatus
) {
3413 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
3414 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
3416 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
3417 spin_lock_irq(shost
->host_lock
);
3418 vport
->fc_flag
&= ~(FC_FABRIC
| FC_PUBLIC_LOOP
);
3419 spin_unlock_irq(shost
->host_lock
);
3420 vport
->fc_myDID
= 0;
3424 spin_lock_irq(shost
->host_lock
);
3425 vport
->vpi_state
|= LPFC_VPI_REGISTERED
;
3426 vport
->fc_flag
&= ~FC_VPORT_NEEDS_REG_VPI
;
3427 spin_unlock_irq(shost
->host_lock
);
3428 vport
->num_disc_nodes
= 0;
3429 /* go thru NPR list and issue ELS PLOGIs */
3430 if (vport
->fc_npr_cnt
)
3431 lpfc_els_disc_plogi(vport
);
3433 if (!vport
->num_disc_nodes
) {
3434 spin_lock_irq(shost
->host_lock
);
3435 vport
->fc_flag
&= ~FC_NDISC_ACTIVE
;
3436 spin_unlock_irq(shost
->host_lock
);
3437 lpfc_can_disctmo(vport
);
3439 vport
->port_state
= LPFC_VPORT_READY
;
3442 mempool_free(pmb
, phba
->mbox_mem_pool
);
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 **/
3455 lpfc_create_static_vport(struct lpfc_hba
*phba
)
3457 LPFC_MBOXQ_t
*pmb
= NULL
;
3459 struct static_vport_info
*vport_info
;
3461 struct fc_vport_identifiers vport_id
;
3462 struct fc_vport
*new_fc_vport
;
3463 struct Scsi_Host
*shost
;
3464 struct lpfc_vport
*vport
;
3465 uint16_t offset
= 0;
3466 uint8_t *vport_buff
;
3467 struct lpfc_dmabuf
*mp
;
3468 uint32_t byte_count
= 0;
3470 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
3472 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
3473 "0542 lpfc_create_static_vport failed to"
3474 " allocate mailbox memory\n");
3480 vport_info
= kzalloc(sizeof(struct static_vport_info
), GFP_KERNEL
);
3482 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
3483 "0543 lpfc_create_static_vport failed to"
3484 " allocate vport_info\n");
3485 mempool_free(pmb
, phba
->mbox_mem_pool
);
3489 vport_buff
= (uint8_t *) vport_info
;
3491 if (lpfc_dump_static_vport(phba
, pmb
, offset
))
3494 pmb
->vport
= phba
->pport
;
3495 rc
= lpfc_sli_issue_mbox_wait(phba
, pmb
, LPFC_MBOX_TMO
);
3497 if ((rc
!= MBX_SUCCESS
) || mb
->mbxStatus
) {
3498 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
3499 "0544 lpfc_create_static_vport failed to"
3500 " issue dump mailbox command ret 0x%x "
3506 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
3507 byte_count
= pmb
->u
.mqe
.un
.mb_words
[5];
3508 mp
= (struct lpfc_dmabuf
*) pmb
->context2
;
3509 if (byte_count
> sizeof(struct static_vport_info
) -
3511 byte_count
= sizeof(struct static_vport_info
)
3513 memcpy(vport_buff
+ offset
, mp
->virt
, byte_count
);
3514 offset
+= byte_count
;
3516 if (mb
->un
.varDmp
.word_cnt
>
3517 sizeof(struct static_vport_info
) - offset
)
3518 mb
->un
.varDmp
.word_cnt
=
3519 sizeof(struct static_vport_info
)
3521 byte_count
= mb
->un
.varDmp
.word_cnt
;
3522 lpfc_sli_pcimem_bcopy(((uint8_t *)mb
) + DMP_RSP_OFFSET
,
3523 vport_buff
+ offset
,
3526 offset
+= byte_count
;
3529 } while (byte_count
&&
3530 offset
< sizeof(struct static_vport_info
));
3533 if ((le32_to_cpu(vport_info
->signature
) != VPORT_INFO_SIG
) ||
3534 ((le32_to_cpu(vport_info
->rev
) & VPORT_INFO_REV_MASK
)
3535 != VPORT_INFO_REV
)) {
3536 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
3537 "0545 lpfc_create_static_vport bad"
3538 " information header 0x%x 0x%x\n",
3539 le32_to_cpu(vport_info
->signature
),
3540 le32_to_cpu(vport_info
->rev
) & VPORT_INFO_REV_MASK
);
3545 shost
= lpfc_shost_from_vport(phba
->pport
);
3547 for (i
= 0; i
< MAX_STATIC_VPORT_COUNT
; i
++) {
3548 memset(&vport_id
, 0, sizeof(vport_id
));
3549 vport_id
.port_name
= wwn_to_u64(vport_info
->vport_list
[i
].wwpn
);
3550 vport_id
.node_name
= wwn_to_u64(vport_info
->vport_list
[i
].wwnn
);
3551 if (!vport_id
.port_name
|| !vport_id
.node_name
)
3554 vport_id
.roles
= FC_PORT_ROLE_FCP_INITIATOR
;
3555 vport_id
.vport_type
= FC_PORTTYPE_NPIV
;
3556 vport_id
.disable
= false;
3557 new_fc_vport
= fc_vport_create(shost
, 0, &vport_id
);
3559 if (!new_fc_vport
) {
3560 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
3561 "0546 lpfc_create_static_vport failed to"
3566 vport
= *(struct lpfc_vport
**)new_fc_vport
->dd_data
;
3567 vport
->vport_flag
|= STATIC_VPORT
;
3572 if (rc
!= MBX_TIMEOUT
) {
3573 if (pmb
->context2
) {
3574 mp
= (struct lpfc_dmabuf
*) pmb
->context2
;
3575 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3578 mempool_free(pmb
, phba
->mbox_mem_pool
);
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
3591 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
3593 struct lpfc_vport
*vport
= pmb
->vport
;
3594 MAILBOX_t
*mb
= &pmb
->u
.mb
;
3595 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
3596 struct lpfc_nodelist
*ndlp
;
3598 ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
3599 pmb
->context1
= NULL
;
3600 pmb
->context2
= NULL
;
3602 if (mb
->mbxStatus
) {
3603 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
3604 "0258 Register Fabric login error: 0x%x\n",
3606 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3608 mempool_free(pmb
, phba
->mbox_mem_pool
);
3610 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
3611 /* FLOGI failed, use loop map to make discovery list */
3612 lpfc_disc_list_loopmap(vport
);
3614 /* Start discovery */
3615 lpfc_disc_start(vport
);
3616 /* Decrement the reference count to ndlp after the
3617 * reference to the ndlp are done.
3623 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
3624 /* Decrement the reference count to ndlp after the reference
3625 * to the ndlp are done.
3631 if (phba
->sli_rev
< LPFC_SLI_REV4
)
3632 ndlp
->nlp_rpi
= mb
->un
.varWords
[0];
3633 ndlp
->nlp_flag
|= NLP_RPI_REGISTERED
;
3634 ndlp
->nlp_type
|= NLP_FABRIC
;
3635 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
3637 if (vport
->port_state
== LPFC_FABRIC_CFG_LINK
) {
3638 /* when physical port receive logo donot start
3639 * vport discovery */
3640 if (!(vport
->fc_flag
& FC_LOGO_RCVD_DID_CHNG
))
3641 lpfc_start_fdiscs(phba
);
3643 vport
->fc_flag
&= ~FC_LOGO_RCVD_DID_CHNG
;
3644 lpfc_do_scr_ns_plogi(phba
, vport
);
3647 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3649 mempool_free(pmb
, phba
->mbox_mem_pool
);
3651 /* Drop the reference count from the mbox at the end after
3652 * all the current reference to the ndlp have been done.
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
3665 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
3667 MAILBOX_t
*mb
= &pmb
->u
.mb
;
3668 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
3669 struct lpfc_nodelist
*ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
3670 struct lpfc_vport
*vport
= pmb
->vport
;
3672 pmb
->context1
= NULL
;
3673 pmb
->context2
= NULL
;
3675 if (mb
->mbxStatus
) {
3677 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
3678 "0260 Register NameServer error: 0x%x\n",
3680 /* decrement the node reference count held for this
3681 * callback function.
3684 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3686 mempool_free(pmb
, phba
->mbox_mem_pool
);
3688 /* If no other thread is using the ndlp, free it */
3689 lpfc_nlp_not_used(ndlp
);
3691 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
3693 * RegLogin failed, use loop map to make discovery
3696 lpfc_disc_list_loopmap(vport
);
3698 /* Start discovery */
3699 lpfc_disc_start(vport
);
3702 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
3706 if (phba
->sli_rev
< LPFC_SLI_REV4
)
3707 ndlp
->nlp_rpi
= mb
->un
.varWords
[0];
3708 ndlp
->nlp_flag
|= NLP_RPI_REGISTERED
;
3709 ndlp
->nlp_type
|= NLP_FABRIC
;
3710 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
3712 if (vport
->port_state
< LPFC_VPORT_READY
) {
3713 /* Link up discovery requires Fabric registration. */
3714 lpfc_ns_cmd(vport
, SLI_CTNS_RFF_ID
, 0, 0); /* Do this first! */
3715 lpfc_ns_cmd(vport
, SLI_CTNS_RNN_ID
, 0, 0);
3716 lpfc_ns_cmd(vport
, SLI_CTNS_RSNN_NN
, 0, 0);
3717 lpfc_ns_cmd(vport
, SLI_CTNS_RSPN_ID
, 0, 0);
3718 lpfc_ns_cmd(vport
, SLI_CTNS_RFT_ID
, 0, 0);
3720 /* Issue SCR just before NameServer GID_FT Query */
3721 lpfc_issue_els_scr(vport
, SCR_DID
, 0);
3724 vport
->fc_ns_retry
= 0;
3725 /* Good status, issue CT Request to NameServer */
3726 if (lpfc_ns_cmd(vport
, SLI_CTNS_GID_FT
, 0, 0)) {
3727 /* Cannot issue NameServer Query, so finish up discovery */
3731 /* decrement the node reference count held for this
3732 * callback function.
3735 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3737 mempool_free(pmb
, phba
->mbox_mem_pool
);
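
/*
 * lpfc_register_remote_port - (re)register an ndlp with the FC transport
 * (descriptive summary): build the fc_rport_identifiers from the node, drop
 * the stale dd_data reference when reusing a previously registered rport,
 * call fc_remote_port_add(), and then propagate FCP target/initiator roles
 * and the assigned scsi_target_id back onto the node.
 */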
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport  *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port.  But fc_remote_port_add zeros the space to which
	 * rport->dd_data points.  So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
		lpfc_nlp_put(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add:       did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	/* Don't add the remote port if unloading. */
	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
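
/*
 * Illustrative sketch only (not a call site in the driver): the node type
 * bits collected during PRLI map directly onto the FC transport roles set
 * above. For a node that reported both initiator and target function the
 * path through lpfc_register_remote_port() effectively does
 *
 *	ndlp->nlp_type  = NLP_FCP_TARGET | NLP_FCP_INITIATOR;
 *	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
 *	rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
 *	rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
 *	fc_remote_port_rolechg(rport, rport_ids.roles);
 *
 * and, because the role now includes FCP_TARGET, the transport assigns
 * rport->scsi_target_id, which is cached in ndlp->nlp_sid when it falls
 * inside [0, LPFC_MAX_TARGET).
 */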
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete:    did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	fc_remote_port_delete(rport);

	return;
}
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
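
/*
 * Illustrative sketch (not a call site in this file): a typical transition
 * made by the discovery state machine once a PRLI completes for an FCP
 * target looks like
 *
 *	lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
 *
 * which drops the counter for the old state, bumps fc_map_cnt, and then
 * lets lpfc_nlp_state_cleanup() register the remote port with the FC
 * transport.
 */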
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp because the
 * life-span of @ndlp might go beyond the existence of @vport: the final
 * release of ndlp is determined by its reference count, and operations on
 * @ndlp need the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
}
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;

	/* re-initialize ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable:       did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp stays marked as UNUSED on the
	 * list until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_cleanup_vports_rrqs(vport, ndlp);
	lpfc_nlp_put(ndlp);
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer:  tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}
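
/*
 * Worked example for the timeout math above (input values are illustrative):
 * if the fabric reports an RA_TOV of 10 seconds, normal discovery uses
 * tmo = (10 * 3) + 3 = 33 seconds, i.e. three CT/ELS timeouts plus slack.
 * For the FAN case an E_D_TOV of 2000 milliseconds gives
 * tmo = ((2000 + 999) / 1000) + 1 = 3 seconds, rounding the millisecond
 * value up to whole seconds before adding one second of margin.
 */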
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if its running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
static int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint32_t i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;
	uint16_t rpi;

	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			/* SLI4 ports require the physical rpi value. */
			rpi = ndlp->nlp_rpi;
			if (phba->sli_rev == LPFC_SLI_REV4)
				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
			mbox->vport = vport;
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);

		if (phba->sli_rev != LPFC_SLI_REV4)
			ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 1;
	}
	return 0;
}
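
/*
 * Illustrative note on the rpi translation above: SLI4 hands the driver
 * logical rpi indexes, so the UNREG_LOGIN mailbox must carry the physical
 * value looked up through the rpi_ids[] table, e.g. (hypothetical numbers)
 *
 *	ndlp->nlp_rpi              == 5;	logical index
 *	phba->sli4_hba.rpi_ids[5]  == 0x43;	physical rpi
 *	lpfc_unreg_login(phba, vport->vpi, 0x43, mbox);
 *
 * SLI3 ports use the rpi value directly.
 */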
/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"2884 Vport array allocation failed \n");
		return;
	}
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_unreg_all_rpis(vport);
		return;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
				 mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1836 Could not issue "
				"unreg_login(all_rpis) status %d\n", rc);
	}
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
			       mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Cleanup REG_LOGIN completions which are not yet processed */
	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
		    (ndlp != (struct lpfc_nodelist *) mb->context2))
			continue;

		mb->context2 = NULL;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}

	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);
	lpfc_cleanup_vports_rrqs(vport, ndlp);
	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = NULL;
				rc = lpfc_sli_issue_mbox(phba, mbox,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox,
						     phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * We can get here with a non-NULL ndlp->rport because when we
	 * unregister a rport we don't break the rport/node linkage. So if
	 * we do, make sure we don't leave any dangling pointers behind.
	 */
	if (ndlp->rport) {
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irqrestore(shost->host_lock, iflags);
	return ndlp;
}
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	int rc;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];

	/*
	 * if it's not a physical port or if we already send
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
	    (phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/* Register the VPI for SLI3, NON-NPIV only. */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
/*
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}

void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
	 * FAN
	 */
		/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			if (phba->sli_rev <= LPFC_SLI_REV3)
				lpfc_initial_flogi(vport);
			else
				lpfc_issue_init_vfi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
		/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */
	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If rpi found it
 * returns the node list element pointer else return NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}
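
/*
 * Sketch of how a new lookup could reuse __lpfc_find_node() (hypothetical
 * helper, not part of the driver; the caller is assumed to hold host_lock
 * just as the existing __lpfc_findnode_* routines do):
 *
 *	static int lpfc_filter_by_wwnn(struct lpfc_nodelist *ndlp, void *param)
 *	{
 *		return memcmp(&ndlp->nlp_nodename, param,
 *			      sizeof(ndlp->nlp_nodename)) == 0;
 *	}
 *
 *	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwnn, wwnn);
 */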
/*
 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
 * returns the node element list pointer else return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}
/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p "
			"usgmap:x%x refcnt:%d\n",
			(void *)ndlp, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount));

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is still using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put:        did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note, kref_put returns 1 when decrementing a reference count that
	 * was 1: it invokes the release callback function but leaves the
	 * reference count at 1 (it does not actually perform the final
	 * decrement). Otherwise, it decrements the reference count and
	 * returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
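
/*
 * Illustrative pairing (not a specific call site): asynchronous submitters
 * in this file take a reference before handing the node to a completion
 * path and drop it when the completion runs, e.g.
 *
 *	mbox->context2 = lpfc_nlp_get(ndlp);	// submit side
 *	...
 *	lpfc_nlp_put(ndlp);			// completion handler
 *
 * so the final lpfc_nlp_put() is what triggers lpfc_nlp_release() once no
 * other discovery thread still holds the node.
 */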
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with a node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;

	vports = lpfc_create_vport_work_array(phba);

	/* If driver cannot allocate memory, indicate fcf is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		/*
		 * IF the CVL_RCVD bit is not set then we have sent the
		 * flogi.
		 * If dev_loss fires while we are waiting we do not want to
		 * unreg the fcf.
		 */
		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
			spin_unlock_irq(shost->host_lock);
			ret = 1;
			goto out;
		}
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else {
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
					"2624 RPI %x DID %x flg %x still "
					"logged in\n",
					ndlp->nlp_rpi, ndlp->nlp_DID,
					ndlp->nlp_flag);
				if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
					ret = 1;
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}
/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2555 UNREG_VFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2550 UNREG_FCFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
 * VFIs.
 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_unreg_all_rpis(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister VFI */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mbox, phba->pport);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(phba->pport);
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);

	return 0;
}
/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues synchronous unregister FCF mailbox command to HBA to
 * unregister the currently registered FCF record. The driver does not reset
 * the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}
5760 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
5761 * @phba: Pointer to hba context object.
5763 * This function unregisters the currently reigstered FCF. This function
5764 * also tries to find another FCF for discovery by rescan the HBA FCF table.
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);
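	/*
	 * Kick off a fresh FCF table scan starting from the first entry;
	 * on failure the FCF_INIT_DISC flag is cleared and the error is
	 * logged below.
	 */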
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}
/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks whether there are any connected remote ports for the
 * FCF and, if all the devices are disconnected, unregisters the FCFI.
 * This function also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);
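	/* Keep the FCF registered while any remote node is still using it */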
	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}
/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *	region.
 *
 * This function creates the driver data structures for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}
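	/*
	 * The header length field is taken as a count of 32-bit data words,
	 * so the number of connection records that follow is
	 * (length * sizeof(uint32_t)) / sizeof(struct lpfc_fcf_conn_rec).
	 */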
	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
			GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		conn_entry->conn_rec.vlan_tag =
			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
		conn_entry->conn_rec.flags =
			le16_to_cpu(conn_entry->conn_rec.flags);
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}
}
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));
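	/*
	 * Only accept the record if the FIP parameter header carries the
	 * expected version and length.
	 */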
	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
}
/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches the config region data to find the beginning
 * of the record specified by rec_type. If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
		(size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		uint8_t *buff,
		uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t *rec_ptr;
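	/*
	 * Layout consumed here: a 4-byte region 23 signature, a version
	 * byte (word aligned), then a sequence of TLV records, each with a
	 * one-word header giving the record type and its length in words.
	 */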
	/*
	 * If data size is less than 2 words signature and version cannot be
	 * verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}