/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.sli3_ring[LPFC_FCP_RING],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				 "6789 rport name %llx != node port name %llx",
				 rport->port_name,
				 wwn_to_u64(ndlp->nlp_portname.u.wwn));

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				 "6790 rport name %llx dev_loss_evt pending",
				 rport->port_name);
		return;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
	spin_unlock_irq(shost->host_lock);

	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);

	spin_lock_irq(&phba->hbalock);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine shall return 1 when at least
 * one remote node, including this @ndlp, is still in use of the FCF;
 * otherwise, this routine shall return 0 when no remote node is still in use
 * of the FCF when the devloss timeout happened to this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport   *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	struct Scsi_Host  *shost;
	uint8_t *name;
	int put_node;
	int warn_on = 0;
	int fcf_inuse = 0;

	rport = ndlp->rport;
	vport = ndlp->vport;
	shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irq(shost->host_lock);

	if (!rport)
		return fcf_inuse;

	name = (uint8_t *) &ndlp->nlp_portname;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/*
	 * lpfc_nlp_remove if reached with dangling rport drops the
	 * reference. To make sure that does not happen clear rport
	 * pointer in ndlp before lpfc_nlp_put.
	 */
	rdata = rport->dd_data;

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					    &phba->sli.sli3_ring[LPFC_FCP_RING],
					    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		put_device(&rport->dev);

		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	put_node = rdata->pnode != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	put_device(&rport->dev);

	if (ndlp->nlp_type & NLP_FABRIC)
		return fcf_inuse;

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}
/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for SLI4 host. For the devloss
 * timeout of the last remote node which had been in use of FCF, when this
 * routine is invoked, it shall be guaranteed that none of the remote nodes
 * are in use of the FCF. When the devloss timeout happens to the last remote
 * node using the FCF, if the FIP engine is neither in FCF table scan process
 * nor roundrobin failover process, the in-use FCF shall be unregistered. If
 * the FIP engine is in FCF discovery process, the devloss timeout state shall
 * be set for either the FCF table scan process or roundrobin failover
 * process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}
/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when
 * there are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are a lot of fast events, do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}
/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_vendor_event(shost,
			fc_get_event_number(),
			evt_data_size,
			evt_data,
			LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
			lpfc_sli4_nvme_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = lpfc_phba_elsring(phba);
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			if (phba->link_state >= LPFC_LINK_UP) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!list_empty(&pring->txq)))
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
/*
 * This is only called to handle FC worker events. Since this a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt      = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
static void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		lpfc_disc_state_machine(vport, ndlp, NULL,
					remove
					? NLP_EVT_DEVICE_RM
					: NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t      *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);

			vports[i]->fc_myDID = 0;

			if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
			    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
				if (phba->nvmet_support)
					lpfc_nvmet_update_targetport(phba);
				else
					lpfc_nvme_update_localport(vports[i]);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}
	return 0;
}
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI3 only.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout. port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI)
		lpfc_initial_flogi(vport);
	else if (vport->fc_flag & FC_PT2PT)
		lpfc_disc_start(vport);
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}
/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}
static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}
/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. This routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	lockdep_assert_held(&phba->hbalock);

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}
/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
			 struct fcf_record *new_fcf_record, uint32_t addr_mode,
			 uint16_t vlan_id, uint32_t flag)
{
	lockdep_assert_held(&phba->hbalock);

	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
		new_fcf_record);
}
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI &&
		    phba->pport->fc_flag & FC_FABRIC) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery, else returns zero.
 * If this FCF record can be used for SAN discovery, the boot_flag will
 * indicate if this FCF is used by boot bios and addr_mode will indicate the
 * addressing mode to be used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in the vlan_id on return of the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			 struct fcf_record *new_fcf_record,
			 uint32_t *boot_flag, uint32_t *addr_mode,
			 uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, driver connects to
	 * all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record reports a vlan id use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for the
			 * vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA required but FCF not support this continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA required but FCF not support this continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				      FCFCNCT_AM_SPMA) ?
				      LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if FCF support the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * of the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}
/**
 * lpfc_check_pending_fcoe_event - Check if there is a pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else return 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}
/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * This function makes a running random selection decision on which FCF record
 * to use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 * from prandom_u32() are taken as the random number generated.
 *
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; otherwise, returns false when the outcome is for keeping the
 * previously chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = 0xFFFF & prandom_u32();

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}
/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 **/
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status, if_type;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	lpfc_sli_pcimem_bcopy(shdr, shdr,
			      sizeof(union lpfc_sli4_cfg_shdr));
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
		    if_type == LPFC_SLI_INTF_IF_TYPE_2)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}
/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFCF_SOL       : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}
/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to an existing FCF record.
 * @new_fcf_record: pointer to a new FCF record.
 * @new_vlan_id: vlan id from the new FCF record.
 *
 * This function performs matching test of a new FCF record against an existing
 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
 * will not be used as part of the FCF record matching criteria.
 *
 * Returns true if all the fields match, otherwise returns false.
 */
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}
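
/*
 * Illustration only (compiled out): how the match helper above is used to
 * decide whether a freshly read record describes the FCF already in use.
 * Passing LPFC_FCOE_IGNORE_VID relaxes only the VLAN check; MAC address,
 * switch name, fabric name and priority must still all match.
 */
#if 0
	if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
	    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
				       new_fcf_record, LPFC_FCOE_IGNORE_VID))
		;	/* same FCF by every criterion except VLAN id */
#endif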
/**
 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
 * @vport: Pointer to vport object.
 * @fcf_index: index to next fcf.
 *
 * This function processes the roundrobin fcf failover to the next fcf index.
 * When this function is invoked, there will be a current fcf registered
 * for flogi.
 * Return: 0 for continue retrying flogi on currently registered fcf;
 *         1 for stop flogi on currently registered fcf;
 */
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	struct lpfc_hba *phba = vport->phba;
	int rc;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		goto stop_flogi_current_fcf;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
				"2794 Try FLOGI roundrobin FCF failover to "
				"(x%x)\n", fcf_index);
		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
		if (rc)
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2761 FLOGI roundrobin FCF failover "
					"failed (rc:x%x) to read FCF (x%x)\n",
					rc, phba->fcf.current_rec.fcf_indx);
		else
			goto stop_flogi_current_fcf;
	}
	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}
/**
 * lpfc_sli4_fcf_pri_list_del
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete
 * This routine checks the on-list flag of the fcf_index to be deleted.
 * If it is on the list, it is removed from the list and the flag is
 * cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
 **/
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
			uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
		"3058 deleting idx x%x pri x%x flg x%x\n",
		fcf_index, new_fcf_pri->fcf_rec.priority,
		new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
				new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_set_fcf_flogi_fail
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so that the roundrobin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
 **/
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}
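
/*
 * Note: the LPFC_FCF_FLOGI_FAILED bit set above only steers the current
 * round of roundrobin selection. When the record is re-read,
 * lpfc_sli4_fcf_pri_list_add() below assigns flag = LPFC_FCF_ON_PRI_LIST
 * (plain assignment, not OR), so the failed bit is dropped and the FCF
 * becomes eligible again.
 */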
/**
 * lpfc_sli4_fcf_pri_list_add
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 * @new_fcf_record: pointer to the fcf record being added
 * This routine checks the priority of the fcf_index to be added.
 * If it is a lower priority than the current head of the fcf_pri list
 * then it is added to the list in the right order.
 * If it is the same priority as the current head of the list then it
 * is added to the head of the list and its bit in the rr_bmask is set.
 * If the fcf_index to be added is of a higher priority than the current
 * head of the list then the rr_bmask is cleared, its bit is set in the
 * rr_bmask and it is added to the head of the list.
 * returns:
 * 0=success 1=failure
 **/
static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
	uint16_t fcf_index,
	struct fcf_record *new_fcf_record)
{
	uint16_t current_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	struct lpfc_fcf_pri *new_fcf_pri;
	int ret;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
		"3059 adding idx x%x pri x%x flg x%x\n",
		fcf_index, new_fcf_record->fip_priority,
		new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
		list_del_init(&new_fcf_pri->list);
	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				LPFC_SLI4_FCF_TBL_INDX_MAX);
	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		ret = 0; /* Empty rr list */
		goto out;
	}
	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
			memset(phba->fcf.fcf_rr_bmask, 0,
				sizeof(*phba->fcf.fcf_rr_bmask));
			/* fcfs_at_this_priority_level = 1; */
			phba->fcf.eligible_fcf_cnt = 1;
		} else
			/* fcfs_at_this_priority_level++; */
			phba->fcf.eligible_fcf_cnt++;
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				&phba->fcf.fcf_pri_list, list) {
		if (new_fcf_pri->fcf_rec.priority <=
				fcf_pri->fcf_rec.priority) {
			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
				list_add(&new_fcf_pri->list,
						&phba->fcf.fcf_pri_list);
			else
				list_add(&new_fcf_pri->list,
					 &((struct lpfc_fcf_pri *)
					fcf_pri->list.prev)->list);
			ret = 0;
			goto out;
		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
			|| new_fcf_pri->fcf_rec.priority <
				next_fcf_pri->fcf_rec.priority) {
			list_add(&new_fcf_pri->list, &fcf_pri->list);
			ret = 0;
			goto out;
		}
		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
			continue;
	}
	ret = 1;
out:
	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
	spin_unlock_irq(&phba->hbalock);
	return ret;
}
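
/*
 * Illustration only (compiled out): the two priority-list helpers above are
 * used as a pair when a record is (re)evaluated. "eligible" is a hypothetical
 * stand-in for the connection-list check performed by the real handlers.
 */
#if 0
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	if (eligible)
		/* keeps fcf_pri_list sorted; rr_bmask tracks best priority */
		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
						new_fcf_record);
	else
		/* drop the record from the roundrobin candidates */
		lpfc_sli4_fcf_pri_list_del(phba, fcf_index);
#endif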
/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kicks start
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
void
lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	struct lpfc_fcf_rec *fcf_rec = NULL;
	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
	bool select_new_fcf;
	int rc;

	/* If there is pending FCoE event restart FCF table scan */
	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2765 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		/* Let next new FCF event trigger fast failover */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Check the FCF record against the connection list */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry; otherwise, this is an eligible FCF
	 * record for roundrobin FCF failover.
	 */
	if (!rc) {
		lpfc_sli4_fcf_pri_list_del(phba,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2781 FCF (x%x) failed connection "
				"list check: (x%x/x%x/%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_avail,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_valid,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_sol,
				       new_fcf_record));
		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
			    phba->fcf.current_rec.fcf_indx) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2862 FCF (x%x) matches property "
					"of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
				goto read_next_fcf;
			}
			/*
			 * In case the current in-use FCF record becomes
			 * invalid/unavailable during FCF discovery that
			 * was not triggered by fast FCF failover process,
			 * treat it as fast FCF failover.
			 */
			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"2835 Invalid in-use FCF "
						"(x%x), enter FCF failover "
						"table scan.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_mbox_cmd_free(phba, mboxq);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
		}
		goto read_next_fcf;
	} else {
		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
						new_fcf_record);
		if (rc)
			goto read_next_fcf;
	}
	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery. The condition that a rescan
	 * matches the in-use FCF record: fabric name, switch name, mac
	 * address, and vlan_id.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, vlan_id)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
			    phba->fcf.current_rec.fcf_indx) {
				phba->fcf.fcf_flag |= FCF_AVAILABLE;
				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
					/* Stop FCF redisc wait timer */
					__lpfc_sli4_stop_fcf_redisc_wait_timer(
									phba);
				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
					/* Fast failover, mark completed */
					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2836 New FCF matches in-use "
						"FCF (x%x), port_state:x%x, "
						"fc_flag:x%x\n",
						phba->fcf.current_rec.fcf_indx,
						phba->pport->port_state,
						phba->pport->fc_flag);
				goto out;
			} else
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2863 New FCF (x%x) matches "
					"property of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
		}
		/*
		 * Read next FCF record from HBA searching for the matching
		 * with in-use record only if not during the fast failover
		 * period. In case of fast failover period, it shall try to
		 * determine whether the FCF record just read should be the
		 * next candidate.
		 */
		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
	}
	/*
	 * Update on failover FCF record only if it's in FCF fast-failover
	 * period; otherwise, update on current FCF record.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
		fcf_rec = &phba->fcf.failover_rec;
	else
		fcf_rec = &phba->fcf.current_rec;

	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the driver FCF record does not have boot flag
		 * set and new hba fcf record has boot flag set, use
		 * the new hba fcf record.
		 */
		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
			/* Choose this FCF record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2837 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, BOOT_ENABLE);
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the driver FCF record has boot flag set and the
		 * new hba FCF record does not have boot flag, read
		 * the next FCF record.
		 */
		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the new hba FCF record has lower priority value
		 * than the driver FCF record, use the new record.
		 */
		if (new_fcf_record->fip_priority < fcf_rec->priority) {
			/* Choose the new FCF record with lower priority */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2838 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, 0);
			/* Reset running random FCF selection count */
			phba->fcf.eligible_fcf_cnt = 1;
		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
			/* Update running random FCF selection count */
			phba->fcf.eligible_fcf_cnt++;
			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
						phba->fcf.eligible_fcf_cnt);
			if (select_new_fcf) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2839 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
				/* Choose the new FCF by random selection */
				__lpfc_update_fcf_record(phba, fcf_rec,
							 new_fcf_record,
							 addr_mode, vlan_id, 0);
			}
		}
		spin_unlock_irq(&phba->hbalock);
		goto read_next_fcf;
	}
	/*
	 * This is the first suitable FCF record, choose this record for
	 * initial best-fit FCF.
	 */
	if (fcf_rec) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2840 Update initial FCF candidate "
				"with FCF (x%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record));
		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					 addr_mode, vlan_id, (boot_flag ?
					 BOOT_ENABLE : 0));
		phba->fcf.fcf_flag |= FCF_AVAILABLE;
		/* Setup initial running random FCF selection count */
		phba->fcf.eligible_fcf_cnt = 1;
	}
	spin_unlock_irq(&phba->hbalock);
	goto read_next_fcf;

read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Case of FCF fast failover scan
			 */

			/*
			 * It has not found any suitable FCF record, cancel
			 * FCF scan inprogress, and do nothing
			 */
			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					       "2782 No suitable FCF found: "
					       "(x%x/x%x)\n",
					       phba->fcoe_eventtag_at_fcf_scan,
					       bf_get(lpfc_fcf_record_fcf_index,
						      new_fcf_record));
				spin_lock_irq(&phba->hbalock);
				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
					phba->hba_flag &= ~FCF_TS_INPROG;
					spin_unlock_irq(&phba->hbalock);
					/* Unregister in-use FCF and rescan */
					lpfc_printf_log(phba, KERN_INFO,
							LOG_FIP,
							"2864 On devloss tmo "
							"unreg in-use FCF and "
							"rescan FCF table\n");
					lpfc_unregister_fcf_rescan(phba);
					return;
				}
				/*
				 * Let next new FCF event trigger fast failover
				 */
				phba->hba_flag &= ~FCF_TS_INPROG;
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			/*
			 * It has found a suitable FCF record that is not
			 * the same as in-use FCF record, unregister the
			 * in-use FCF record, replace the in-use FCF record
			 * with the new FCF record, mark FCF fast failover
			 * completed, and then start register the new FCF
			 * record.
			 */

			/* Unregister the current in-use FCF record */
			lpfc_unregister_fcf(phba);

			/* Replace in-use record with the new record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2842 Replace in-use FCF (x%x) "
					"with failover FCF (x%x)\n",
					phba->fcf.current_rec.fcf_indx,
					phba->fcf.failover_rec.fcf_indx);
			memcpy(&phba->fcf.current_rec,
			       &phba->fcf.failover_rec,
			       sizeof(struct lpfc_fcf_rec));
			/*
			 * Mark the fast FCF failover rediscovery completed
			 * and the start of the first round of the roundrobin
			 * failover.
			 */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irq(&phba->hbalock);
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		} else {
			/*
			 * In case of transaction period to fast FCF failover,
			 * do nothing when search to the end of the FCF table.
			 */
			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
				return;

			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
			    phba->fcf.fcf_flag & FCF_IN_USE) {
				/*
				 * In case the current in-use FCF record no
				 * longer existed during FCF discovery that
				 * was not triggered by fast FCF failover
				 * process, treat it as fast FCF failover.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2841 In-use FCF record (x%x) "
						"not reported, entering fast "
						"FCF failover mode scanning.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		}
	} else
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}
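
/*
 * In short, the scan completion above makes one of three moves per record:
 * drop it on a connection-list failure, adopt it into current_rec or
 * failover_rec through __lpfc_update_fcf_record() (boot flag first, then
 * lower priority, then random tie-break among equals), or keep scanning
 * with lpfc_sli4_fcf_scan_read_fcf_rec() until next_fcf_index reaches
 * LPFC_FCOE_FCF_NEXT_NONE.
 */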
/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function for FLOGI failure roundrobin FCF failover
 * read FCF record mailbox command from the eligible FCF record bmask for
 * performing the failover. If the FCF read back is not valid/available, it
 * falls through to retrying FLOGI to the currently registered FCF again.
 * Otherwise, if the FCF read back is valid and available, it will set the
 * newly read FCF record to the failover FCF record, unregister currently
 * registered FCF record, copy the failover FCF record to the current
 * FCF record, and then register the current FCF record before proceeding
 * to trying FLOGI on the new failover FCF.
 */
void
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t next_fcf_index, fcf_index;
	uint16_t current_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, stop the roundrobin failover process */
	if (phba->link_state < LPFC_LINK_UP) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		phba->hba_flag &= ~FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		goto out;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record. "
				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
				phba->fcf.fcf_flag);
		lpfc_unregister_fcf_rescan(phba);
		goto out;
	}

	/* Get the needed parameters from FCF record */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	if (!rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2848 Remove ineligible FCF (x%x) from "
				"from roundrobin bmask\n", fcf_index);
		/* Clear roundrobin bmask bit for ineligible FCF */
		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
		/* Perform next round of roundrobin FCF failover */
		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
		if (rc)
			goto out;
		goto error_out;
	}

	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2760 Perform FLOGI roundrobin FCF failover: "
				"FCF (x%x) back to FCF (x%x)\n",
				phba->fcf.current_rec.fcf_indx, fcf_index);
		/* Wait 500 ms before retrying FLOGI to current FCF */
		msleep(500);
		lpfc_issue_init_vfi(phba->pport);
		goto out;
	}

	/* Upload new FCF record to the failover FCF record */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
			phba->fcf.failover_rec.fcf_indx, fcf_index);
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);

	current_fcf_index = phba->fcf.current_rec.fcf_indx;

	/* Unregister the current in-use FCF record */
	lpfc_unregister_fcf(phba);

	/* Replace in-use record with the new record */
	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
	       sizeof(struct lpfc_fcf_rec));

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2783 Perform FLOGI roundrobin FCF failover: FCF "
			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);

error_out:
	lpfc_register_fcf(phba);
out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function of read FCF record mailbox command for
 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
 * failover when a new FCF event happened. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask for the eligible FCF record for roundrobin failover.
 */
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);

	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vfi mailbox command.
 */
static void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	/*
	 * VFI not supported on interface type 0, just do the flogi
	 * Also continue if the VFI is in use - just use the same one.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
			LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX,
				"2891 Init VFI mailbox failed 0x%x\n",
				mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	lpfc_initial_flogi(vport);
	mempool_free(mboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vfi mailbox command to initialize the VFI and
 * VPI for the physical port.
 */
void
lpfc_issue_init_vfi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2892 Failed to allocate "
			"init_vfi mailbox\n");
		return;
	}
	lpfc_init_vfi(mboxq, vport);
	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
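
/*
 * lpfc_issue_init_vfi() above shows the non-blocking mailbox pattern used
 * throughout this file: allocate from mbox_mem_pool, format the command
 * with an lpfc_* helper, point mbox_cmpl at the completion handler, issue
 * with MBX_NOWAIT, and free the mailbox only on MBX_NOT_FINISHED; on
 * success the completion handler owns the mailbox and frees it.
 */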
/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 */
void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX,
				"2609 Init VPI mailbox failed 0x%x\n",
				mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_DISCOVERY,
					 "2731 Cannot find fabric "
					 "controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the
 * VPI for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc, vpi;

	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
		vpi = lpfc_alloc_vpi(vport->phba);
		if (!vpi) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_MBOX,
					 "3303 Failed to obtain vport vpi\n");
			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
			return;
		}
		vport->vpi = vpi;
	}

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2607 Failed to allocate "
			"init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
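
/*
 * lpfc_create_vport_work_array() returns a NULL-terminated snapshot of the
 * vport list, which is why the loop above may test vports[i] != NULL and
 * must always be paired with lpfc_destroy_vport_work_array(), whichever
 * way the loop exits.
 */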
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/*
	 * VFI not supported for interface type 0, so ignore any mailbox
	 * error (except VFI in use) and continue with the discovery.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
			LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2018 REG_VFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto out_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out_free_mem;
	}

	/* If the VFI is already registered, there is nothing else to do
	 * Unless this was a VFI update and we are in PT2PT mode, then
	 * we should drop through to set the port state to ready.
	 */
	if (vport->fc_flag & FC_VFI_REGISTERED)
		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
		      vport->fc_flag & FC_PT2PT))
			goto out_free_mem;

	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* In case SLI4 FC loopback test, we are ready */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->link_flag & LS_LOOPBACK_MODE)) {
		phba->link_state = LPFC_HBA_READY;
		goto out_free_mem;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
			 "alpacnt:%d LinkState:%x topology:%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_myDID,
			 vport->phba->alpa_map[0],
			 phba->link_state, phba->fc_topology);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/*
		 * For private loop or for NPort pt2pt,
		 * just start discovery and we are done.
		 */
		if ((vport->fc_flag & FC_PT2PT) ||
		    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
		    !(vport->fc_flag & FC_PUBLIC_LOOP))) {

			/* Use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			if (vport->fc_flag & FC_PT2PT)
				vport->port_state = LPFC_VPORT_READY;
			else
				lpfc_disc_start(vport);
		} else {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		}
	}

out_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct serv_parm *sp = &vport->fc_sparam;
	uint32_t ed_tov;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x>\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));

	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		ed_tov = (ed_tov + 999999) / 1000000;

	phba->fc_edtov = ed_tov;
	phba->fc_ratov = (2 * ed_tov) / 1000;
	if (phba->fc_ratov < FF_DEF_RATOV) {
		/* RA_TOV should be at least 10 sec for initial flogi */
		phba->fc_ratov = FF_DEF_RATOV;
	}

	lpfc_update_vport_wwn(vport);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
}
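
/*
 * Worked example for the E_D_TOV math above: with edtovResolution set the
 * fabric reports E_D_TOV in nanoseconds, so 2,000,000 ns rounds up to 2 ms;
 * fc_ratov = (2 * 2) / 1000 then truncates to 0 seconds, which is exactly
 * why the FF_DEF_RATOV floor is applied before the initial FLOGI.
 */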
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	struct Scsi_Host *shost;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;
	struct fcf_record *fcf_record;
	uint32_t fc_flags = 0;

	spin_lock_irq(&phba->hbalock);
	phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
		case LPFC_LINK_SPEED_1GHZ:
		case LPFC_LINK_SPEED_2GHZ:
		case LPFC_LINK_SPEED_4GHZ:
		case LPFC_LINK_SPEED_8GHZ:
		case LPFC_LINK_SPEED_10GHZ:
		case LPFC_LINK_SPEED_16GHZ:
		case LPFC_LINK_SPEED_32GHZ:
			break;
		default:
			phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
			break;
		}
	}

	if (phba->fc_topology &&
	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3314 Topology changed was 0x%x is 0x%x\n",
				phba->fc_topology,
				bf_get(lpfc_mbx_read_top_topology, la));
		phba->fc_topology_changed = 1;
	}

	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	shost = lpfc_shost_from_vport(vport);
	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* if npiv is enabled and this adapter supports npiv log
		 * a message that npiv is not supported in this topology
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1309 Link Up Event npiv not supported in loop "
					"topology\n");
		/* Get Loop Map information */
		if (bf_get(lpfc_mbx_read_top_il, la))
			fc_flags |= FC_LBIT;

		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
		i = la->lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			   (phba->sli_rev >= LPFC_SLI_REV3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		fc_flags |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	if (fc_flags) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= fc_flags;
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_linkup(phba);
	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that support FCF index 0 and driver
		 * defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2554 Could not allocate memory for "
					"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2013 Could not manually add FCF "
					"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		/* Reset FCF roundrobin bmask for new discovery */
		lpfc_sli4_clear_fcf_rr_bmask(phba);
	}

	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
}
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}
/*
 * This routine handles processing a READ_TOPOLOGY mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI4 only.
 */
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_mbx_read_top *la;
	struct lpfc_sli_ring *pring;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	uint8_t attn_type;

	/* Unblock ELS traffic */
	pring = lpfc_phba_elsring(phba);
	pring->flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
	}

	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	attn_type = bf_get(lpfc_mbx_read_top_att_type, la);

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (bf_get(lpfc_mbx_read_top_pb, la))
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if (phba->fc_eventTag <= la->eventTag) {
		phba->fc_stat.LinkMultiEvent++;
		if (attn_type == LPFC_ATT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	if (phba->sli_rev < LPFC_SLI_REV4) {
		spin_lock_irq(&phba->hbalock);
		if (bf_get(lpfc_mbx_read_top_mm, la))
			phba->sli.sli_flag |= LPFC_MENLO_MAINT;
		else
			phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
		spin_unlock_irq(&phba->hbalock);
	}

	phba->link_events++;
	if ((attn_type == LPFC_ATT_LINK_UP) &&
	    !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x x%x %d\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0],
					bf_get(lpfc_mbx_read_top_mm, la),
					bf_get(lpfc_mbx_read_top_fa, la),
					phba->wait_4_mlo_maint_flg);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else if (attn_type == LPFC_ATT_LINK_DOWN ||
		   attn_type == LPFC_ATT_UNEXP_WWPN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1308 Link Down Event in loop back mode "
				"x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		else if (attn_type == LPFC_ATT_UNEXP_WWPN)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1313 Link Down UNEXP WWPN Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				bf_get(lpfc_mbx_read_top_mm, la),
				bf_get(lpfc_mbx_read_top_fa, la));
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				bf_get(lpfc_mbx_read_top_mm, la),
				bf_get(lpfc_mbx_read_top_fa, la));
		lpfc_mbx_issue_link_down(phba);
	}
	if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
	    attn_type == LPFC_ATT_LINK_UP) {
		if (phba->link_state != LPFC_LINK_DOWN) {
			phba->fc_stat.LinkDown++;
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1312 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
			lpfc_mbx_issue_link_down(phba);
		} else
			lpfc_enable_la(phba);

		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1310 Menlo Maint Mode Link up Event x%x rcvd "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		/*
		 * The cmnd that triggered this will be waiting for this
		 * signal.
		 */
		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
		if (phba->wait_4_mlo_maint_flg) {
			phba->wait_4_mlo_maint_flg = 0;
			wake_up_interruptible(&phba->wait_4_mlo_m_q);
		}
	}

	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    bf_get(lpfc_mbx_read_top_fa, la)) {
		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
			lpfc_issue_clear_la(phba, vport);
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n",
				bf_get(lpfc_mbx_read_top_fa, la));
	}

lpfc_mbx_cmpl_read_topology_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
}
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);
	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We received an RSCN after issuing this
		 * mbox reg login, we may have cycled
		 * back through the state and be
		 * back at reg login state so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(shost->host_lock);

		/*
		 * We cannot leave the RPI registered because
		 * if we go thru discovery again for this ndlp
		 * a subsequent REG_RPI will fail.
		 */
		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
		lpfc_unreg_rpi(vport, ndlp);
	}

	/* Call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
}
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
			vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
				LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_cleanup_vports_rrqs(vport, NULL);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}
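
/*
 * Unlike most mailbox issuers in this file, lpfc_mbx_unreg_vpi() reports
 * failure both through its return code and by setting
 * vport->unreg_vpi_cmpl = VPORT_ERROR, so a caller polling unreg_vpi_cmpl
 * is not left waiting on a command that was never issued.
 */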
void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;

		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
			if (phba->nvmet_support)
				lpfc_nvmet_update_targetport(phba);
			else
				lpfc_nvme_update_localport(vport);
		}
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int mbx_wait_rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *) vport_info;
	do {
		/* free dma buffer from previous round */
		if (pmb->context1) {
			mp = (struct lpfc_dmabuf *)pmb->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
							LPFC_MBOX_TMO);

		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0544 lpfc_create_static_vport failed to"
				" issue dump mailbox command ret 0x%x "
				"status 0x%x\n",
				mbx_wait_rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *)pmb->context1;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
				sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				vport_buff + offset,
				byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
			!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0545 lpfc_create_static_vport bad"
			" information header 0x%x 0x%x\n",
			le32_to_cpu(vport_info->signature),
			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0546 lpfc_create_static_vport failed to"
				" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	if (mbx_wait_rc != MBX_TIMEOUT) {
		if (pmb->context1) {
			mp = (struct lpfc_dmabuf *)pmb->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(pmb, phba->mbox_mem_pool);
	}
}
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->context1);
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	ndlp = (struct lpfc_nodelist *)pmb->context2;
	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count to ndlp after the
			 * references to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the reference count to ndlp after the references
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* when physical port receives a logo, do not start
		 * vport discovery */
		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
			lpfc_start_fdiscs(phba);
		else {
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
			spin_unlock_irq(shost->host_lock);
		}
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end after
	 * all the current references to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}
/**
 * lpfc_issue_gidft - issue a GID_FT for each FC4 Type
 * @vport: The virtual port for which this call is executed.
 *
 * This routine will issue a GID_FT for each FC4 Type supported
 * by the driver. ALL GID_FTs must complete before discovery is started.
 **/
int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;

	/* Good status, issue CT Request to NameServer */
	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
			/* Cannot issue NameServer FCP Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
					 "0604 %s FC TYPE %x %s\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_FCP,
					 "Finishing discovery.");
			return 0;
		}
		vport->gidft_inp++;
	}

	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
			/* Cannot issue NameServer NVME Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
					 "0605 %s FC_TYPE %x %s %d\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_NVME,
					 "Finishing discovery: gidftinp ",
					 vport->gidft_inp);
			if (vport->gidft_inp == 0)
				return 0;
		} else
			vport->gidft_inp++;
	}
	return vport->gidft_inp;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;
	vport->gidft_inp = 0;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);

out:
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);

		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
				    FC_TYPE_NVME);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	if (lpfc_issue_gidft(vport) == 0)
		goto out;

	/*
	 * At this point in time we may need to wait for multiple
	 * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
	 *
	 * decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
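
/*
 * Bind an ndlp to the SCSI FC transport as a remote port. Note the
 * reference handling below: a previously registered rport may still hold
 * our node pointer in its dd_data, so the old rport/node link is broken
 * and its reference dropped before fc_remote_port_add() is called again.
 */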
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba *phba = vport->phba;

	if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
		return;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port. But fc_remote_port_add zeros the space to which
	 * rport->dd_data points. So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	rport = ndlp->rport;
	if (rport) {
		rdata = rport->dd_data;
		/* break the link before dropping the ref */
		ndlp->rport = NULL;
		if (rdata) {
			if (rdata->pnode == ndlp)
				lpfc_nlp_put(ndlp);
			rdata->pnode = NULL;
		}
		/* drop reference for earlier registration */
		put_device(&rport->dev);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add:       did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	/* Don't add the remote port if unloading. */
	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3183 rport register x%06x, rport %p role x%x\n",
			 ndlp->nlp_DID, rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_hba *phba = vport->phba;

	if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport delete:    did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3184 rport unregister x%06x, rport %p\n",
			 ndlp->nlp_DID, rport);

	fc_remote_port_delete(rport);

	return;
}
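
/*
 * Maintain the per-vport counts of nodes in each discovery state. The
 * counters (fc_plogi_cnt, fc_adisc_cnt, fc_npr_cnt, ...) are read by the
 * discovery logic to decide when discovery can complete.
 */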
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		if (vport->fc_npr_cnt == 0 && count == -1)
			vport->fc_npr_cnt = 0;
		else
			vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
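
/*
 * Handle the side effects of a node state transition: register or
 * unregister the node with the FC and NVME transports when it enters or
 * leaves the MAPPED/UNMAPPED states, and police the SCSI target id the
 * transport assigned.
 */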
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* FCP and NVME Transport interface */
	if ((old_state == NLP_STE_MAPPED_NODE ||
	     old_state == NLP_STE_UNMAPPED_NODE)) {
		if (ndlp->rport) {
			vport->phba->nport_event_cnt++;
			lpfc_unregister_remote_port(ndlp);
		}

		/* Notify the NVME transport of this rport's loss on the
		 * Initiator. For NVME Target, should upcall transport
		 * in the else clause when API available.
		 */
		if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
			vport->phba->nport_event_cnt++;
			if (vport->phba->nvmet_support == 0)
				lpfc_nvme_unregister_port(vport, ndlp);
		}
	}

	/* FCP and NVME Transport interfaces */

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		if ((ndlp->nlp_fc4_type & NLP_FC4_FCP) ||
		    (ndlp->nlp_DID == Fabric_DID)) {
			vport->phba->nport_event_cnt++;
			/*
			 * Tell the fc transport about the port, if we haven't
			 * already. If we have, and it's a scsi entity, be
			 * sure to unblock any attached scsi devices
			 */
			lpfc_register_remote_port(vport, ndlp);
		}
		/* Notify the NVME transport of this new rport. */
		if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
			if (vport->phba->nvmet_support == 0) {
				/* Register this rport with the transport.
				 * Initiators take the NDLP ref count in
				 * the register.
				 */
				vport->phba->nport_event_cnt++;
				lpfc_nvme_register_port(vport, ndlp);
			} else {
				/* Just take an NDLP ref count since the
				 * target does not register rports.
				 */
				lpfc_nlp_get(ndlp);
			}
		}
	}

	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * If the node just added to Mapped list was an FCP target,
	 * but the remote port registration failed or assigned a target
	 * id outside the presentable range - move the node to the
	 * Unmapped List.
	 */
	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (ndlp->nlp_type & NLP_FCP_TARGET) &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
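
/* Render a node state as a human-readable name for trace messages. */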
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_LOGO_ISSUE] = "LOGO",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
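
/*
 * Add a node to the vport's fc_nodes list if it is not already queued.
 * The host lock serializes list manipulation with the discovery and
 * worker threads.
 */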
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}

void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}

static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp because the
 * life-span of @ndlp might go beyond the existence of @vport, as the
 * final release of ndlp is determined by its reference count. And the
 * operation on @ndlp needs the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
			(unsigned long)ndlp);
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	ndlp->nlp_fc4_type = NLP_FC4_NONE;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
}
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;
	unsigned long *active_rrqs_xri_bitmap = NULL;
	int rpi = LPFC_RPI_ALLOC_ERROR;

	if (!ndlp)
		return NULL;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		rpi = lpfc_sli4_alloc_rpi(vport->phba);
		if (rpi == LPFC_RPI_ALLOC_ERROR)
			return NULL;
	}

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		goto free_rpi;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		goto free_rpi;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;
	if (phba->sli_rev == LPFC_SLI_REV4)
		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;

	/* re-initialize ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof(struct list_head)), 0,
		sizeof(struct lpfc_nodelist) - sizeof(struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	if (phba->sli_rev == LPFC_SLI_REV4)
		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = rpi;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
				 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);
	}

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable:     did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;

free_rpi:
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(vport->phba, rpi);
	return NULL;
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp is marked as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_cleanup_vports_rrqs(vport, ndlp);
		lpfc_unreg_rpi(vport, ndlp);
	}

	lpfc_nlp_put(ndlp);
	return;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer:  tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == LPFC_FCP_RING) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	}
	return 0;
}
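
/*
 * Walk a ring's txq and move every IOCB that matches the given nport
 * onto the caller's dequeue list; the caller completes them with an
 * error status once all rings have been scanned.
 */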
static void
__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
		struct list_head *dequeue_list)
{
	struct lpfc_iocbq *iocb, *next_iocb;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
			/* match, dequeue */
			list_move_tail(&iocb->list, dequeue_list);
	}
}

static void
lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t i;

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++)
		__lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
						dequeue_list);
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *qp = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock(&pring->ring_lock);
		__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
		spin_unlock(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		if (phba->sli_rev != LPFC_SLI_REV4)
			lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
		else
			lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}
/**
 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function will issue an ELS LOGO command after completing
 * the unreg_rpi.
 **/
static void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *)(pmb->context1);
	if (!ndlp)
		return;
	lpfc_issue_els_logo(vport, ndlp, 0);
	mempool_free(pmb, phba->mbox_mem_pool);
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc, acc_plogi = 1;
	uint16_t rpi;

	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
			lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
					 "3366 RPI x%x needs to be "
					 "unregistered nlp_flag x%x "
					 "did x%x\n",
					 ndlp->nlp_rpi, ndlp->nlp_flag,
					 ndlp->nlp_DID);
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			/* SLI4 ports require the physical rpi value. */
			rpi = ndlp->nlp_rpi;
			if (phba->sli_rev == LPFC_SLI_REV4)
				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
			mbox->vport = vport;
			if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
				mbox->context1 = ndlp;
				mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
			} else
			if (phba->sli_rev == LPFC_SLI_REV4 &&
			    (!(vport->load_flag & FC_UNLOADING)) &&
			    (bf_get(lpfc_sli_intf_if_type,
			     &phba->sli4_hba.sli_intf) ==
			      LPFC_SLI_INTF_IF_TYPE_2) &&
			    (kref_read(&ndlp->kref) > 0)) {
				mbox->context1 = lpfc_nlp_get(ndlp);
				mbox->mbox_cmpl =
					lpfc_sli4_unreg_rpi_cmpl_clr;
				/*
				 * accept PLOGIs after unreg_rpi_cmpl
				 */
				acc_plogi = 0;
			} else
				mbox->mbox_cmpl =
					lpfc_sli_def_mbox_cmpl;

			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				acc_plogi = 1;
			}
		}
		lpfc_no_rpi(phba, ndlp);

		if (phba->sli_rev != LPFC_SLI_REV4)
			ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		if (acc_plogi)
			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
		return 1;
	}
	ndlp->nlp_flag &= ~NLP_LOGO_ACC;
	return 0;
}
/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2884 Vport array allocation failed\n");
		return;
	}
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_unreg_all_rpis(vport);
		return;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
				 mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1836 Could not issue "
				"unreg_login(all_rpis) status %d\n", rc);
	}
}
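
/*
 * Unregister the default RPIs allocated by the firmware for this vport.
 * Like lpfc_unreg_all_rpis() above, this issues the mailbox synchronously
 * and only frees it when the command did not time out.
 */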
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
			       mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1815 Could not issue "
				"unreg_did (default rpis) status %d\n",
				rc);
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		lpfc_disable_node(vport, ndlp);
	}

	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Cleanup REG_LOGIN completions which are not yet processed */
	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
			(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
			(ndlp != (struct lpfc_nodelist *) mb->context2))
			continue;

		mb->context2 = NULL;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}

	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);
	lpfc_cleanup_vports_rrqs(vport, ndlp);
	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = ndlp;
				rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox, phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * ndlp->rport must be set to NULL before it reaches here
	 * i.e. break rport/node link before doing lpfc_nlp_put for
	 * registered rport and then drop the reference of rport.
	 */
	if (ndlp->rport) {
		/*
		 * extra lpfc_nlp_put dropped the reference of ndlp
		 * for registered rport so need to cleanup rport
		 */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0940 removed node x%p DID x%x "
				" rport not null %p\n",
				ndlp, ndlp->nlp_DID, ndlp->rport);
		rport = ndlp->rport;
		rdata = rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		put_device(&rport->dev);
	}
}
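
/*
 * Match a node against a DID. Beyond the direct compare, this handles
 * the private-loop case where a device may be known by its ALPA alone
 * (domain and area of zero) while the port itself holds a fabric DID.
 */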
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			/* This code is supposed to match the ID
			 * for a private loop device that is
			 * connected to fl_port. But we need to
			 * check that the port did not just go
			 * from pt2pt to fabric or we could end
			 * up matching ndlp->nlp_DID 000001 to
			 * fabric DID 0x20101
			 */
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id &&
				    vport->phba->fc_topology ==
				    LPFC_TOPOLOGY_LOOP)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x %p\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1,
					 ndlp->active_rrqs_xri_bitmap);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irqrestore(shost->host_lock, iflags);
	return ndlp;
}
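
/*
 * Find or create the node for a DID that is about to be discovered.
 * Depending on RSCN state, NVME target mode, and whether a PLOGI was
 * already received, the node is marked NLP_NPR_2B_DISC, returned as-is,
 * or NULL is returned to skip rediscovery.
 */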
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if (vport->phba->nvmet_support)
			return NULL;
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = lpfc_nlp_init(vport, did);
		if (!ndlp)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		if (vport->phba->nvmet_support)
			return NULL;
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	/* The NVME Target does not want to actively manage an rport.
	 * The goal is to allow the target to reset its state and clear
	 * pending IO in preparation for the initiator to recover.
	 */
	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);

			/* NVME Target mode waits until rport is known to be
			 * impacted by the RSCN before it transitions. No
			 * active management - just go to NPR provided the
			 * node had a valid login.
			 */
			if (vport->phba->nvmet_support)
				return ndlp;

			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If the initiator received a PLOGI from this NPort or if the
		 * initiator is already in the process of discovery on it,
		 * there's no need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    (!vport->phba->nvmet_support &&
		     ndlp->nlp_flag & NLP_RCV_PLOGI))
			return NULL;

		if (vport->phba->nvmet_support)
			return ndlp;

		/* Moving to NPR state clears unsolicited flags and
		 * allows for rediscovery
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
	struct lpfc_sli_ring *fcp_ring   = &psli->sli3_ring[LPFC_FCP_RING];
	int rc;

	/*
	 * if it's not a physical port or if we already sent
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
	    (phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
				 "3315 Link is not up %x\n",
				 phba->link_state);
		return;
	}

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/* Register the VPI for SLI3, NPIV only. */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_clear_la(phba, vport);
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
/*
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = lpfc_phba_elsring(phba);

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
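
/*
 * Worker-thread half of the discovery timeout. The timer above only
 * posts WORKER_DISC_TMO; this handler runs in process context and
 * recovers based on how far discovery progressed (FAN, FLOGI,
 * NameServer login/query, authentication, or RSCN handling).
 */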
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
		/*
		 * port_state is identically LPFC_LOCAL_CFG_LINK while
		 * waiting for FAN timeout
		 */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");

		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			if (phba->sli_rev <= LPFC_SLI_REV3)
				lpfc_initial_flogi(vport);
			else
				lpfc_issue_init_vfi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
		/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			vport->gidft_inp = 0;
			rc = lpfc_issue_gidft(vport);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
				/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* fall through */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		if (phba->sli_rev != LPFC_SLI_REV4) {
			psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->sli3_ring[LPFC_FCP_RING].flag &=
				~LPFC_STOP_IOCB_EVENT;
		}
		vport->port_state = LPFC_VPORT_READY;
	}
	return;
}
/*
 * This routine handles processing an FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
5755 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
5757 MAILBOX_t
*mb
= &pmb
->u
.mb
;
5758 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
5759 struct lpfc_nodelist
*ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
5760 struct lpfc_vport
*vport
= pmb
->vport
;
5762 pmb
->context1
= NULL
;
5763 pmb
->context2
= NULL
;
5765 if (phba
->sli_rev
< LPFC_SLI_REV4
)
5766 ndlp
->nlp_rpi
= mb
->un
.varWords
[0];
5767 ndlp
->nlp_flag
|= NLP_RPI_REGISTERED
;
5768 ndlp
->nlp_type
|= NLP_FABRIC
;
5769 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
5770 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_SLI
,
5771 "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
5772 ndlp
->nlp_rpi
, ndlp
->nlp_DID
, ndlp
->nlp_flag
,
5773 kref_read(&ndlp
->kref
),
5774 ndlp
->nlp_usg_map
, ndlp
);
5776 * Start issuing Fabric-Device Management Interface (FDMI) command to
5777 * 0xfffffa (FDMI well known port).
5778 * DHBA -> DPRT -> RHBA -> RPA (physical port)
5779 * DPRT -> RPRT (vports)
5781 if (vport
->port_type
== LPFC_PHYSICAL_PORT
)
5782 lpfc_fdmi_cmd(vport
, ndlp
, SLI_MGMT_DHBA
, 0);
5784 lpfc_fdmi_cmd(vport
, ndlp
, SLI_MGMT_DPRT
, 0);
5787 /* decrement the node reference count held for this callback
5791 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
5793 mempool_free(pmb
, phba
->mbox_mem_pool
);
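/*
 * Illustrative sketch only (not driver code): the FDMI registration kicked
 * off above is a chain of CT command completions rather than a single call.
 * Assuming the completion handler for each SLI_MGMT_* request issues the
 * next opcode in the comment's sequence, a physical port walks roughly:
 *
 *	lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
 *		-> completion issues SLI_MGMT_DPRT
 *		-> completion issues SLI_MGMT_RHBA
 *		-> completion issues SLI_MGMT_RPA
 *
 * while a vport only needs DPRT -> RPRT.
 */
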
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	/* check for active node */
	if (!NLP_CHK_NODE_ACT(ndlp))
		return 0;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "3185 FIND node filter %p DID "
					 "Data: x%p x%x x%x\n",
					 filter, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag);
			return ndlp;
		}
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3186 FIND node filter %p NOT FOUND.\n", filter);
	return NULL;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the RPI is
 * found, it returns the node list element pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

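/*
 * Minimal sketch (illustration only) of how a caller composes a custom
 * node_filter with __lpfc_find_node(); the lpfc_filter_by_did() helper
 * below is hypothetical, not part of the driver:
 *
 *	static int
 *	lpfc_filter_by_did(struct lpfc_nodelist *ndlp, void *param)
 *	{
 *		uint32_t *did = param;
 *
 *		return NLP_CHK_NODE_ACT(ndlp) && ndlp->nlp_DID == *did;
 *	}
 *
 *	spin_lock_irq(shost->host_lock);
 *	ndlp = __lpfc_find_node(vport, lpfc_filter_by_did, &did);
 *	spin_unlock_irq(shost->host_lock);
 *
 * The host lock is held around the walk, mirroring lpfc_findnode_wwpn()
 * below, since the filter runs against the live vport->fc_nodes list.
 */
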
/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the node list element pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the RPI is
 * found, it returns the node list element pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/**
 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: the physical host virtual N_Port identifier.
 *
 * This routine finds a vport on a HBA (referred by @phba) through a
 * @vpi. The function walks the HBA's vport list and returns the address
 * of the vport with the matching @vpi.
 *
 * Return code
 *    NULL - No vport with the matching @vpi found
 *    Otherwise - Address to the vport with the matching @vpi.
 **/
struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
	struct lpfc_vport *vport;
	unsigned long flags;
	int i = 0;

	/* The physical ports are always vpi 0 - translate is unnecessary. */
	if (vpi > 0) {
		/*
		 * Translate the physical vpi to the logical vpi. The
		 * vport stores the logical vpi.
		 */
		for (i = 0; i < phba->max_vpi; i++) {
			if (vpi == phba->vpi_ids[i])
				break;
		}

		if (i >= phba->max_vpi) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"2936 Could not find Vport mapped "
					"to vpi %d\n", vpi);
			return NULL;
		}
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->vpi == i) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return vport;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return NULL;
}

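/*
 * Worked example (sketch): assume phba->vpi_ids[] = { 0, 17, 23 }, i.e. the
 * logical-to-physical vpi map. lpfc_find_vport_by_vpid(phba, 23) translates
 * physical vpi 23 to logical vpi 2 and returns the vport whose vport->vpi
 * is 2. A @vpi of 0 skips the translation because the physical port is
 * always logical vpi 0.
 */
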
struct lpfc_nodelist *
lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	int rpi = LPFC_RPI_ALLOC_ERROR;

	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		rpi = lpfc_sli4_alloc_rpi(vport->phba);
		if (rpi == LPFC_RPI_ALLOC_ERROR)
			return NULL;
	}

	ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
	if (!ndlp) {
		if (vport->phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(vport->phba, rpi);
		return NULL;
	}

	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = rpi;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0007 rpi:%x DID:%x flg:%x refcnt:%d "
				 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);

		ndlp->active_rrqs_xri_bitmap =
				mempool_alloc(vport->phba->active_rrq_pool,
					      GFP_KERNEL);
		if (ndlp->active_rrqs_xri_bitmap)
			memset(ndlp->active_rrqs_xri_bitmap, 0,
			       ndlp->phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
			      "node init: did:x%x",
			      ndlp->nlp_DID, 0, 0);

	return ndlp;
}

/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release: did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p did %x "
			"usgmap:x%x refcnt:%d rpi:%x\n",
			(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
			kref_read(&ndlp->kref), ndlp->nlp_rpi);

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		if (phba->sli_rev == LPFC_SLI_REV4)
			mempool_free(ndlp->active_rrqs_xri_bitmap,
				     ndlp->phba->active_rrq_pool);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}

/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get: did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			kref_read(&ndlp->kref));
		/* The usage check prevents incrementing the reference count
		 * of an ndlp that is in the process of being released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}

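/*
 * Usage sketch (illustration only): a discovery path that hands an ndlp to
 * an asynchronous command takes a reference first and drops it from the
 * completion side:
 *
 *	ndlp = lpfc_nlp_get(ndlp);
 *	if (!ndlp)
 *		return;			(node lost the race with a release)
 *	... issue ELS/mailbox command carrying ndlp ...
 *	lpfc_nlp_put(ndlp);		(from the completion handler)
 *
 * A NULL return from lpfc_nlp_get() means the node is being released and
 * must not be referenced further.
 */
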
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, the associated nodelist should be freed. Returning 1
 * indicates the ndlp resource has been released; returning 0 indicates the
 * ndlp resource has not been released yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put: did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		kref_read(&ndlp->kref));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after the previous one has already freed the ndlp memory.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after the ndlp
	 * is already in the inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		return 1;
	}
	/* For the last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (kref_read(&ndlp->kref) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note: when kref_put decrements a reference count that was 1, it
	 * invokes the release callback and returns 1, leaving the count at
	 * 1 rather than performing the final decrement itself. Otherwise
	 * it decrements the count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}

/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. It returns 1 if the ndlp has been
 * freed; a return value of 0 indicates the ndlp has not yet been
 * released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used: did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		kref_read(&ndlp->kref));
	if (kref_read(&ndlp->kref) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}

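/*
 * Callers typically use the return value to drop their now-stale pointer,
 * e.g. (sketch of the pattern used elsewhere in the driver):
 *
 *	if (lpfc_nlp_not_used(ndlp))
 *		ndlp = NULL;
 */
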
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 **/
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	vports = lpfc_create_vport_work_array(phba);

	/* If driver cannot allocate memory, indicate fcf is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		/*
		 * IF the CVL_RCVD bit is not set then we have sent the
		 * flogi.
		 * If dev_loss fires while we are waiting we do not want to
		 * unreg the fcf.
		 */
		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
			spin_unlock_irq(shost->host_lock);
			ret = 1;
			goto out;
		}
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				ret = 1;
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
						"2624 RPI %x DID %x flag %x "
						"still logged in\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_flag);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}

/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 **/
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2555 UNREG_VFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 **/
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2550 UNREG_FCFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF. It unregisters, in order, RPIs, VPIs, and VFIs.
 **/
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i = 0, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_unreg_all_rpis(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
		if (ndlp)
			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
		lpfc_cleanup_pending_mbox(phba->pport);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(phba->pport);
		lpfc_mbx_unreg_vpi(phba->pport);
		shost = lpfc_shost_from_vport(phba->pport);
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister the physical port VFI */
	rc = lpfc_issue_unreg_vfi(phba->pport);
	return rc;
}

/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues a synchronous unregister FCF mailbox command to the
 * HBA to unregister the currently registered FCF record. The driver does
 * not reset the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 **/
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}

/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 **/
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}

/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 **/
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks whether any remote ports are still connected for the
 * FCF and, if all the devices are disconnected, unregisters the FCFI. It
 * also tries to use another FCF for discovery.
 **/
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}

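/*
 * Call-flow sketch (illustration only) of how the unregister entry points
 * above fit together; all three funnel through lpfc_unregister_fcf_prep():
 *
 *	lpfc_unregister_unused_fcf()
 *		-> lpfc_fcf_inuse() ? (keep FCF)
 *		   : lpfc_unregister_fcf_rescan()
 *			-> lpfc_unregister_fcf_prep()
 *			-> lpfc_sli4_unregister_fcf()
 *			-> lpfc_sli4_fcf_scan_read_fcf_rec(
 *					phba, LPFC_FCOE_FCF_GET_FIRST)
 */
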
/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structures for the FCF connection
 * record table read from config region 23.
 **/
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
			GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}

	if (!list_empty(&phba->fcf_conn_rec_list)) {
		i = 0;
		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
				    list) {
			conn_rec = &conn_entry->conn_rec;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3345 FCF connection list rec[%02d]: "
					"flags:x%04x, vtag:x%04x, "
					"fabric_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x, "
					"switch_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x\n", i++,
					conn_rec->flags, conn_rec->vlan_tag,
					conn_rec->fabric_name[0],
					conn_rec->fabric_name[1],
					conn_rec->fabric_name[2],
					conn_rec->fabric_name[3],
					conn_rec->fabric_name[4],
					conn_rec->fabric_name[5],
					conn_rec->fabric_name[6],
					conn_rec->fabric_name[7],
					conn_rec->switch_name[0],
					conn_rec->switch_name[1],
					conn_rec->switch_name[2],
					conn_rec->switch_name[3],
					conn_rec->switch_name[4],
					conn_rec->switch_name[5],
					conn_rec->switch_name[6],
					conn_rec->switch_name[7]);
		}
	}
}

/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 **/
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
	return;
}

/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to it; otherwise it returns NULL.
 **/
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
		(size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}

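/*
 * Region 23 TLV layout, as implied by the walker above: each record is one
 * header word (record type in byte 0, rec_length in data words in byte 1)
 * followed by rec_length data words. Sketch of a buffer it would parse:
 *
 *	buff[0] = FCOE_PARAM_TYPE;		record type
 *	buff[1] = 2;				two data words follow
 *	buff[4]..buff[11]			eight bytes of record data
 *	buff[12] = LPFC_REGION23_LAST_REC;	terminator
 *
 * Here lpfc_get_rec_conf23(buff, size, FCOE_PARAM_TYPE) returns &buff[0],
 * while a search for any other record type stops at buff[12] and returns
 * NULL.
 */
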
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 **/
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		uint8_t *buff,
		uint32_t size)
{
	uint32_t offset = 0;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words signature and version cannot be
	 * verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
			size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}