/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
        0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
        0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
        0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
        0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
        0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
        0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
        0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
        0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
        0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
        0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
        0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
        0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
        0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);

void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *ndlp;
        struct lpfc_hba *phba;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;

        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
                        printk(KERN_ERR "Cannot find remote node"
                               " to terminate I/O Data x%x\n",
                               rport->port_id);
                return;
        }

        phba = ndlp->phba;

        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
                "rport terminate: sid:x%x did:x%x flg:x%x",
                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

        if (ndlp->nlp_sid != NLP_NO_SID) {
                lpfc_sli_abort_iocb(ndlp->vport,
                        &phba->sli.sli3_ring[LPFC_FCP_RING],
                        ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }
}

/*
 * This function will be called when the dev_loss_tmo timer fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *ndlp;
        struct lpfc_vport *vport;
        struct Scsi_Host *shost;
        struct lpfc_hba *phba;
        struct lpfc_work_evt *evtp;
        int put_node;
        int put_rport;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
                return;

        vport = ndlp->vport;
        phba = vport->phba;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport devlosscb: sid:x%x did:x%x flg:x%x",
                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
                         "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
                         ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

        /* Don't defer this if we are in the process of deleting the vport
         * or unloading the driver. The unload will cleanup the node
         * appropriately we just need to cleanup the ndlp rport info here.
         */
        if (vport->load_flag & FC_UNLOADING) {
                put_node = rdata->pnode != NULL;
                put_rport = ndlp->rport != NULL;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
                if (put_node)
                        lpfc_nlp_put(ndlp);
                if (put_rport)
                        put_device(&rport->dev);
                return;
        }

        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
                return;

        if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
                                 "6789 rport name %llx != node port name %llx",
                                 rport->port_name,
                                 wwn_to_u64(ndlp->nlp_portname.u.wwn));

        evtp = &ndlp->dev_loss_evt;

        if (!list_empty(&evtp->evt_listp)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
                                 "6790 rport name %llx dev_loss_evt pending",
                                 rport->port_name);
                return;
        }

        shost = lpfc_shost_from_vport(vport);
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
        spin_unlock_irq(shost->host_lock);

        /* We need to hold the node by incrementing the reference
         * count until this queued work is done
         */
        evtp->evt_arg1 = lpfc_nlp_get(ndlp);

        spin_lock_irq(&phba->hbalock);
        if (evtp->evt_arg1) {
                evtp->evt = LPFC_EVT_DEV_LOSS;
                list_add_tail(&evtp->evt_listp, &phba->work_list);
                lpfc_worker_wake_up(phba);
        }
        spin_unlock_irq(&phba->hbalock);

        return;
}

/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still in use of the FCF; otherwise,
 * it returns 0 when no remote node is still using the FCF at the time the
 * devloss timeout fired for this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
        struct lpfc_rport_data *rdata;
        struct fc_rport *rport;
        struct lpfc_vport *vport;
        struct lpfc_hba *phba;
        struct Scsi_Host *shost;
        uint8_t *name;
        int put_node;
        int warn_on = 0;
        int fcf_inuse = 0;

        rport = ndlp->rport;
        vport = ndlp->vport;
        shost = lpfc_shost_from_vport(vport);

        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
        spin_unlock_irq(shost->host_lock);

        if (!rport)
                return fcf_inuse;

        name = (uint8_t *) &ndlp->nlp_portname;
        phba = vport->phba;

        if (phba->sli_rev == LPFC_SLI_REV4)
                fcf_inuse = lpfc_fcf_inuse(phba);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport devlosstmo:did:x%x type:x%x id:x%x",
                ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
                         "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
                         ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

        /*
         * lpfc_nlp_remove if reached with dangling rport drops the
         * reference. To make sure that does not happen clear rport
         * pointer in ndlp before lpfc_nlp_put.
         */
        rdata = rport->dd_data;

        /* Don't defer this if we are in the process of deleting the vport
         * or unloading the driver. The unload will cleanup the node
         * appropriately we just need to cleanup the ndlp rport info here.
         */
        if (vport->load_flag & FC_UNLOADING) {
                if (ndlp->nlp_sid != NLP_NO_SID) {
                        /* flush the target */
                        lpfc_sli_abort_iocb(vport,
                                            &phba->sli.sli3_ring[LPFC_FCP_RING],
                                            ndlp->nlp_sid, 0, LPFC_CTX_TGT);
                }
                put_node = rdata->pnode != NULL;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
                if (put_node)
                        lpfc_nlp_put(ndlp);
                put_device(&rport->dev);

                return fcf_inuse;
        }

        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                 "0284 Devloss timeout Ignored on "
                                 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
                                 "NPort x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID);
                return fcf_inuse;
        }

        put_node = rdata->pnode != NULL;
        rdata->pnode = NULL;
        ndlp->rport = NULL;
        if (put_node)
                lpfc_nlp_put(ndlp);
        put_device(&rport->dev);

        if (ndlp->nlp_type & NLP_FABRIC)
                return fcf_inuse;

        if (ndlp->nlp_sid != NLP_NO_SID) {
                warn_on = 1;
                lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
                                    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }

        if (warn_on) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                                 "0203 Devloss timeout on "
                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                                 "NPort x%06x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
                                 ndlp->nlp_state, ndlp->nlp_rpi);
        } else {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                 "0204 Devloss timeout on "
                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                                 "NPort x%06x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
                                 ndlp->nlp_state, ndlp->nlp_rpi);
        }

        if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
            !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
            (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
            (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
            (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

        return fcf_inuse;
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for SLI4 host. For the devloss
 * timeout of the last remote node which had been in use of FCF, when this
 * routine is invoked, it shall be guaranteed that none of the remote nodes
 * are in use of the FCF. When the devloss timeout fires for the last remote
 * node using the FCF, if the FIP engine is neither in FCF table scan process
 * nor roundrobin failover process, the in-use FCF shall be unregistered. If
 * the FIP engine is in FCF discovery process, the devloss timeout state shall
 * be set for either the FCF table scan process or roundrobin failover
 * process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
                                    uint32_t nlp_did)
{
        /* If devloss timeout happened to a remote node when FCF had no
         * longer been in-use, do nothing.
         */
        if (!fcf_inuse)
                return;

        if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
                spin_lock_irq(&phba->hbalock);
                if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
                        if (phba->hba_flag & HBA_DEVLOSS_TMO) {
                                spin_unlock_irq(&phba->hbalock);
                                return;
                        }
                        phba->hba_flag |= HBA_DEVLOSS_TMO;
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2847 Last remote node (x%x) using "
                                        "FCF devloss tmo\n", nlp_did);
                }
                if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2868 Devloss tmo to FCF rediscovery "
                                        "in progress\n");
                        return;
                }
                if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2869 Devloss tmo to idle FIP engine, "
                                        "unreg in-use FCF and rescan.\n");
                        /* Unregister in-use FCF and rescan */
                        lpfc_unregister_fcf_rescan(phba);
                        return;
                }
                spin_unlock_irq(&phba->hbalock);
                if (phba->hba_flag & FCF_TS_INPROG)
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2870 FCF table scan in progress\n");
                if (phba->hba_flag & FCF_RR_INPROG)
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                        "2871 FLOGI roundrobin FCF failover "
                                        "in progress\n");
        }
        lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when
 * there are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
        struct lpfc_fast_path_event *ret;

        /* If there are lot of fast event do not exhaust memory due to this */
        if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
                return NULL;

        ret = kzalloc(sizeof(struct lpfc_fast_path_event),
                      GFP_ATOMIC);
        if (ret) {
                atomic_inc(&phba->fast_event_count);
                INIT_LIST_HEAD(&ret->work_evt.evt_listp);
                ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
        }
        return ret;
}
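
/* Illustrative usage sketch (not part of the driver source): a producer in
 * interrupt context allocates the event, fills it in, and queues the embedded
 * work_evt (already initialized to LPFC_EVT_FASTPATH_MGMT_EVT by the
 * allocator) onto the worker thread's work_list, which posts and then frees
 * it via lpfc_send_fastpath_evt()/lpfc_free_fast_evt(). Field values below
 * are placeholders.
 *
 *      struct lpfc_fast_path_event *fast_evt;
 *      unsigned long flags;
 *
 *      fast_evt = lpfc_alloc_fast_evt(phba);
 *      if (fast_evt) {
 *              fast_evt->vport = vport;
 *              ... fill fast_evt->un with the event payload ...
 *              spin_lock_irqsave(&phba->hbalock, flags);
 *              list_add_tail(&fast_evt->work_evt.evt_listp,
 *                            &phba->work_list);
 *              spin_unlock_irqrestore(&phba->hbalock, flags);
 *              lpfc_worker_wake_up(phba);
 *      }
 */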

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
                   struct lpfc_fast_path_event *evt) {

        atomic_dec(&phba->fast_event_count);
        kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread, when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
                       struct lpfc_work_evt *evtp)
{
        unsigned long evt_category, evt_sub_category;
        struct lpfc_fast_path_event *fast_evt_data;
        char *evt_data;
        uint32_t evt_data_size;
        struct Scsi_Host *shost;

        fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
                work_evt);

        evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
        evt_sub_category = (unsigned long) fast_evt_data->un.
                fabric_evt.subcategory;
        shost = lpfc_shost_from_vport(fast_evt_data->vport);
        if (evt_category == FC_REG_FABRIC_EVENT) {
                if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
                        evt_data = (char *) &fast_evt_data->un.read_check_error;
                        evt_data_size = sizeof(fast_evt_data->un.
                                read_check_error);
                } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
                        (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
                        evt_data = (char *) &fast_evt_data->un.fabric_evt;
                        evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
                } else {
                        lpfc_free_fast_evt(phba, fast_evt_data);
                        return;
                }
        } else if (evt_category == FC_REG_SCSI_EVENT) {
                switch (evt_sub_category) {
                case LPFC_EVENT_QFULL:
                case LPFC_EVENT_DEVBSY:
                        evt_data = (char *) &fast_evt_data->un.scsi_evt;
                        evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
                        break;
                case LPFC_EVENT_CHECK_COND:
                        evt_data = (char *) &fast_evt_data->un.check_cond_evt;
                        evt_data_size = sizeof(fast_evt_data->un.
                                check_cond_evt);
                        break;
                case LPFC_EVENT_VARQUEDEPTH:
                        evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
                        evt_data_size = sizeof(fast_evt_data->un.
                                queue_depth_evt);
                        break;
                default:
                        lpfc_free_fast_evt(phba, fast_evt_data);
                        return;
                }
        } else {
                lpfc_free_fast_evt(phba, fast_evt_data);
                return;
        }

        if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
                fc_host_post_vendor_event(shost,
                        fc_get_event_number(),
                        evt_data_size,
                        evt_data,
                        LPFC_NL_VENDOR_ID);

        lpfc_free_fast_evt(phba, fast_evt_data);
        return;
}

static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
        struct lpfc_work_evt *evtp = NULL;
        struct lpfc_nodelist *ndlp;
        int free_evt;
        int fcf_inuse;
        uint32_t nlp_did;

        spin_lock_irq(&phba->hbalock);
        while (!list_empty(&phba->work_list)) {
                list_remove_head((&phba->work_list), evtp, typeof(*evtp),
                                 evt_listp);
                spin_unlock_irq(&phba->hbalock);
                free_evt = 1;
                switch (evtp->evt) {
                case LPFC_EVT_ELS_RETRY:
                        ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
                        lpfc_els_retry_delay_handler(ndlp);
                        free_evt = 0; /* evt is part of ndlp */
                        /* decrement the node reference count held
                         * for this queued work
                         */
                        lpfc_nlp_put(ndlp);
                        break;
                case LPFC_EVT_DEV_LOSS:
                        ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
                        fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
                        free_evt = 0;
                        /* decrement the node reference count held for
                         * this queued work
                         */
                        nlp_did = ndlp->nlp_DID;
                        lpfc_nlp_put(ndlp);
                        if (phba->sli_rev == LPFC_SLI_REV4)
                                lpfc_sli4_post_dev_loss_tmo_handler(phba,
                                                                    fcf_inuse,
                                                                    nlp_did);
                        break;
                case LPFC_EVT_ONLINE:
                        if (phba->link_state < LPFC_LINK_DOWN)
                                *(int *) (evtp->evt_arg1) = lpfc_online(phba);
                        else
                                *(int *) (evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE_PREP:
                        if (phba->link_state >= LPFC_LINK_DOWN)
                                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
                        *(int *)(evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE:
                        lpfc_offline(phba);
                        lpfc_sli_brdrestart(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_WARM_START:
                        lpfc_offline(phba);
                        lpfc_reset_barrier(phba);
                        lpfc_sli_brdreset(phba);
                        lpfc_hba_down_post(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_KILL:
                        lpfc_offline(phba);
                        *(int *)(evtp->evt_arg1)
                                = (phba->pport->stopped)
                                        ? 0 : lpfc_sli_brdkill(phba);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_FASTPATH_MGMT_EVT:
                        lpfc_send_fastpath_evt(phba, evtp);
                        free_evt = 0;
                        break;
                case LPFC_EVT_RESET_HBA:
                        if (!(phba->pport->load_flag & FC_UNLOADING))
                                lpfc_reset_hba(phba);
                        break;
                }
                if (free_evt)
                        kfree(evtp);
                spin_lock_irq(&phba->hbalock);
        }
        spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_work_done(struct lpfc_hba *phba)
{
        struct lpfc_sli_ring *pring;
        uint32_t ha_copy, status, control, work_port_events;
        struct lpfc_vport **vports;
        struct lpfc_vport *vport;
        int i;

        spin_lock_irq(&phba->hbalock);
        ha_copy = phba->work_ha;
        phba->work_ha = 0;
        spin_unlock_irq(&phba->hbalock);

        /* First, try to post the next mailbox command to SLI4 device */
        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
                lpfc_sli4_post_async_mbox(phba);

        if (ha_copy & HA_ERATT)
                /* Handle the error attention event */
                lpfc_handle_eratt(phba);

        if (ha_copy & HA_MBATT)
                lpfc_sli_handle_mb_event(phba);

        if (ha_copy & HA_LATT)
                lpfc_handle_latt(phba);

        /* Process SLI4 events */
        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
                if (phba->hba_flag & HBA_RRQ_ACTIVE)
                        lpfc_handle_rrq_active(phba);
                if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
                        lpfc_sli4_els_xri_abort_event_proc(phba);
                if (phba->hba_flag & ASYNC_EVENT)
                        lpfc_sli4_async_event_proc(phba);
                if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
                        spin_lock_irq(&phba->hbalock);
                        phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
                }
                if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
                        lpfc_sli4_fcf_redisc_event_proc(phba);
        }

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports; i++) {
                        /*
                         * We could have no vports in array if unloading, so if
                         * this happens then just use the pport
                         */
                        if (vports[i] == NULL && i == 0)
                                vport = phba->pport;
                        else
                                vport = vports[i];
                        if (vport == NULL)
                                break;
                        spin_lock_irq(&vport->work_port_lock);
                        work_port_events = vport->work_port_events;
                        vport->work_port_events &= ~work_port_events;
                        spin_unlock_irq(&vport->work_port_lock);
                        if (work_port_events & WORKER_DISC_TMO)
                                lpfc_disc_timeout_handler(vport);
                        if (work_port_events & WORKER_ELS_TMO)
                                lpfc_els_timeout_handler(vport);
                        if (work_port_events & WORKER_HB_TMO)
                                lpfc_hb_timeout_handler(phba);
                        if (work_port_events & WORKER_MBOX_TMO)
                                lpfc_mbox_timeout_handler(phba);
                        if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
                                lpfc_unblock_fabric_iocbs(phba);
                        if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
                                lpfc_ramp_down_queue_handler(phba);
                        if (work_port_events & WORKER_DELAYED_DISC_TMO)
                                lpfc_delayed_disc_timeout_handler(vport);
                }
        lpfc_destroy_vport_work_array(phba, vports);

        pring = lpfc_phba_elsring(phba);
        status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
        status >>= (4*LPFC_ELS_RING);
        if (pring && (status & HA_RXMASK ||
                      pring->flag & LPFC_DEFERRED_RING_EVENT ||
                      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
                        /* Preserve legacy behavior. */
                        if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
                                set_bit(LPFC_DATA_READY, &phba->data_flags);
                } else {
                        if (phba->link_state >= LPFC_LINK_UP ||
                            phba->link_flag & LS_MDS_LOOPBACK) {
                                pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                                lpfc_sli_handle_slow_ring_event(phba, pring,
                                                                (status &
                                                                HA_RXMASK));
                        }
                }
                if (phba->sli_rev == LPFC_SLI_REV4)
                        lpfc_drain_txq(phba);
                /*
                 * Turn on Ring interrupts
                 */
                if (phba->sli_rev <= LPFC_SLI_REV3) {
                        spin_lock_irq(&phba->hbalock);
                        control = readl(phba->HCregaddr);
                        if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
                                lpfc_debugfs_slow_ring_trc(phba,
                                        "WRK Enable ring: cntl:x%x hacopy:x%x",
                                        control, ha_copy, 0);

                                control |= (HC_R0INT_ENA << LPFC_ELS_RING);
                                writel(control, phba->HCregaddr);
                                readl(phba->HCregaddr); /* flush */
                        } else {
                                lpfc_debugfs_slow_ring_trc(phba,
                                        "WRK Ring ok: cntl:x%x hacopy:x%x",
                                        control, ha_copy, 0);
                        }
                        spin_unlock_irq(&phba->hbalock);
                }
        }
        lpfc_work_list_done(phba);
}

static int
lpfc_do_work(void *p)
{
        struct lpfc_hba *phba = p;
        int rc;

        set_user_nice(current, MIN_NICE);
        current->flags |= PF_NOFREEZE;
        phba->data_flags = 0;

        while (!kthread_should_stop()) {
                /* wait and check worker queue activities */
                rc = wait_event_interruptible(phba->work_waitq,
                                (test_and_clear_bit(LPFC_DATA_READY,
                                                    &phba->data_flags)
                                 || kthread_should_stop()));
                /* Signal wakeup shall terminate the worker thread */
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
                                        "0433 Wakeup on signal: rc=x%x\n", rc);
                        break;
                }

                /* Attend pending lpfc data processing */
                lpfc_work_done(phba);
        }
        phba->worker_thread = NULL;
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
                        "0432 Worker thread stopped.\n");
        return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
                      uint32_t evt)
{
        struct lpfc_work_evt *evtp;
        unsigned long flags;

        /*
         * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
         * be queued to worker thread for processing
         */
        evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
        if (!evtp)
                return 0;

        evtp->evt_arg1 = arg1;
        evtp->evt_arg2 = arg2;
        evtp->evt = evt;

        spin_lock_irqsave(&phba->hbalock, flags);
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        spin_unlock_irqrestore(&phba->hbalock, flags);

        lpfc_worker_wake_up(phba);

        return 1;
}
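
/* Illustrative usage sketch (not part of the driver source): callers that
 * must wait for the worker thread pair this with a completion, which the
 * LPFC_EVT_ONLINE/OFFLINE handlers in lpfc_work_list_done() signal once the
 * posted status has been written back through evt_arg1:
 *
 *      DECLARE_COMPLETION_ONSTACK(online_compl);
 *      int status = 0;
 *
 *      if (lpfc_workq_post_event(phba, &status, &online_compl,
 *                                LPFC_EVT_ONLINE))
 *              wait_for_completion(&online_compl);
 */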

void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        struct lpfc_nodelist *ndlp, *next_ndlp;

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
                if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
                    ((vport->port_type == LPFC_NPIV_PORT) &&
                     (ndlp->nlp_DID == NameServer_DID)))
                        lpfc_unreg_rpi(vport, ndlp);

                /* Leave Fabric nodes alone on link down */
                if ((phba->sli_rev < LPFC_SLI_REV4) &&
                    (!remove && ndlp->nlp_type & NLP_FABRIC))
                        continue;
                lpfc_disc_state_machine(vport, ndlp, NULL,
                                        remove
                                        ? NLP_EVT_DEVICE_RM
                                        : NLP_EVT_DEVICE_RECOVERY);
        }
        if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
                if (phba->sli_rev == LPFC_SLI_REV4)
                        lpfc_sli4_unreg_all_rpis(vport);
                lpfc_mbx_unreg_vpi(vport);
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
                spin_unlock_irq(shost->host_lock);
        }
}

void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
        lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

        /* Cleanup any outstanding received buffers */
        lpfc_cleanup_rcv_buffers(vport);

        /* Cleanup any outstanding RSCN activity */
        lpfc_els_flush_rscn(vport);

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_cmd(vport);

        lpfc_cleanup_rpis(vport, 0);

        /* Turn off discovery timer if it's running */
        lpfc_can_disctmo(vport);
}

void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
                fc_host_post_event(shost, fc_get_event_number(),
                                   FCH_EVT_LINKDOWN, 0);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Link Down:       state:x%x rtry:x%x flg:x%x",
                vport->port_state, vport->fc_ns_retry, vport->fc_flag);

        lpfc_port_link_failure(vport);

        /* Stop delayed Nport discovery */
        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~FC_DISC_DELAYED;
        spin_unlock_irq(shost->host_lock);
        del_timer_sync(&vport->delayed_disc_tmo);
}

int
lpfc_linkdown(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_vport **vports;
        LPFC_MBOXQ_t *mb;
        int i;

        if (phba->link_state == LPFC_LINK_DOWN) {
                if (phba->sli4_hba.conf_trunk) {
                        phba->trunk_link.link0.state = 0;
                        phba->trunk_link.link1.state = 0;
                        phba->trunk_link.link2.state = 0;
                        phba->trunk_link.link3.state = 0;
                }
                return 0;
        }
        /* Block all SCSI stack I/Os */
        lpfc_scsi_dev_block(phba);

        phba->defer_flogi_acc_flag = false;

        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
        spin_unlock_irq(&phba->hbalock);
        if (phba->link_state > LPFC_LINK_DOWN) {
                phba->link_state = LPFC_LINK_DOWN;
                if (phba->sli4_hba.conf_trunk) {
                        phba->trunk_link.link0.state = 0;
                        phba->trunk_link.link1.state = 0;
                        phba->trunk_link.link2.state = 0;
                        phba->trunk_link.link3.state = 0;
                }
                spin_lock_irq(shost->host_lock);
                phba->pport->fc_flag &= ~FC_LBIT;
                spin_unlock_irq(shost->host_lock);
        }
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL) {
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        /* Issue a LINK DOWN event to all nodes */
                        lpfc_linkdown_port(vports[i]);

                        vports[i]->fc_myDID = 0;

                        if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
                            (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
                                if (phba->nvmet_support)
                                        lpfc_nvmet_update_targetport(phba);
                                else
                                        lpfc_nvme_update_localport(vports[i]);
                        }
                }
        }
        lpfc_destroy_vport_work_array(phba, vports);
        /* Clean up any firmware default rpi's */
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mb) {
                lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
                mb->vport = vport;
                mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                    == MBX_NOT_FINISHED) {
                        mempool_free(mb, phba->mbox_mem_pool);
                }
        }

        /* Setup myDID for link up if we are in pt2pt mode */
        if (phba->pport->fc_flag & FC_PT2PT) {
                mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mb) {
                        lpfc_config_link(phba, mb);
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        mb->vport = vport;
                        if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                            == MBX_NOT_FINISHED) {
                                mempool_free(mb, phba->mbox_mem_pool);
                        }
                }
                spin_lock_irq(shost->host_lock);
                phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
                phba->pport->rcv_flogi_cnt = 0;
                spin_unlock_irq(shost->host_lock);
        }
        return 0;
}

static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp;

        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
                if (ndlp->nlp_type & NLP_FABRIC) {
                        /* On Linkup its safe to clean up the ndlp
                         * from Fabric connections.
                         */
                        if (ndlp->nlp_DID != Fabric_DID)
                                lpfc_unreg_rpi(vport, ndlp);
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                        /* Fail outstanding IO now since device is
                         * marked for PLOGI.
                         */
                        lpfc_unreg_rpi(vport, ndlp);
                }
        }
}

static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;

        if ((vport->load_flag & FC_UNLOADING) != 0)
                return;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Link Up:         top:x%x speed:x%x flg:x%x",
                phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

        /* If NPIV is not enabled, only bring the physical port up */
        if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            (vport != phba->pport))
                return;

        if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
                fc_host_post_event(shost, fc_get_event_number(),
                                   FCH_EVT_LINKUP, 0);

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
                            FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
        vport->fc_flag |= FC_NDISC_ACTIVE;
        vport->fc_ns_retry = 0;
        spin_unlock_irq(shost->host_lock);

        if (vport->fc_flag & FC_LBIT)
                lpfc_linkup_cleanup_nodes(vport);
}

static int
lpfc_linkup(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(phba->pport);

        phba->link_state = LPFC_LINK_UP;

        /* Unblock fabric iocbs if they are blocked */
        clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
        del_timer_sync(&phba->fabric_block_timer);

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_linkup_port(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);

        /* Clear the pport flogi counter in case the link down was
         * absorbed without an ACQE. No lock here - in worker thread
         * and discovery is synchronized.
         */
        spin_lock_irq(shost->host_lock);
        phba->pport->rcv_flogi_cnt = 0;
        spin_unlock_irq(shost->host_lock);

        /* reinitialize initial FLOGI flag */
        phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
        phba->defer_flogi_acc_flag = false;

        return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI3 only.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_sli   *psli = &phba->sli;
        MAILBOX_t *mb = &pmb->u.mb;
        uint32_t control;

        /* Since we don't do discovery right now, turn these off here */
        psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

        /* Check for error */
        if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
                /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                                 "0320 CLEAR_LA mbxStatus error x%x hba "
                                 "state x%x\n",
                                 mb->mbxStatus, vport->port_state);
                phba->link_state = LPFC_HBA_ERROR;
                goto out;
        }

        if (vport->port_type == LPFC_PHYSICAL_PORT)
                phba->link_state = LPFC_HBA_READY;

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;

out:
        /* Device Discovery completes */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0225 Device Discovery completes\n");
        mempool_free(pmb, phba->mbox_mem_pool);

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~FC_ABORT_DISCOVERY;
        spin_unlock_irq(shost->host_lock);

        lpfc_can_disctmo(vport);

        /* turn on Link Attention interrupts */

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        return;
}

void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        uint8_t bbscn = 0;

        if (pmb->u.mb.mbxStatus)
                goto out;

        mempool_free(pmb, phba->mbox_mem_pool);

        /* don't perform discovery for SLI4 loopback diagnostic test */
        if ((phba->sli_rev == LPFC_SLI_REV4) &&
            !(phba->hba_flag & HBA_FCOE_MODE) &&
            (phba->link_flag & LS_LOOPBACK_MODE))
                return;

        if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
            vport->fc_flag & FC_PUBLIC_LOOP &&
            !(vport->fc_flag & FC_LBIT)) {
                /* Need to wait for FAN - use discovery timer
                 * for timeout. port_state is identically
                 * LPFC_LOCAL_CFG_LINK while waiting for FAN
                 */
                lpfc_set_disctmo(vport);
                return;
        }

        /* Start discovery by sending a FLOGI. port_state is identically
         * LPFC_FLOGI while waiting for FLOGI cmpl
         */
        if (vport->port_state != LPFC_FLOGI) {
                if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
                        bbscn = bf_get(lpfc_bbscn_def,
                                       &phba->sli4_hba.bbscn_params);
                        vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
                        vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
                }
                lpfc_initial_flogi(vport);
        } else if (vport->fc_flag & FC_PT2PT) {
                lpfc_disc_start(vport);
        }
        return;

out:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                         "0306 CONFIG_LINK mbxStatus error x%x "
                         "HBA state x%x\n",
                         pmb->u.mb.mbxStatus, vport->port_state);
        mempool_free(pmb, phba->mbox_mem_pool);

        lpfc_linkdown(phba);

        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                         "0200 CONFIG_LINK bad hba state x%x\n",
                         vport->port_state);

        lpfc_issue_clear_la(phba, vport);
        return;
}

/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 *
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
static void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
        struct lpfc_fcf_pri *fcf_pri;
        struct lpfc_fcf_pri *next_fcf_pri;
        memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(fcf_pri, next_fcf_pri,
                                 &phba->fcf.fcf_pri_list, list) {
                list_del_init(&fcf_pri->list);
                fcf_pri->fcf_rec.flag = 0;
        }
        spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        struct lpfc_vport *vport = mboxq->vport;

        if (mboxq->u.mb.mbxStatus) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                                 "2017 REG_FCFI mbxStatus error x%x "
                                 "HBA state x%x\n",
                                 mboxq->u.mb.mbxStatus, vport->port_state);
                goto fail_out;
        }

        /* Start FCoE discovery by sending a FLOGI. */
        phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
        /* Set the FCFI registered flag */
        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag |= FCF_REGISTERED;
        spin_unlock_irq(&phba->hbalock);

        /* If there is a pending FCoE event, restart FCF table scan. */
        if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
            lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
                goto fail_out;

        /* Mark successful completion of FCF table scan */
        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
        phba->hba_flag &= ~FCF_TS_INPROG;
        if (vport->port_state != LPFC_FLOGI) {
                phba->hba_flag |= FCF_RR_INPROG;
                spin_unlock_irq(&phba->hbalock);
                lpfc_issue_init_vfi(vport);
                goto out;
        }
        spin_unlock_irq(&phba->hbalock);
        goto out;

fail_out:
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~FCF_RR_INPROG;
        spin_unlock_irq(&phba->hbalock);
out:
        mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
        if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
                return 0;
        if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
                return 0;
        if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
                return 0;
        if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
                return 0;
        if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
                return 0;
        if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
                return 0;
        if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
                return 0;
        if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
                return 0;
        return 1;
}

/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
        if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
                return 0;
        if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
                return 0;
        if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
                return 0;
        if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
                return 0;
        if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
                return 0;
        if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
                return 0;
        if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
                return 0;
        if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
                return 0;
        return 1;
}

/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
        if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
                return 0;
        if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
                return 0;
        if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
                return 0;
        if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
                return 0;
        if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
                return 0;
        if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
                return 0;
        return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
        return (curr_vlan_id == new_vlan_id);
}

/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. This routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
                             struct fcf_record *new_fcf_record)
{
        struct lpfc_fcf_pri *fcf_pri;

        lockdep_assert_held(&phba->hbalock);

        fcf_pri = &phba->fcf.fcf_pri[fcf_index];
        fcf_pri->fcf_rec.fcf_index = fcf_index;
        /* FCF record priority */
        fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
                     struct fcf_record *new_fcf_record)
{
        /* Fabric name */
        fcf_rec->fabric_name[0] =
                bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
        fcf_rec->fabric_name[1] =
                bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
        fcf_rec->fabric_name[2] =
                bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
        fcf_rec->fabric_name[3] =
                bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
        fcf_rec->fabric_name[4] =
                bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
        fcf_rec->fabric_name[5] =
                bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
        fcf_rec->fabric_name[6] =
                bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
        fcf_rec->fabric_name[7] =
                bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
        /* Mac address */
        fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
        fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
        fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
        fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
        fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
        fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
        /* FCF record index */
        fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
        /* FCF record priority */
        fcf_rec->priority = new_fcf_record->fip_priority;
        /* Switch name */
        fcf_rec->switch_name[0] =
                bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
        fcf_rec->switch_name[1] =
                bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
        fcf_rec->switch_name[2] =
                bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
        fcf_rec->switch_name[3] =
                bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
        fcf_rec->switch_name[4] =
                bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
        fcf_rec->switch_name[5] =
                bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
        fcf_rec->switch_name[6] =
                bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
        fcf_rec->switch_name[7] =
                bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
                         struct fcf_record *new_fcf_record, uint32_t addr_mode,
                         uint16_t vlan_id, uint32_t flag)
{
        lockdep_assert_held(&phba->hbalock);

        /* Copy the fields from the HBA's FCF record */
        lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
        /* Update other fields of driver FCF record */
        fcf_rec->addr_mode = addr_mode;
        fcf_rec->vlan_id = vlan_id;
        fcf_rec->flag |= (flag | RECORD_VALID);
        __lpfc_update_fcf_record_pri(phba,
                bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
                new_fcf_record);
}

/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with hba.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *fcf_mbxq;
        int rc;

        spin_lock_irq(&phba->hbalock);
        /* If the FCF is not available do nothing. */
        if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
                phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
                spin_unlock_irq(&phba->hbalock);
                return;
        }

        /* The FCF is already registered, start discovery */
        if (phba->fcf.fcf_flag & FCF_REGISTERED) {
                phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
                phba->hba_flag &= ~FCF_TS_INPROG;
                if (phba->pport->port_state != LPFC_FLOGI &&
                    phba->pport->fc_flag & FC_FABRIC) {
                        phba->hba_flag |= FCF_RR_INPROG;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_initial_flogi(phba->pport);
                        return;
                }
                spin_unlock_irq(&phba->hbalock);
                return;
        }
        spin_unlock_irq(&phba->hbalock);

        fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!fcf_mbxq) {
                spin_lock_irq(&phba->hbalock);
                phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
                spin_unlock_irq(&phba->hbalock);
                return;
        }

        lpfc_reg_fcfi(phba, fcf_mbxq);
        fcf_mbxq->vport = phba->pport;
        fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
        rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                spin_lock_irq(&phba->hbalock);
                phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
                spin_unlock_irq(&phba->hbalock);
                mempool_free(fcf_mbxq, phba->mbox_mem_pool);
        }

        return;
}

/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from the
 * config region to decide if this FCF can be used for SAN discovery. It returns
 * 1 if this record can be used for SAN discovery, else it returns zero. If this
 * FCF record can be used for SAN discovery, boot_flag will indicate if this FCF
 * is used by boot bios and addr_mode will indicate the addressing mode to be
 * used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in the vlan_id on return of the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
                         struct fcf_record *new_fcf_record,
                         uint32_t *boot_flag, uint32_t *addr_mode,
                         uint16_t *vlan_id)
{
        struct lpfc_fcf_conn_entry *conn_entry;
        int i, j, fcf_vlan_id = 0;

        /* Find the lowest VLAN id in the FCF record */
        for (i = 0; i < 512; i++) {
                if (new_fcf_record->vlan_bitmap[i]) {
                        fcf_vlan_id = i * 8;
                        j = 0;
                        while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
                                j++;
                                fcf_vlan_id++;
                        }
                        break;
                }
        }
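
        /*
         * Illustrative note (not in the original source): bit j of
         * vlan_bitmap[i] represents VLAN id (i * 8 + j), LSB first. For
         * example, if the first non-zero byte is vlan_bitmap[12] == 0x30
         * (bits 4 and 5 set), the loop above yields
         * fcf_vlan_id = 12 * 8 + 4 = 100, the lowest VLAN id in the record.
         */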

        /* FCF not valid/available or solicitation in progress */
        if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
            !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
            bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
                return 0;

        if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
                *boot_flag = 0;
                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
                                    new_fcf_record);
                if (phba->valid_vlan)
                        *vlan_id = phba->vlan_id;
                else
                        *vlan_id = LPFC_FCOE_NULL_VID;
                return 1;
        }

        /*
         * If there are no FCF connection table entries, driver connects to all
         * FCFs.
         */
        if (list_empty(&phba->fcf_conn_rec_list)) {
                *boot_flag = 0;
                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
                                    new_fcf_record);

                /*
                 * When there are no FCF connect entries, use driver's default
                 * addressing mode - FPMA.
                 */
                if (*addr_mode & LPFC_FCF_FPMA)
                        *addr_mode = LPFC_FCF_FPMA;

                /* If FCF record reports a vlan id use that vlan id */
                if (fcf_vlan_id)
                        *vlan_id = fcf_vlan_id;
                else
                        *vlan_id = LPFC_FCOE_NULL_VID;
                return 1;
        }

        list_for_each_entry(conn_entry,
                            &phba->fcf_conn_rec_list, list) {
                if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
                        continue;

                if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
                    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
                                         new_fcf_record))
                        continue;
                if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
                    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
                                        new_fcf_record))
                        continue;
                if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
                        /*
                         * If the vlan bit map does not have the bit set for the
                         * vlan id to be used, then it is not a match.
                         */
                        if (!(new_fcf_record->vlan_bitmap
                                [conn_entry->conn_rec.vlan_tag / 8] &
                                (1 << (conn_entry->conn_rec.vlan_tag % 8))))
                                continue;
                }

                /*
                 * If connection record does not support any addressing mode,
                 * skip the FCF record.
                 */
                if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
                        & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
                        continue;

                /*
                 * Check if the connection record specifies a required
                 * addressing mode.
                 */
                if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
                    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

                        /*
                         * If SPMA is required but FCF does not support it,
                         * continue.
                         */
                        if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
                            !(bf_get(lpfc_fcf_record_mac_addr_prov,
                                     new_fcf_record) & LPFC_FCF_SPMA))
                                continue;

                        /*
                         * If FPMA is required but FCF does not support it,
                         * continue.
                         */
                        if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
                            !(bf_get(lpfc_fcf_record_mac_addr_prov,
                                     new_fcf_record) & LPFC_FCF_FPMA))
                                continue;
                }

                /*
                 * This fcf record matches filtering criteria.
                 */
                if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
                        *boot_flag = 1;
                else
                        *boot_flag = 0;

                /*
                 * If user did not specify any addressing mode, or if the
                 * preferred addressing mode specified by user is not supported
                 * by FCF, allow fabric to pick the addressing mode.
                 */
                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
                                    new_fcf_record);
                /*
                 * If the user specified a required address mode, assign that
                 * address mode
                 */
                if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
                    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
                        *addr_mode = (conn_entry->conn_rec.flags &
                                      FCFCNCT_AM_SPMA) ?
                                      LPFC_FCF_SPMA : LPFC_FCF_FPMA;
                /*
                 * If the user specified a preferred address mode, use the
                 * addr mode only if FCF supports the addr_mode.
                 */
                else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
                         (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
                         (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
                         (*addr_mode & LPFC_FCF_SPMA))
                        *addr_mode = LPFC_FCF_SPMA;
                else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
                         (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
                         !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
                         (*addr_mode & LPFC_FCF_FPMA))
                        *addr_mode = LPFC_FCF_FPMA;

                /* If matching connect list has a vlan id, use it */
                if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
                        *vlan_id = conn_entry->conn_rec.vlan_tag;
                /*
                 * If no vlan id is specified in connect list, use the vlan id
                 * of the FCF record
                 */
                else if (fcf_vlan_id)
                        *vlan_id = fcf_vlan_id;
                else
                        *vlan_id = LPFC_FCOE_NULL_VID;

                return 1;
        }

        return 0;
}

/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else it returns 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
        /*
         * If the Link is up and no FCoE events while in the
         * FCF discovery, no need to restart FCF discovery.
         */
        if ((phba->link_state >= LPFC_LINK_UP) &&
            (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
                return 0;

        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                        "2768 Pending link or FCF event during current "
                        "handling of the previous event: link_state:x%x, "
                        "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
                        phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
                        phba->fcoe_eventtag);

        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
        spin_unlock_irq(&phba->hbalock);

        if (phba->link_state >= LPFC_LINK_UP) {
                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
                                "2780 Restart FCF table scan due to "
                                "pending FCF event:evt_tag_at_scan:x%x, "
                                "evt_tag_current:x%x\n",
                                phba->fcoe_eventtag_at_fcf_scan,
                                phba->fcoe_eventtag);
                lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
        } else {
                /*
                 * Do not continue FCF discovery and clear FCF_TS_INPROG
                 * flag
                 */
                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
                                "2833 Stop FCF discovery process due to link "
                                "state change (x%x)\n", phba->link_state);
                spin_lock_irq(&phba->hbalock);
                phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
                phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
                spin_unlock_irq(&phba->hbalock);
        }

        /* Unregister the currently registered FCF if required */
        if (unreg_fcf) {
                spin_lock_irq(&phba->hbalock);
                phba->fcf.fcf_flag &= ~FCF_REGISTERED;
                spin_unlock_irq(&phba->hbalock);
                lpfc_sli4_unregister_fcf(phba);
        }
        return 1;
}

/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * This function makes a running random selection decision on the FCF record to
 * use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 * from prandom_u32() are taken as the 16-bit random number generated.
 *
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; otherwise, returns false when the outcome is for keeping the
 * previously chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
        uint32_t rand_num;

        /* Get 16-bit uniform random number */
        rand_num = 0xFFFF & prandom_u32();

        /* Decision with probability 1/fcf_cnt */
        if ((fcf_cnt * rand_num) < 0xFFFF)
                return true;
        else
                return false;
}
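
/* Illustrative note (not in the original source): this is reservoir sampling
 * with a reservoir of one. With rand_num uniform on [0, 0xFFFF], the test
 * (fcf_cnt * rand_num) < 0xFFFF accepts with probability ~1/fcf_cnt; e.g.
 * for the 4th eligible record (fcf_cnt == 4) the new record replaces the
 * previous choice only when rand_num < 0x4000, i.e. with probability 1/4,
 * which leaves every record seen so far equally likely to be the final
 * selection.
 */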

/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 **/
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
                             uint16_t *next_fcf_index)
{
        uint8_t *virt_addr;
        struct lpfc_mbx_sge sge;
        struct lpfc_mbx_read_fcf_tbl *read_fcf;
        uint32_t shdr_status, shdr_add_status, if_type;
        union lpfc_sli4_cfg_shdr *shdr;
        struct fcf_record *new_fcf_record;

        /* Get the first SGE entry from the non-embedded DMA memory. This
         * routine only uses a single SGE.
         */
        lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
        if (unlikely(!mboxq->sge_array)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                "2524 Failed to get the non-embedded SGE "
                                "virtual address\n");
                return NULL;
        }
        virt_addr = mboxq->sge_array->addr[0];

        shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
        lpfc_sli_pcimem_bcopy(shdr, shdr,
                              sizeof(union lpfc_sli4_cfg_shdr));
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
        if (shdr_status || shdr_add_status) {
                if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
                    if_type == LPFC_SLI_INTF_IF_TYPE_2)
                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
                                        "2726 READ_FCF_RECORD Indicates empty "
                                        "FCF table.\n");
                else
                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
                                        "2521 READ_FCF_RECORD mailbox failed "
                                        "with status x%x add_status x%x, "
                                        "mbx\n", shdr_status, shdr_add_status);
                return NULL;
        }

        /* Interpreting the returned information of the FCF record */
        read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
        lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
                              sizeof(struct lpfc_mbx_read_fcf_tbl));
        *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
        new_fcf_record = (struct fcf_record *)(virt_addr +
                          sizeof(struct lpfc_mbx_read_fcf_tbl));
        lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
                              offsetof(struct fcf_record, vlan_bitmap));
        new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
        new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

        return new_fcf_record;
}

/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
                              struct fcf_record *fcf_record,
                              uint16_t vlan_id,
                              uint16_t next_fcf_index)
{
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                        "2764 READ_FCF_RECORD:\n"
                        "\tFCF_Index     : x%x\n"
                        "\tFCF_Avail     : x%x\n"
                        "\tFCF_Valid     : x%x\n"
                        "\tFCF_SOL       : x%x\n"
                        "\tFIP_Priority  : x%x\n"
                        "\tMAC_Provider  : x%x\n"
                        "\tLowest VLANID : x%x\n"
                        "\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
                        "\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
                        "\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
                        "\tNext_FCF_Index: x%x\n",
                        bf_get(lpfc_fcf_record_fcf_index, fcf_record),
                        bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
                        bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
                        bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
                        fcf_record->fip_priority,
                        bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
                        vlan_id,
                        bf_get(lpfc_fcf_record_mac_0, fcf_record),
                        bf_get(lpfc_fcf_record_mac_1, fcf_record),
                        bf_get(lpfc_fcf_record_mac_2, fcf_record),
                        bf_get(lpfc_fcf_record_mac_3, fcf_record),
                        bf_get(lpfc_fcf_record_mac_4, fcf_record),
                        bf_get(lpfc_fcf_record_mac_5, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
                        next_fcf_index);
}
/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to an existing FCF record.
 * @new_fcf_record: pointer to a new FCF record.
 * @new_vlan_id: vlan id from the new FCF record.
 *
 * This function performs matching test of a new FCF record against an existing
 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
 * will not be used as part of the FCF record matching criteria.
 *
 * Returns true if all the fields match, otherwise returns false.
 */
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}
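
/*
 * The comparison above is purely property based -- VLAN (unless
 * LPFC_FCOE_IGNORE_VID), MAC address, switch name, fabric name and
 * FIP priority -- so records at two different FCF table indexes can
 * still match.  Callers depend on exactly that to recognize the
 * in-use FCF when it reappears at a new index after a rescan.
 */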
/**
 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
 * @vport: Pointer to vport object.
 * @fcf_index: index to next fcf.
 *
 * This function processes the roundrobin fcf failover to the next fcf index.
 * When this function is invoked, there will be a current fcf registered
 * for flogi.
 * Return: 0 for continue retrying flogi on currently registered fcf;
 *         1 for stop flogi on currently registered fcf;
 */
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	struct lpfc_hba *phba = vport->phba;
	int rc;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;

		if (!phba->fcf.fcf_redisc_attempted) {
			lpfc_unregister_fcf(phba);

			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (!rc) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"3195 Rediscover FCF table\n");
				phba->fcf.fcf_redisc_attempted = 1;
				lpfc_sli4_clear_fcf_rr_bmask(phba);
			} else {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"3196 Rediscover FCF table "
						"failed. Status:x%x\n", rc);
			}
		} else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					"3197 Already rediscover FCF table "
					"attempted. No more retry\n");
		}
		goto stop_flogi_current_fcf;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
				"2794 Try FLOGI roundrobin FCF failover to "
				"(x%x)\n", fcf_index);
		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
		if (rc)
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2761 FLOGI roundrobin FCF failover "
					"failed (rc:x%x) to read FCF (x%x)\n",
					rc, phba->fcf.current_rec.fcf_indx);
		else
			goto stop_flogi_current_fcf;
	}
	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}
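
/*
 * Outcome summary: LPFC_FCOE_FCF_NEXT_NONE means the roundrobin
 * bmask is exhausted, and every path through that branch returns 1
 * (stop FLOGI to the registered FCF) after either a devloss-driven
 * unregister/rescan or at most one lpfc_sli4_redisc_fcf_table()
 * attempt.  A nonzero fcf_index returns 0 only when the read of the
 * next FCF record could not be issued, in which case FLOGI keeps
 * retrying the currently registered FCF.
 */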
/**
 * lpfc_sli4_fcf_pri_list_del
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete
 *
 * This routine checks the on list flag of the fcf_index to be deleted.
 * If it is on the list then it is removed from the list, and the flag
 * is cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
 **/
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3058 deleting idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_pri->fcf_rec.priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
		    new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_set_fcf_flogi_fail
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 *
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so that the round robin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
 **/
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_fcf_pri_list_add
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 * @new_fcf_record: pointer to the fcf record being added.
 *
 * This routine checks the priority of the fcf_index to be added.
 * If it is a lower priority than the current head of the fcf_pri list
 * then it is added to the list in the right order.
 * If it is the same priority as the current head of the list then it
 * is added to the head of the list and its bit in the rr_bmask is set.
 * If the fcf_index to be added is of a higher priority than the current
 * head of the list then the rr_bmask is cleared, its bit is set in the
 * rr_bmask and it is added to the head of the list.
 * returns:
 * 0=success 1=failure
 **/
static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
				      uint16_t fcf_index,
				      struct fcf_record *new_fcf_record)
{
	uint16_t current_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	struct lpfc_fcf_pri *new_fcf_pri;
	int ret;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3059 adding idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_record->fip_priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
		list_del_init(&new_fcf_pri->list);
	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				    LPFC_SLI4_FCF_TBL_INDX_MAX);
	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		ret = 0; /* Empty rr list */
		goto out;
	}
	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
			memset(phba->fcf.fcf_rr_bmask, 0,
			       sizeof(*phba->fcf.fcf_rr_bmask));
			/* fcfs_at_this_priority_level = 1; */
			phba->fcf.eligible_fcf_cnt = 1;
		} else
			/* fcfs_at_this_priority_level++; */
			phba->fcf.eligible_fcf_cnt++;
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		if (new_fcf_pri->fcf_rec.priority <=
		    fcf_pri->fcf_rec.priority) {
			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
				list_add(&new_fcf_pri->list,
					 &phba->fcf.fcf_pri_list);
			else
				list_add(&new_fcf_pri->list,
					 &((struct lpfc_fcf_pri *)
					 fcf_pri->list.prev)->list);
			ret = 0;
			goto out;
		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
			|| new_fcf_pri->fcf_rec.priority <
			   next_fcf_pri->fcf_rec.priority) {
			list_add(&new_fcf_pri->list, &fcf_pri->list);
			ret = 0;
			goto out;
		}
		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
			continue;
	}
	ret = 1;
out:
	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
	spin_unlock_irq(&phba->hbalock);
	return ret;
}
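
/*
 * Ordering example (lower fip_priority value wins): with the list
 * holding priorities {2, 2, 5}, adding another priority-2 index
 * joins the head group and sets its rr_bmask bit; adding priority 1
 * clears the whole rr_bmask first, so the next roundrobin pass walks
 * only the new best group; adding priority 3 is spliced between the
 * 2s and the 5 and stays out of the bmask until the better group is
 * exhausted.
 */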
/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kick-starts
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
void
lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	struct lpfc_fcf_rec *fcf_rec = NULL;
	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
	bool select_new_fcf;
	int rc;

	/* If there is pending FCoE event restart FCF table scan */
	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2765 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		/* Let next new FCF event trigger fast failover */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Check the FCF record against the connection list */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry; otherwise, this is an eligible FCF
	 * record for roundrobin FCF failover.
	 */
	if (!rc) {
		lpfc_sli4_fcf_pri_list_del(phba,
					   bf_get(lpfc_fcf_record_fcf_index,
						  new_fcf_record));
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2781 FCF (x%x) failed connection "
				"list check: (x%x/x%x/%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_avail,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_valid,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_sol,
				       new_fcf_record));
		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
			    phba->fcf.current_rec.fcf_indx) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2862 FCF (x%x) matches property "
					"of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
				goto read_next_fcf;
			}
			/*
			 * In case the current in-use FCF record becomes
			 * invalid/unavailable during FCF discovery that
			 * was not triggered by fast FCF failover process,
			 * treat it as fast FCF failover.
			 */
			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"2835 Invalid in-use FCF "
						"(x%x), enter FCF failover "
						"table scan.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_mbox_cmd_free(phba, mboxq);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
		}
		goto read_next_fcf;
	} else {
		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
						new_fcf_record);
		if (rc)
			goto read_next_fcf;
	}
	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery. The condition that a rescan
	 * matches the in-use FCF record: fabric name, switch name, mac
	 * address, and vlan_id.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, vlan_id)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
			    phba->fcf.current_rec.fcf_indx) {
				phba->fcf.fcf_flag |= FCF_AVAILABLE;
				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
					/* Stop FCF redisc wait timer */
					__lpfc_sli4_stop_fcf_redisc_wait_timer(
									phba);
				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
					/* Fast failover, mark completed */
					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2836 New FCF matches in-use "
						"FCF (x%x), port_state:x%x, "
						"fc_flag:x%x\n",
						phba->fcf.current_rec.fcf_indx,
						phba->pport->port_state,
						phba->pport->fc_flag);
				goto out;
			} else
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2863 New FCF (x%x) matches "
					"property of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
		}
		/*
		 * Read next FCF record from HBA searching for the matching
		 * with in-use record only if not during the fast failover
		 * period. In case of fast failover period, it shall try to
		 * determine whether the FCF record just read should be the
		 * next candidate.
		 */
		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
	}
	/*
	 * Update on failover FCF record only if it's in FCF fast-failover
	 * period; otherwise, update on current FCF record.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
		fcf_rec = &phba->fcf.failover_rec;
	else
		fcf_rec = &phba->fcf.current_rec;

	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the driver FCF record does not have boot flag
		 * set and new hba fcf record has boot flag set, use
		 * the new hba fcf record.
		 */
		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
			/* Choose this FCF record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2837 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, BOOT_ENABLE);
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the driver FCF record has boot flag set and the
		 * new hba FCF record does not have boot flag, read
		 * the next FCF record.
		 */
		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the new hba FCF record has lower priority value
		 * than the driver FCF record, use the new record.
		 */
		if (new_fcf_record->fip_priority < fcf_rec->priority) {
			/* Choose the new FCF record with lower priority */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2838 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, 0);
			/* Reset running random FCF selection count */
			phba->fcf.eligible_fcf_cnt = 1;
		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
			/* Update running random FCF selection count */
			phba->fcf.eligible_fcf_cnt++;
			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
						phba->fcf.eligible_fcf_cnt);
			if (select_new_fcf) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2839 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
				/* Choose the new FCF by random selection */
				__lpfc_update_fcf_record(phba, fcf_rec,
							 new_fcf_record,
							 addr_mode, vlan_id, 0);
			}
		}
		spin_unlock_irq(&phba->hbalock);
		goto read_next_fcf;
	}
	/*
	 * This is the first suitable FCF record, choose this record for
	 * initial best-fit FCF.
	 */
	if (fcf_rec) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2840 Update initial FCF candidate "
				"(x%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record));
		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					 addr_mode, vlan_id, (boot_flag ?
					 BOOT_ENABLE : 0));
		phba->fcf.fcf_flag |= FCF_AVAILABLE;
		/* Setup initial running random FCF selection count */
		phba->fcf.eligible_fcf_cnt = 1;
	}
	spin_unlock_irq(&phba->hbalock);
	goto read_next_fcf;

read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Case of FCF fast failover scan
			 */

			/*
			 * It has not found any suitable FCF record, cancel
			 * FCF scan inprogress, and do nothing
			 */
			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					       "2782 No suitable FCF found: "
					       "(x%x/x%x)\n",
					       phba->fcoe_eventtag_at_fcf_scan,
					       bf_get(lpfc_fcf_record_fcf_index,
						      new_fcf_record));
				spin_lock_irq(&phba->hbalock);
				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
					phba->hba_flag &= ~FCF_TS_INPROG;
					spin_unlock_irq(&phba->hbalock);
					/* Unregister in-use FCF and rescan */
					lpfc_printf_log(phba, KERN_INFO,
							LOG_FIP,
							"2864 On devloss tmo "
							"unreg in-use FCF and "
							"rescan FCF table\n");
					lpfc_unregister_fcf_rescan(phba);
					return;
				}
				/*
				 * Let next new FCF event trigger fast failover
				 */
				phba->hba_flag &= ~FCF_TS_INPROG;
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			/*
			 * It has found a suitable FCF record that is not
			 * the same as in-use FCF record, unregister the
			 * in-use FCF record, replace the in-use FCF record
			 * with the new FCF record, mark FCF fast failover
			 * completed, and then start register the new FCF
			 * record.
			 */

			/* Unregister the current in-use FCF record */
			lpfc_unregister_fcf(phba);

			/* Replace in-use record with the new record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2842 Replace in-use FCF (x%x) "
					"with failover FCF (x%x)\n",
					phba->fcf.current_rec.fcf_indx,
					phba->fcf.failover_rec.fcf_indx);
			memcpy(&phba->fcf.current_rec,
			       &phba->fcf.failover_rec,
			       sizeof(struct lpfc_fcf_rec));
			/*
			 * Mark the fast FCF failover rediscovery completed
			 * and the start of the first round of the roundrobin
			 * failover.
			 */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irq(&phba->hbalock);
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		} else {
			/*
			 * In case of transaction period to fast FCF failover,
			 * do nothing when search to the end of the FCF table.
			 */
			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
				return;

			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
			    phba->fcf.fcf_flag & FCF_IN_USE) {
				/*
				 * In case the current in-use FCF record no
				 * longer existed during FCF discovery that
				 * was not triggered by fast FCF failover
				 * process, treat it as fast FCF failover.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2841 In-use FCF record (x%x) "
						"not reported, entering fast "
						"FCF failover mode scanning.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		}
	} else
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}
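
/*
 * Recap of the selection ladder above, in order: a connection-list
 * failure only matters when the record is the in-use FCF going bad
 * (which forces fast failover); an index match with the in-use FCF
 * ends the scan; otherwise a boot-flag record beats a non-boot one,
 * a lower fip_priority beats an equal one, and equal priority falls
 * back to the 1/eligible_fcf_cnt random replacement so ties are
 * broken uniformly over the whole table walk.
 */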
/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl handler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function for FLOGI failure roundrobin FCF failover
 * read FCF record mailbox command from the eligible FCF record bmask for
 * performing the failover. If the FCF read back is not valid/available, it
 * falls through to retrying FLOGI to the currently registered FCF again.
 * Otherwise, if the FCF read back is valid and available, it will set the
 * newly read FCF record to the failover FCF record, unregister the currently
 * registered FCF record, copy the failover FCF record to the current
 * FCF record, and then register the current FCF record before proceeding
 * to trying FLOGI on the new failover FCF.
 */
void
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t next_fcf_index, fcf_index;
	uint16_t current_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, stop the roundrobin failover process */
	if (phba->link_state < LPFC_LINK_UP) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		phba->hba_flag &= ~FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		goto out;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record. "
				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
				phba->fcf.fcf_flag);
		lpfc_unregister_fcf_rescan(phba);
		goto out;
	}

	/* Get the needed parameters from FCF record */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	if (!rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2848 Remove ineligible FCF (x%x) from "
				"roundrobin bmask\n", fcf_index);
		/* Clear roundrobin bmask bit for ineligible FCF */
		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
		/* Perform next round of roundrobin FCF failover */
		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
		if (rc)
			goto out;
		goto error_out;
	}

	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2760 Perform FLOGI roundrobin FCF failover: "
				"FCF (x%x) back to FCF (x%x)\n",
				phba->fcf.current_rec.fcf_indx, fcf_index);
		/* Wait 500 ms before retrying FLOGI to current FCF */
		msleep(500);
		lpfc_issue_init_vfi(phba->pport);
		goto out;
	}

	/* Upload new FCF record to the failover FCF record */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
			phba->fcf.failover_rec.fcf_indx, fcf_index);
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);

	current_fcf_index = phba->fcf.current_rec.fcf_indx;

	/* Unregister the current in-use FCF record */
	lpfc_unregister_fcf(phba);

	/* Replace in-use record with the new record */
	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
	       sizeof(struct lpfc_fcf_rec));

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2783 Perform FLOGI roundrobin FCF failover: FCF "
			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);

error_out:
	lpfc_register_fcf(phba);
out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function of read FCF record mailbox command for
 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
 * failover when a new FCF event happened. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask for the eligible FCF record for roundrobin failover.
 */
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);

	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vfi mailbox command.
 */
static void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	/*
	 * VFI not supported on interface type 0, just do the flogi
	 * Also continue if the VFI is in use - just use the same one.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
			LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX,
				 "2891 Init VFI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	lpfc_initial_flogi(vport);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vfi mailbox command to initialize the VFI and
 * VPI for the physical port.
 */
void
lpfc_issue_init_vfi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX, "2892 Failed to allocate "
				 "init_vfi mailbox\n");
		return;
	}
	lpfc_init_vfi(mboxq, vport);
	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
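
/*
 * Both routines above follow the driver's standard asynchronous
 * mailbox shape: allocate from mbox_mem_pool, build the command,
 * point mbox_cmpl at the completion handler and post with
 * MBX_NOWAIT.  Ownership of the mailbox passes to the completion
 * path on success; only the MBX_NOT_FINISHED error path (or the
 * handler itself) returns it to the pool.
 */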
/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 */
static void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX,
				 "2609 Init VPI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_DISCOVERY,
					 "2731 Cannot find fabric "
					 "controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the
 * VPI for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc, vpi;

	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
		vpi = lpfc_alloc_vpi(vport->phba);
		if (!vpi) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_MBOX,
					 "3303 Failed to obtain vport vpi\n");
			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
			return;
		}
		vport->vpi = vpi;
	}

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX, "2607 Failed to allocate "
				 "init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/*
	 * VFI not supported for interface type 0, so ignore any mailbox
	 * error (except VFI in use) and continue with the discovery.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
			LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2018 REG_VFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto out_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out_free_mem;
	}

	/* If the VFI is already registered, there is nothing else to do
	 * Unless this was a VFI update and we are in PT2PT mode, then
	 * we should drop through to set the port state to ready.
	 */
	if (vport->fc_flag & FC_VFI_REGISTERED)
		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
		      vport->fc_flag & FC_PT2PT))
			goto out_free_mem;

	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* In case SLI4 FC loopback test, we are ready */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->link_flag & LS_LOOPBACK_MODE)) {
		phba->link_state = LPFC_HBA_READY;
		goto out_free_mem;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
			 "alpacnt:%d LinkState:%x topology:%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_myDID,
			 vport->phba->alpa_map[0],
			 phba->link_state, phba->fc_topology);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/*
		 * For private loop or for NPort pt2pt,
		 * just start discovery and we are done.
		 */
		if ((vport->fc_flag & FC_PT2PT) ||
		    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
		    !(vport->fc_flag & FC_PUBLIC_LOOP))) {

			/* Use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			if (vport->fc_flag & FC_PT2PT)
				vport->port_state = LPFC_VPORT_READY;
			else
				lpfc_disc_start(vport);
		} else {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		}
	}

out_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	return;
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct serv_parm *sp = &vport->fc_sparam;
	uint32_t ed_tov;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x>\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));

	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		ed_tov = (ed_tov + 999999) / 1000000;

	phba->fc_edtov = ed_tov;
	phba->fc_ratov = (2 * ed_tov) / 1000;
	if (phba->fc_ratov < FF_DEF_RATOV) {
		/* RA_TOV should be at least 10sec for initial flogi */
		phba->fc_ratov = FF_DEF_RATOV;
	}

	lpfc_update_vport_wwn(vport);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->ctx_buf = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
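
/*
 * Timeout arithmetic, worked through: with edtovResolution set the
 * service parameters carry E_D_TOV in nanoseconds, so for example
 * 2,000,000 ns -> (2000000 + 999999) / 1000000 = 2 ms (the +999999
 * rounds up).  fc_ratov = (2 * ed_tov) / 1000 is then in seconds;
 * 2 ms yields 0, which the FF_DEF_RATOV clamp raises back to the
 * default needed for the initial FLOGI.
 */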
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	struct Scsi_Host *shost;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;
	struct fcf_record *fcf_record;
	uint32_t fc_flags = 0;

	spin_lock_irq(&phba->hbalock);
	phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
		case LPFC_LINK_SPEED_1GHZ:
		case LPFC_LINK_SPEED_2GHZ:
		case LPFC_LINK_SPEED_4GHZ:
		case LPFC_LINK_SPEED_8GHZ:
		case LPFC_LINK_SPEED_10GHZ:
		case LPFC_LINK_SPEED_16GHZ:
		case LPFC_LINK_SPEED_32GHZ:
		case LPFC_LINK_SPEED_64GHZ:
		case LPFC_LINK_SPEED_128GHZ:
			break;
		default:
			phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
			break;
		}
	}

	if (phba->fc_topology &&
	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3314 Topology changed was 0x%x is 0x%x\n",
				phba->fc_topology,
				bf_get(lpfc_mbx_read_top_topology, la));
		phba->fc_topology_changed = 1;
	}

	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	shost = lpfc_shost_from_vport(vport);
	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* if npiv is enabled and this adapter supports npiv log
		 * a message that npiv is not supported in this topology
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1309 Link Up Event npiv not supported in loop "
					"topology\n");
		/* Get Loop Map information */
		if (bf_get(lpfc_mbx_read_top_il, la))
			fc_flags |= FC_LBIT;

		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
		i = la->lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			    (phba->sli_rev >= LPFC_SLI_REV3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		fc_flags |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	if (fc_flags) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= fc_flags;
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_linkup(phba);
	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that support FCF index 0 and driver
		 * defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					     GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_MBOX | LOG_SLI,
						"2554 Could not allocate memory for "
						"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_MBOX | LOG_SLI,
						"2013 Could not manually add FCF "
						"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		/* Reset FCF roundrobin bmask for new discovery */
		lpfc_sli4_clear_fcf_rr_bmask(phba);
	}

	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}
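
/*
 * Locking shape worth noting above: hbalock covers the topology,
 * link-speed and ALPA bookkeeping and is dropped before the
 * GFP_KERNEL mailbox allocations (which may sleep), while the short
 * shost->host_lock section only publishes the accumulated fc_flags
 * bits to the vport.
 */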
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}
/*
 * This routine handles processing a READ_TOPOLOGY mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI4 only.
 */
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_mbx_read_top *la;
	struct lpfc_sli_ring *pring;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	uint8_t attn_type;

	/* Unblock ELS traffic */
	pring = lpfc_phba_elsring(phba);
	if (pring)
		pring->flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
	}

	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	attn_type = bf_get(lpfc_mbx_read_top_att_type, la);

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (bf_get(lpfc_mbx_read_top_pb, la))
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if (phba->fc_eventTag <= la->eventTag) {
		phba->fc_stat.LinkMultiEvent++;
		if (attn_type == LPFC_ATT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	if (phba->sli_rev < LPFC_SLI_REV4) {
		spin_lock_irq(&phba->hbalock);
		if (bf_get(lpfc_mbx_read_top_mm, la))
			phba->sli.sli_flag |= LPFC_MENLO_MAINT;
		else
			phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
		spin_unlock_irq(&phba->hbalock);
	}

	phba->link_events++;
	if ((attn_type == LPFC_ATT_LINK_UP) &&
	    !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x x%x %d\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0],
					bf_get(lpfc_mbx_read_top_mm, la),
					bf_get(lpfc_mbx_read_top_fa, la),
					phba->wait_4_mlo_maint_flg);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else if (attn_type == LPFC_ATT_LINK_DOWN ||
		   attn_type == LPFC_ATT_UNEXP_WWPN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1308 Link Down Event in loop back mode "
				"x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		else if (attn_type == LPFC_ATT_UNEXP_WWPN)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1313 Link Down UNEXP WWPN Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				bf_get(lpfc_mbx_read_top_mm, la),
				bf_get(lpfc_mbx_read_top_fa, la));
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				bf_get(lpfc_mbx_read_top_mm, la),
				bf_get(lpfc_mbx_read_top_fa, la));
		lpfc_mbx_issue_link_down(phba);
	}
	if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
	    attn_type == LPFC_ATT_LINK_UP) {
		if (phba->link_state != LPFC_LINK_DOWN) {
			phba->fc_stat.LinkDown++;
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1312 Link Down Event x%x received "
					"Data: x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					phba->pport->port_state, vport->fc_flag);
			lpfc_mbx_issue_link_down(phba);
		} else
			lpfc_enable_la(phba);

		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1310 Menlo Maint Mode Link up Event x%x rcvd "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		/*
		 * The cmnd that triggered this will be waiting for this
		 * signal.
		 */
		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
		if (phba->wait_4_mlo_maint_flg) {
			phba->wait_4_mlo_maint_flg = 0;
			wake_up_interruptible(&phba->wait_4_mlo_m_q);
		}
	}

	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    bf_get(lpfc_mbx_read_top_fa, la)) {
		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
			lpfc_issue_clear_la(phba, vport);
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n",
				bf_get(lpfc_mbx_read_top_fa, la));
	}

lpfc_mbx_cmpl_read_topology_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
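
/*
 * Event ordering note: la->eventTag is the adapter's increasing
 * attention counter.  When a link-up attention arrives while a
 * previous nonzero tag is still recorded, the handler counts a
 * LinkMultiEvent and first forces lpfc_linkdown(), so the discovery
 * state machine always sees an ordered down/up transition even when
 * attentions were coalesced.
 */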
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);
	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We rcvd a rscn after issuing this
		 * mbox reg login, we may have cycled
		 * back through the state and be
		 * back at reg login state so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(shost->host_lock);

		/*
		 * We cannot leave the RPI registered because
		 * if we go thru discovery again for this ndlp
		 * a subsequent REG_RPI will fail.
		 */
		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
		lpfc_unreg_rpi(vport, ndlp);
	}

	/* Call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
			vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
					      LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_cleanup_vports_rrqs(vport, NULL);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}

int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}
void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
			if (phba->nvmet_support)
				lpfc_nvmet_update_targetport(phba);
			else
				lpfc_nvme_update_localport(vport);
		}
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 */
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int mbx_wait_rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *) vport_info;
	do {
		/* free dma buffer from previous round */
		if (pmb->ctx_buf) {
			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
						       LPFC_MBOX_TMO);

		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0544 lpfc_create_static_vport failed to"
					" issue dump mailbox command ret 0x%x "
					"status 0x%x\n",
					mbx_wait_rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
			    sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
					      vport_buff + offset,
					      byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		 offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
	    ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
			!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0545 lpfc_create_static_vport bad"
				" information header 0x%x 0x%x\n",
				le32_to_cpu(vport_info->signature),
				le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0546 lpfc_create_static_vport failed to"
					" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	if (mbx_wait_rc != MBX_TIMEOUT) {
		if (pmb->ctx_buf) {
			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return;
}
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	pmb->ctx_ndlp = NULL;
	pmb->ctx_buf = NULL;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count to ndlp after the
			 * references to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the reference count to ndlp after the references
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* when the physical port receives a LOGO, do not start
		 * vport discovery */
		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
			lpfc_start_fdiscs(phba);
		else {
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
			spin_unlock_irq(shost->host_lock);
		}
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end after
	 * all the current references to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * This routine will issue a GID_FT for each FC4 Type supported
 * by the driver. ALL GID_FTs must complete before discovery is started.
 */
int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
	/* Good status, issue CT Request to NameServer */
	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
			/* Cannot issue NameServer FCP Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
					 "0604 %s FC TYPE %x %s\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_FCP,
					 "Finishing discovery.");
			return 0;
		}
		vport->gidft_inp++;
	}

	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
			/* Cannot issue NameServer NVME Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
					 "0605 %s FC_TYPE %x %s %d\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_NVME,
					 "Finishing discovery: gidftinp ",
					 vport->gidft_inp);
			if (vport->gidft_inp == 0)
				return 0;
		} else
			vport->gidft_inp++;
	}
	return vport->gidft_inp;
}
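
/*
 * Note (illustrative): vport->gidft_inp acts as a simple outstanding-
 * command counter. Each GID_FT successfully issued above increments it,
 * and the CT response path is expected to decrement it; discovery can
 * only move forward once it drains to zero. A hedged sketch of that
 * consumer side:
 *
 *	vport->gidft_inp--;
 *	if (vport->gidft_inp == 0)
 *		lpfc_disc_start(vport);
 */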
/**
 * lpfc_issue_gidpt - issue a GID_PT for all N_Ports
 * @vport: The virtual port for which this call is being executed.
 *
 * This routine will issue a GID_PT to get a list of all N_Ports
 *
 * Return value :
 *   0 - Failure to issue a GID_PT
 *   1 - GID_PT issued
 **/
int
lpfc_issue_gidpt(struct lpfc_vport *vport)
{
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
		/* Cannot issue NameServer FCP Query, so finish up
		 * discovery
		 */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "0606 %s Port TYPE %x %s\n",
				 "Failed to issue GID_PT to ",
				 GID_PT_N_PORT,
				 "Finishing discovery.");
		return 0;
	}
	vport->gidft_inp++;
	return 1;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	struct lpfc_vport *vport = pmb->vport;

	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;
	vport->gidft_inp = 0;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);

out:
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
				    FC_TYPE_NVME);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	if (lpfc_issue_gidft(vport) == 0)
		goto out;

	/*
	 * At this point in time we may need to wait for multiple
	 * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
	 *
	 * decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
		return;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port. But fc_remote_port_add zeros the space to which
	 * rport->dd_data points. So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	rport = ndlp->rport;
	if (rport) {
		rdata = rport->dd_data;
		/* break the link before dropping the ref */
		ndlp->rport = NULL;
		if (rdata) {
			if (rdata->pnode == ndlp)
				lpfc_nlp_put(ndlp);
			rdata->pnode = NULL;
		}
		/* drop reference for earlier registration */
		put_device(&rport->dev);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add:       did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	/* Don't add the remote port if unloading. */
	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3183 rport register x%06x, rport %p role x%x\n",
			 ndlp->nlp_DID, rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
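
/*
 * Note (illustrative): rport->dd_data is the transport-owned private
 * area the FC transport allocates per remote port. A sketch of walking
 * back from an fc_rport to the driver node, mirroring the code above:
 *
 *	struct lpfc_rport_data *rdata = rport->dd_data;
 *	struct lpfc_nodelist *ndlp = rdata ? rdata->pnode : NULL;
 *
 * The reference taken with lpfc_nlp_get() when pnode is set is what the
 * unregister path above drops before reusing the rport.
 */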
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;
	struct lpfc_vport *vport = ndlp->vport;

	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport delete:    did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3184 rport unregister x%06x, rport %p\n",
			 ndlp->nlp_DID, rport);

	fc_remote_port_delete(rport);

	return;
}
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		if (vport->fc_npr_cnt == 0 && count == -1)
			vport->fc_npr_cnt = 0;
		else
			vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* FCP and NVME Transport interface */
	if ((old_state == NLP_STE_MAPPED_NODE ||
	     old_state == NLP_STE_UNMAPPED_NODE)) {
		if (ndlp->rport) {
			vport->phba->nport_event_cnt++;
			lpfc_unregister_remote_port(ndlp);
		}

		if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
			vport->phba->nport_event_cnt++;
			if (vport->phba->nvmet_support == 0) {
				/* Start devloss if target. */
				if (ndlp->nlp_type & NLP_NVME_TARGET)
					lpfc_nvme_unregister_port(vport, ndlp);
			} else {
				/* NVMET has no upcall. */
				lpfc_nlp_put(ndlp);
			}
		}
	}

	/* FCP and NVME Transport interfaces */

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		if (ndlp->nlp_fc4_type ||
		    ndlp->nlp_DID == Fabric_DID ||
		    ndlp->nlp_DID == NameServer_DID ||
		    ndlp->nlp_DID == FDMI_DID) {
			vport->phba->nport_event_cnt++;
			/*
			 * Tell the fc transport about the port, if we haven't
			 * already. If we have, and it's a scsi entity, be
			 * sure to unblock any attached scsi devices
			 */
			lpfc_register_remote_port(vport, ndlp);
		}
		/* Notify the NVME transport of this new rport. */
		if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
		    ndlp->nlp_fc4_type & NLP_FC4_NVME) {
			if (vport->phba->nvmet_support == 0) {
				/* Register this rport with the transport.
				 * Only NVME Target Rports are registered with
				 * the transport.
				 */
				if (ndlp->nlp_type & NLP_NVME_TARGET) {
					vport->phba->nport_event_cnt++;
					lpfc_nvme_register_port(vport, ndlp);
				}
			} else {
				/* Just take an NDLP ref count since the
				 * target does not register rports.
				 */
				lpfc_nlp_get(ndlp);
			}
		}
	}

	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * If the node just added to Mapped list was an FCP target,
	 * but the remote port registration failed or assigned a target
	 * id outside the presentable range - move the node to the
	 * Unmapped List.
	 */
	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (ndlp->nlp_type & NLP_FCP_TARGET) &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_LOGO_ISSUE] = "LOGO",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
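
/*
 * Usage sketch (illustrative): callers pass a small stack buffer and use
 * the returned pointer directly in a log message, for example:
 *
 *	char name[16];
 *	pr_info("state: %s\n",
 *		lpfc_nlp_state_name(name, sizeof(name), NLP_STE_PLOGI_ISSUE));
 *
 * which prints "state: PLOGI". Out-of-range states degrade gracefully to
 * "unknown (<n>)" rather than indexing past the table.
 */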
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp. This is because
 * the life-span of @ndlp might go beyond the existence of @vport, as the
 * final release of ndlp is determined by its reference count, and the
 * operation on @ndlp needs the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	ndlp->nlp_fc4_type = NLP_FC4_NONE;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
	ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
}
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did, flag;
	unsigned long flags;
	unsigned long *active_rrqs_xri_bitmap = NULL;
	int rpi = LPFC_RPI_ALLOC_ERROR;
	uint32_t defer_did = 0;

	if (!ndlp)
		return NULL;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		rpi = lpfc_sli4_alloc_rpi(vport->phba);
		if (rpi == LPFC_RPI_ALLOC_ERROR)
			return NULL;
	}

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		goto free_rpi;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		goto free_rpi;
	}

	/* First preserve the original DID, xri_bitmap and some flags */
	did = ndlp->nlp_DID;
	flag = (ndlp->nlp_flag & NLP_UNREG_INP);
	if (flag & NLP_UNREG_INP)
		defer_did = ndlp->nlp_defer_did;
	if (phba->sli_rev == LPFC_SLI_REV4)
		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;

	/* Zero ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof(struct list_head)), 0,
		sizeof(struct lpfc_nodelist) - sizeof(struct list_head));

	/* Next reinitialize and restore saved objects */
	lpfc_initialize_node(vport, ndlp, did);
	ndlp->nlp_flag |= flag;
	if (flag & NLP_UNREG_INP)
		ndlp->nlp_defer_did = defer_did;
	if (phba->sli_rev == LPFC_SLI_REV4)
		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = rpi;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
				 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);
	}

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable:     did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;

free_rpi:
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(vport->phba, rpi);
	return NULL;
}
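
/*
 * Note (illustrative): the memset in lpfc_enable_node() deliberately
 * skips the leading struct list_head so the node can be re-initialized
 * while still linked on vport->fc_nodes. This relies on nlp_listp being
 * the first member of struct lpfc_nodelist; conceptually:
 *
 *	BUILD_BUG_ON(offsetof(struct lpfc_nodelist, nlp_listp) != 0);
 *	memset((char *)ndlp + sizeof(struct list_head), 0,
 *	       sizeof(*ndlp) - sizeof(struct list_head));
 *
 * (The BUILD_BUG_ON is a sketch of the invariant, not present in the
 * driver itself.)
 */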
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp is marked as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_cleanup_vports_rrqs(vport, ndlp);
		lpfc_unreg_rpi(vport, ndlp);
	}

	lpfc_nlp_put(ndlp);
	return;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer:  tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}
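
/*
 * Worked example (illustrative): with a fabric-reported E_D_TOV of
 * 2000 ms, the FAN case yields tmo = ((2000 + 999) / 1000) + 1 = 3 s,
 * i.e. edtov rounded up to whole seconds plus one. With a typical
 * R_A_TOV of 10 s, the normal case yields tmo = (10 * 3) + 3 = 33 s,
 * comfortably above the 3 * R_A_TOV the FC spec requires for CT
 * requests.
 */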
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
			/* fall through */
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
			/* fall through */
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == LPFC_FCP_RING) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	}
	return 0;
}
static void
__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
		struct list_head *dequeue_list)
{
	struct lpfc_iocbq *iocb, *next_iocb;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
			/* match, dequeue */
			list_move_tail(&iocb->list, dequeue_list);
	}
}
static void
lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t i;

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++)
		__lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
						dequeue_list);
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *qp = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock(&pring->ring_lock);
		__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
		spin_unlock(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		if (phba->sli_rev != LPFC_SLI_REV4)
			lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
		else
			lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}
/**
 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function will issue an ELS LOGO command after completing
 * the UNREG_RPI.
 **/
static void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
	if (!ndlp)
		return;
	lpfc_issue_els_logo(vport, ndlp, 0);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Check to see if there are any deferred events to process */
	if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
	    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "1434 UNREG cmpl deferred logo x%x "
				 "on NPort x%x Data: x%x %p\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_defer_did, ndlp);

		ndlp->nlp_flag &= ~NLP_UNREG_INP;
		ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	} else {
		ndlp->nlp_flag &= ~NLP_UNREG_INP;
	}
}
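
/*
 * Note (illustrative): NLP_UNREG_INP plus a saved nlp_defer_did implement
 * a one-slot deferred-event queue. An event (for example a PLOGI that
 * arrived while the UNREG_RPI was in flight) is parked on the node and
 * replayed above once the unreg completes. Sketch of the producer side,
 * using the fields as in this file:
 *
 *	ndlp->nlp_flag |= NLP_UNREG_INP;
 *	ndlp->nlp_defer_did = new_did;	// hypothetical DID to PLOGI later
 */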
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc, acc_plogi = 1;
	uint16_t rpi;

	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
			lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
					 "3366 RPI x%x needs to be "
					 "unregistered nlp_flag x%x "
					 "did x%x\n",
					 ndlp->nlp_rpi, ndlp->nlp_flag,
					 ndlp->nlp_DID);

		/* If there is already an UNREG in progress for this ndlp,
		 * no need to queue up another one.
		 */
		if (ndlp->nlp_flag & NLP_UNREG_INP) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "1436 unreg_rpi SKIP UNREG x%x on "
					 "NPort x%x deferred x%x flg x%x "
					 "Data: %p\n",
					 ndlp->nlp_rpi, ndlp->nlp_DID,
					 ndlp->nlp_defer_did,
					 ndlp->nlp_flag, ndlp);
			goto out;
		}

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			/* SLI4 ports require the physical rpi value. */
			rpi = ndlp->nlp_rpi;
			if (phba->sli_rev == LPFC_SLI_REV4)
				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
			mbox->vport = vport;
			if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
				mbox->ctx_ndlp = ndlp;
				mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
			} else {
				if (phba->sli_rev == LPFC_SLI_REV4 &&
				    (!(vport->load_flag & FC_UNLOADING)) &&
				    (bf_get(lpfc_sli_intf_if_type,
				     &phba->sli4_hba.sli_intf) >=
				     LPFC_SLI_INTF_IF_TYPE_2) &&
				    (kref_read(&ndlp->kref) > 0)) {
					mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
					mbox->mbox_cmpl =
						lpfc_sli4_unreg_rpi_cmpl_clr;
					/*
					 * accept PLOGIs after unreg_rpi_cmpl
					 */
					acc_plogi = 0;
				} else {
					mbox->ctx_ndlp = ndlp;
					mbox->mbox_cmpl =
						lpfc_sli_def_mbox_cmpl;
				}
			}
			if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
			    Fabric_DID_MASK) &&
			    (!(vport->fc_flag & FC_OFFLINE_MODE)))
				ndlp->nlp_flag |= NLP_UNREG_INP;

			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "1433 unreg_rpi UNREG x%x on "
					 "NPort x%x deferred flg x%x Data:%p\n",
					 ndlp->nlp_rpi, ndlp->nlp_DID,
					 ndlp->nlp_flag, ndlp);

			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				acc_plogi = 1;
			}
		}
		lpfc_no_rpi(phba, ndlp);
out:
		if (phba->sli_rev != LPFC_SLI_REV4)
			ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		if (acc_plogi)
			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
		return 1;
	}
	ndlp->nlp_flag &= ~NLP_LOGO_ACC;
	return 0;
}
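
/*
 * Note (illustrative): the completion routine chosen above encodes three
 * distinct unreg flows:
 *
 *	NLP_ISSUE_LOGO set         -> lpfc_nlp_logo_unreg (LOGO after unreg)
 *	SLI4, IF_TYPE >= 2, online -> lpfc_sli4_unreg_rpi_cmpl_clr, which
 *	                              holds an extra ndlp reference and
 *	                              defers accepting PLOGIs (acc_plogi = 0)
 *	otherwise                  -> lpfc_sli_def_mbox_cmpl (default cleanup)
 *
 * acc_plogi is reset to 1 on a synchronous issue failure so the node is
 * not left refusing PLOGIs for an unreg that never went out.
 */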
/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2884 Vport array allocation failed\n");
		return;
	}
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_unreg_all_rpis(vport);
		return;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
				 mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->ctx_ndlp = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1836 Could not issue "
					 "unreg_login(all_rpis) status %d\n",
					 rc);
	}
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
			       mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->ctx_ndlp = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		lpfc_disable_node(vport, ndlp);
	}

	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
			mb->ctx_ndlp = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Cleanup REG_LOGIN completions which are not yet processed */
	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
			(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
			(ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
			continue;

		mb->ctx_ndlp = NULL;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}

	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		    (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);
	lpfc_cleanup_vports_rrqs(vport, ndlp);
	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
	    phba->sli_rev != LPFC_SLI_REV4) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->ctx_ndlp = ndlp;
				rc = lpfc_sli_issue_mbox(phba, mbox,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox,
						     phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * ndlp->rport must be set to NULL before it reaches here
	 * i.e. break rport/node link before doing lpfc_nlp_put for
	 * registered rport and then drop the reference of rport.
	 */
	if (ndlp->rport) {
		/*
		 * extra lpfc_nlp_put dropped the reference of ndlp
		 * for registered rport so need to cleanup rport
		 */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0940 removed node x%p DID x%x "
				" rport not null %p\n",
				ndlp, ndlp->nlp_DID, ndlp->rport);
		rport = ndlp->rport;
		rdata = rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		put_device(&rport->dev);
	}
}
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			/* This code is supposed to match the ID
			 * for a private loop device that is
			 * connected to fl_port. But we need to
			 * check that the port did not just go
			 * from pt2pt to fabric or we could end
			 * up matching ndlp->nlp_DID 000001 to
			 * fabric DID 0x20101
			 */
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id &&
				    vport->phba->fc_topology ==
				    LPFC_TOPOLOGY_LOOP)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
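
/*
 * Worked example (illustrative): a fabric D_ID such as 0x010203 splits
 * through the D_ID union as domain = 0x01, area = 0x02, id = 0x03. With
 * vport->fc_myDID = 0x010200, a private-loop node that logged in as DID
 * 0x000003 (domain 0, area 0, id 0x03) still matches a query for
 * 0x010203 on loop topology: the ids agree, my domain/area agree with
 * the query, and the node's zero domain/area with a nonzero id is
 * exactly the fl_port case the comment above describes.
 */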
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t)ndlp->nlp_state << 24) |
				 ((uint32_t)ndlp->nlp_xri << 16) |
				 ((uint32_t)ndlp->nlp_type << 8) |
				 ((uint32_t)ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x %p\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1,
					 ndlp->active_rrqs_xri_bitmap);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
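
/*
 * Usage sketch (illustrative): the double-underscore variant assumes the
 * caller already holds shost->host_lock; lpfc_findnode_did() below is
 * the self-locking wrapper most callers want:
 *
 *	spin_lock_irqsave(shost->host_lock, iflags);
 *	ndlp = __lpfc_findnode_did(vport, Fabric_DID);
 *	spin_unlock_irqrestore(shost->host_lock, iflags);
 */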
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irqrestore(shost->host_lock, iflags);
	return ndlp;
}
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if (vport->phba->nvmet_support)
			return NULL;
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = lpfc_nlp_init(vport, did);
		if (!ndlp)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		if (vport->phba->nvmet_support)
			return NULL;
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	/* The NVME Target does not want to actively manage an rport.
	 * The goal is to allow the target to reset its state and clear
	 * pending IO in preparation for the initiator to recover.
	 */
	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);

			/* NVME Target mode waits until rport is known to be
			 * impacted by the RSCN before it transitions. No
			 * active management - just go to NPR provided the
			 * node had a valid login.
			 */
			if (vport->phba->nvmet_support)
				return ndlp;

			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If the initiator received a PLOGI from this NPort or if the
		 * initiator is already in the process of discovery on it,
		 * there's no need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    (!vport->phba->nvmet_support &&
		     ndlp->nlp_flag & NLP_RCV_PLOGI))
			return NULL;

		if (vport->phba->nvmet_support)
			return ndlp;

		/* Moving to NPR state clears unsolicited flags and
		 * allows for rediscovery
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
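
/*
 * Worked example (illustrative): with cfg_scan_down set, index walks
 * 0, 1, 2, ... so lpfcAlpaArray is consumed from 0xEF down to 0x01
 * (highest-priority ALPA first); with it clear, index = FC_MAXLOOP - j - 1
 * walks the array backwards so 0x01 is discovered first. The local
 * port's own ALPA (vport->fc_myDID & 0xff) is skipped in both passes.
 */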
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
	struct lpfc_sli_ring *fcp_ring   = &psli->sli3_ring[LPFC_FCP_RING];
	int rc;

	/*
	 * if it's not a physical port or if we already sent
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
	    (phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
				 "3315 Link is not up %x\n",
				 phba->link_state);
		return;
	}

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/* Register the VPI for SLI3, NPIV only. */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_clear_la(phba, vport);
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
/*
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return;

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(struct timer_list *t)
{
	struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
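
/*
 * Note (illustrative): lpfc_disc_timeout() runs in timer (softirq)
 * context, so it only posts WORKER_DISC_TMO and wakes the driver's
 * worker thread; the real work happens later, in process context, when
 * the worker calls lpfc_disc_timeout_handler() below. The tmo_posted
 * check keeps a timer that fires while the event is still pending from
 * waking the worker a second time.
 */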
5662 lpfc_disc_timeout_handler(struct lpfc_vport
*vport
)
5664 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
5665 struct lpfc_hba
*phba
= vport
->phba
;
5666 struct lpfc_sli
*psli
= &phba
->sli
;
5667 struct lpfc_nodelist
*ndlp
, *next_ndlp
;
5668 LPFC_MBOXQ_t
*initlinkmbox
;
5669 int rc
, clrlaerr
= 0;
5671 if (!(vport
->fc_flag
& FC_DISC_TMO
))
5674 spin_lock_irq(shost
->host_lock
);
5675 vport
->fc_flag
&= ~FC_DISC_TMO
;
5676 spin_unlock_irq(shost
->host_lock
);
5678 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
5679 "disc timeout: state:x%x rtry:x%x flg:x%x",
5680 vport
->port_state
, vport
->fc_ns_retry
, vport
->fc_flag
);
5682 switch (vport
->port_state
) {
5684 case LPFC_LOCAL_CFG_LINK
:
5686 * port_state is identically LPFC_LOCAL_CFG_LINK while
5687 * waiting for FAN timeout
5689 lpfc_printf_vlog(vport
, KERN_WARNING
, LOG_DISCOVERY
,
5690 "0221 FAN timeout\n");
5692 /* Start discovery by sending FLOGI, clean up old rpis */
5693 list_for_each_entry_safe(ndlp
, next_ndlp
, &vport
->fc_nodes
,
5695 if (!NLP_CHK_NODE_ACT(ndlp
))
5697 if (ndlp
->nlp_state
!= NLP_STE_NPR_NODE
)
5699 if (ndlp
->nlp_type
& NLP_FABRIC
) {
5700 /* Clean up the ndlp on Fabric connections */
5701 lpfc_drop_node(vport
, ndlp
);
5703 } else if (!(ndlp
->nlp_flag
& NLP_NPR_ADISC
)) {
5704 /* Fail outstanding IO now since device
5705 * is marked for PLOGI.
5707 lpfc_unreg_rpi(vport
, ndlp
);
5710 if (vport
->port_state
!= LPFC_FLOGI
) {
5711 if (phba
->sli_rev
<= LPFC_SLI_REV3
)
5712 lpfc_initial_flogi(vport
);
5714 lpfc_issue_init_vfi(vport
);
5721 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
5722 /* Initial FLOGI timeout */
5723 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
5724 "0222 Initial %s timeout\n",
5725 vport
->vpi
? "FDISC" : "FLOGI");
5727 /* Assume no Fabric and go on with discovery.
5728 * Check for outstanding ELS FLOGI to abort.
5731 /* FLOGI failed, so just use loop map to make discovery list */
5732 lpfc_disc_list_loopmap(vport
);
5734 /* Start discovery */
5735 lpfc_disc_start(vport
);
5738 case LPFC_FABRIC_CFG_LINK
:
5739 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
5741 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
5742 "0223 Timeout while waiting for "
5743 "NameServer login\n");
5744 /* Next look for NameServer ndlp */
5745 ndlp
= lpfc_findnode_did(vport
, NameServer_DID
);
5746 if (ndlp
&& NLP_CHK_NODE_ACT(ndlp
))
5747 lpfc_els_abort(phba
, ndlp
);
5749 /* ReStart discovery */
5753 /* Check for wait for NameServer Rsp timeout */
5754 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
5755 "0224 NameServer Query timeout "
5757 vport
->fc_ns_retry
, LPFC_MAX_NS_RETRY
);
5759 if (vport
->fc_ns_retry
< LPFC_MAX_NS_RETRY
) {
5760 /* Try it one more time */
5761 vport
->fc_ns_retry
++;
5762 vport
->gidft_inp
= 0;
5763 rc
= lpfc_issue_gidft(vport
);
5767 vport
->fc_ns_retry
= 0;
5771 * Discovery is over.
5772 * set port_state to PORT_READY if SLI2.
5773 * cmpl_reg_vpi will set port_state to READY for SLI3.
5775 if (phba
->sli_rev
< LPFC_SLI_REV4
) {
5776 if (phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
)
5777 lpfc_issue_reg_vpi(phba
, vport
);
5779 lpfc_issue_clear_la(phba
, vport
);
5780 vport
->port_state
= LPFC_VPORT_READY
;

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* fall through */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		if (phba->sli_rev != LPFC_SLI_REV4) {
			psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->sli3_ring[LPFC_FCP_RING].flag &=
				~LPFC_STOP_IOCB_EVENT;
		}
		vport->port_state = LPFC_VPORT_READY;
	}
}
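
/*
 * Note: the handler above is a two-level state machine. The first switch
 * keys off vport->port_state (FAN, FLOGI/FDISC, NameServer login/query,
 * node authentication, RSCN) and restarts the stalled discovery step; the
 * second switch keys off phba->link_state and, for link states where a
 * timeout is unexpected, sets clrlaerr so the discovery list is flushed
 * and the port is forced back to LPFC_VPORT_READY.
 */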

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);
	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port).
	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
	 * DPRT -> RPRT (vports)
	 */
	if (vport->port_type == LPFC_PHYSICAL_PORT)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
	else
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
}
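
/*
 * Note: a completion routine such as the one above is attached to the
 * mailbox command before it is queued; a minimal sketch of that pattern
 * (illustrative, not a verbatim call site):
 *
 *	pmb->vport = vport;
 *	pmb->ctx_ndlp = lpfc_nlp_get(ndlp);	<- hold a node reference
 *	pmb->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
 *	lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *
 * which is why the handler ends by dropping that node reference with
 * lpfc_nlp_put() and freeing the DMA buffer and mailbox memory.
 */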

static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	/* check for active node */
	if (!NLP_CHK_NODE_ACT(ndlp))
		return 0;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}
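
/*
 * Note: lpfc_filter_by_rpi() and lpfc_filter_by_wwpn() are node_filter
 * callbacks consumed by __lpfc_find_node() below; adding a new lookup key
 * only requires another small predicate. An illustrative (hypothetical)
 * DID filter would look like:
 *
 *	static int
 *	lpfc_filter_by_did(struct lpfc_nodelist *ndlp, void *param)
 *	{
 *		uint32_t *did = param;
 *
 *		return ndlp->nlp_DID == *did;
 *	}
 *
 * and would be used as __lpfc_find_node(vport, lpfc_filter_by_did, &did).
 */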

static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "3185 FIND node filter %p DID "
					 "ndlp %p did x%x flg x%x st x%x "
					 "xri x%x type x%x rpi x%x\n",
					 filter, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, ndlp->nlp_state,
					 ndlp->nlp_xri, ndlp->nlp_type,
					 ndlp->nlp_rpi);
			return ndlp;
		}
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3186 FIND node filter %p NOT FOUND.\n", filter);
	return NULL;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi is
 * found, it returns the node list element pointer, else it returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the node list element pointer, else it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi
 * is found, the routine returns the node element list pointer, else
 * it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
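
/*
 * Note: the usual driver naming convention applies to the lookups above:
 * the double-underscore variants (__lpfc_find_node(), __lpfc_findnode_rpi())
 * expect the caller to already hold shost->host_lock, while the plain
 * wrappers acquire and release the lock themselves:
 *
 *	spin_lock_irq(shost->host_lock);
 *	ndlp = __lpfc_findnode_rpi(vport, rpi);	<- lock already held
 *	spin_unlock_irq(shost->host_lock);
 */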

/**
 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: the physical host virtual N_Port identifier.
 *
 * This routine finds a vport on a HBA (referred by @phba) through a
 * @vpi. The function walks the HBA's vport list and returns the address
 * of the vport with the matching @vpi.
 *
 * Return code
 *    NULL - No vport with the matching @vpi found
 *    Otherwise - Address to the vport with the matching @vpi.
 **/
struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
	struct lpfc_vport *vport;
	unsigned long flags;
	int i = 0;

	/* The physical ports are always vpi 0 - translate is unnecessary. */
	if (vpi > 0) {
		/*
		 * Translate the physical vpi to the logical vpi. The
		 * vport stores the logical vpi.
		 */
		for (i = 0; i < phba->max_vpi; i++) {
			if (vpi == phba->vpi_ids[i])
				break;
		}

		if (i >= phba->max_vpi) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"2936 Could not find Vport mapped "
					"to vpi %d\n", vpi);
			return NULL;
		}
	}

	spin_lock_irqsave(&phba->port_list_lock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->vpi == i) {
			spin_unlock_irqrestore(&phba->port_list_lock, flags);
			return vport;
		}
	}
	spin_unlock_irqrestore(&phba->port_list_lock, flags);
	return NULL;
}

struct lpfc_nodelist *
lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	int rpi = LPFC_RPI_ALLOC_ERROR;

	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		rpi = lpfc_sli4_alloc_rpi(vport->phba);
		if (rpi == LPFC_RPI_ALLOC_ERROR)
			return NULL;
	}

	ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
	if (!ndlp) {
		if (vport->phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(vport->phba, rpi);
		return NULL;
	}

	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = rpi;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0007 rpi:%x DID:%x flg:%x refcnt:%d "
				 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);

		ndlp->active_rrqs_xri_bitmap =
				mempool_alloc(vport->phba->active_rrq_pool,
					      GFP_KERNEL);
		if (ndlp->active_rrqs_xri_bitmap)
			memset(ndlp->active_rrqs_xri_bitmap, 0,
			       ndlp->phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return ndlp;
}
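
/*
 * Note: a typical node lifecycle, pieced together from the routines in
 * this file (illustrative only):
 *
 *	ndlp = lpfc_nlp_init(vport, did);	kref starts at 1
 *	lpfc_nlp_get(ndlp);			one reference per user
 *	...
 *	lpfc_nlp_put(ndlp);			drop the per-user reference
 *	lpfc_nlp_put(ndlp);			final put -> lpfc_nlp_release()
 *
 * On SLI-4 the RPI allocated in lpfc_nlp_init() is returned to the pool
 * either on the allocation failure path above or in lpfc_nlp_release().
 */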

/* This routine releases all resources associated with a specific NPort's
 * ndlp and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p did %x "
			"usgmap:x%x refcnt:%d rpi:%x\n",
			(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
			kref_read(&ndlp->kref), ndlp->nlp_rpi);

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		if (phba->sli_rev == LPFC_SLI_REV4)
			mempool_free(ndlp->active_rrqs_xri_bitmap,
				     ndlp->phba->active_rrq_pool);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}

/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			kref_read(&ndlp->kref));
		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}

/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put:        did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		kref_read(&ndlp->kref));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (kref_read(&ndlp->kref) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note, the kref_put returns 1 when decrementing a reference
	 * count that was 1; it invokes the release callback function,
	 * but it still leaves the reference count at 1 (it does not
	 * actually perform the last decrement). Otherwise, it actually
	 * decrements the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
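
/*
 * Note: the NLP_*_IACT_REQ/FREE_REQ/FREE_ACK usage-map flags checked above
 * exist because lpfc_nlp_release() can run from a kref_put() in one thread
 * while another thread still holds a pointer to the same node. The flags,
 * all manipulated under phba->ndlp_lock, let lpfc_nlp_get()/lpfc_nlp_put()
 * detect a node that is mid-teardown and bail out instead of resurrecting
 * or double-freeing it.
 */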

/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		kref_read(&ndlp->kref));
	if (kref_read(&ndlp->kref) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}

/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 **/
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;

	vports = lpfc_create_vport_work_array(phba);

	/* If driver cannot allocate memory, indicate fcf is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		/*
		 * If the CVL_RCVD bit is not set then we have sent the
		 * flogi.
		 * If dev_loss fires while we are waiting we do not want to
		 * unreg the fcf.
		 */
		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
			spin_unlock_irq(shost->host_lock);
			ret = 1;
			goto out;
		}
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				ret = 1;
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
						"2624 RPI %x DID %x flag %x "
						"still logged in\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_flag);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}

/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 **/
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2555 UNREG_VFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 **/
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2550 UNREG_FCFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
 * VFIs.
 **/
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i = 0, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_unreg_all_rpis(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
		if (ndlp)
			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
		lpfc_cleanup_pending_mbox(phba->pport);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(phba->pport);
		lpfc_mbx_unreg_vpi(phba->pport);
		shost = lpfc_shost_from_vport(phba->pport);
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister the physical port VFI */
	rc = lpfc_issue_unreg_vfi(phba->pport);
	return rc;
}
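
/*
 * Note the strict teardown order in lpfc_unregister_fcf_prep(): remote-port
 * RPIs first, then the per-vport VPIs (with FLOGI/FDISC retries cancelled
 * and pending mailboxes cleaned up), then all outstanding ELS commands, and
 * only then the physical port's VFI. Each level is unregistered only after
 * the objects layered on top of it are gone.
 */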

/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues a synchronous unregister FCF mailbox command to the
 * HBA to unregister the currently registered FCF record. The driver does
 * not reset the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 **/
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}

/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 **/
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}

/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 **/
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there are any connected remote ports for the FCF
 * and, if all the devices are disconnected, unregisters the FCFI.
 * It also tries to use another FCF for discovery.
 **/
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}
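
/*
 * Note: lpfc_unregister_unused_fcf() is the conservative entry point among
 * the unregister variants above; it re-checks the FCF state under
 * phba->hbalock and defers to lpfc_fcf_inuse() so the FCF is only torn
 * down when no vport still has a logged-in remote port, and then lets
 * lpfc_unregister_fcf_rescan() start a fresh FCF table scan.
 */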

/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates driver data structures for the FCF connection
 * record table read from config region 23.
 **/
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
		       uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
				     GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2566 Failed to allocate connection"
					" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
		       sizeof(struct lpfc_fcf_conn_rec));
		list_add_tail(&conn_entry->list,
			      &phba->fcf_conn_rec_list);
	}

	if (!list_empty(&phba->fcf_conn_rec_list)) {
		i = 0;
		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
				    list) {
			conn_rec = &conn_entry->conn_rec;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3345 FCF connection list rec[%02d]: "
					"flags:x%04x, vtag:x%04x, "
					"fabric_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x, "
					"switch_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x\n", i++,
					conn_rec->flags, conn_rec->vlan_tag,
					conn_rec->fabric_name[0],
					conn_rec->fabric_name[1],
					conn_rec->fabric_name[2],
					conn_rec->fabric_name[3],
					conn_rec->fabric_name[4],
					conn_rec->fabric_name[5],
					conn_rec->fabric_name[6],
					conn_rec->fabric_name[7],
					conn_rec->switch_name[0],
					conn_rec->switch_name[1],
					conn_rec->switch_name[2],
					conn_rec->switch_name[3],
					conn_rec->switch_name[4],
					conn_rec->switch_name[5],
					conn_rec->switch_name[6],
					conn_rec->switch_name[7]);
		}
	}
}

/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates driver data structures with config
 * parameters read from config region 23.
 **/
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
		     uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
	    (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
}

/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to the record, else it returns NULL.
 **/
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
	    (size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
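
/*
 * Note: each region 23 record is a TLV whose header word is laid out as
 * buff[offset] = type and buff[offset + 1] = length in words. A worked
 * example with hypothetical values: a record at offset 0 with
 * rec_length = 3 occupies one header word plus three data words, so the
 * next header is read at offset 0 + 3 * 4 + 4 = 16 bytes, matching the
 * offset arithmetic in the loop above.
 */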

/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates driver data structures with the parameters.
 **/
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		     uint8_t *buff,
		     uint32_t size)
{
	uint32_t offset = 0;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words, signature and version cannot be
	 * verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
			size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}
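
/*
 * Note: taken together, the routines above imply the layout of config
 * region 23 as consumed by this driver: a 4-byte signature
 * (LPFC_REGION23_SIGNATURE), a 4-byte version word (LPFC_REGION23_VERSION),
 * then a sequence of TLV records terminated by LPFC_REGION23_LAST_REC, of
 * which only the FCOE_PARAM_TYPE and FCOE_CONN_TBL_TYPE records are used.
 */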