/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
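
/*
 * The 126 values above are the valid FC-AL arbitrated loop physical
 * addresses (AL_PAs). For scan-down binding, the position of an AL_PA in
 * this table is what gets assigned as the SCSI target id, so the ordering
 * here fixes the AL_PA-to-scsid mapping.
 */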
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
				    &phba->sli.sli3_ring[LPFC_FCP_RING],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately; we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}
	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				 "6789 rport name %llx != node port name %llx",
				 rport->port_name,
				 wwn_to_u64(ndlp->nlp_portname.u.wwn));
	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				 "6790 rport name %llx dev_loss_evt pending",
				 rport->port_name);
		return;
	}
	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
	spin_unlock_irq(shost->host_lock);

	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	spin_lock_irq(&phba->hbalock);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
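
/*
 * Note: the FC transport invokes lpfc_dev_loss_tmo_callbk() above in its
 * own context, so the callback only queues an LPFC_EVT_DEV_LOSS work item;
 * the actual node teardown runs later in lpfc_dev_loss_tmo_handler() below
 * on the lpfc worker thread, with a node reference held across the queued
 * work (taken via lpfc_nlp_get() above, dropped in lpfc_work_list_done()).
 */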
/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still in use of the FCF; otherwise,
 * it returns 0 when no remote node was still using the FCF when the devloss
 * timeout happened to this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport   *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	struct Scsi_Host  *shost;
	uint8_t *name;
	int put_node;
	int warn_on = 0;
	int fcf_inuse = 0;

	rport = ndlp->rport;
	vport = ndlp->vport;
	shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irq(shost->host_lock);

	if (!rport)
		return fcf_inuse;

	name = (uint8_t *) &ndlp->nlp_portname;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
	/*
	 * lpfc_nlp_remove, if reached with a dangling rport, drops the
	 * reference. To make sure that does not happen, clear the rport
	 * pointer in ndlp before lpfc_nlp_put.
	 */
	rdata = rport->dd_data;

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately; we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					    &phba->sli.sli3_ring[LPFC_FCP_RING],
					    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		put_device(&rport->dev);

		return fcf_inuse;
	}
	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	put_node = rdata->pnode != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	put_device(&rport->dev);
	if (ndlp->nlp_type & NLP_FABRIC)
		return fcf_inuse;

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}
	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}
/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for an SLI4 host. For the devloss
 * timeout of the last remote node which had been in use of the FCF, when
 * this routine is invoked, it shall be guaranteed that none of the remote
 * nodes are still using the FCF. On devloss timeout of the last remote node
 * using the FCF, if the FIP engine is neither in the FCF table scan process
 * nor the roundrobin failover process, the in-use FCF shall be
 * unregistered. If the FIP engine is in FCF discovery process, the devloss
 * timeout state shall be set for either the FCF table scan process or
 * roundrobin failover process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}
/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when
 * there are lots of events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are lots of fast events, do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}
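
/*
 * Fast-path event lifecycle (sketch; the real producer call sites live in
 * the I/O completion paths, not in this file):
 *
 *	evt = lpfc_alloc_fast_evt(phba);	// interrupt context
 *	if (evt) {
 *		// ... fill in evt->un and evt->vport ...
 *		lpfc_workq_post_event(phba, evt, NULL,
 *				      LPFC_EVT_FASTPATH_MGMT_EVT);
 *	}
 *
 * The worker thread then dispatches to lpfc_send_fastpath_evt() below,
 * which posts the event to the FC transport and calls
 * lpfc_free_fast_evt().
 */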
/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_vendor_event(shost,
			fc_get_event_number(),
			evt_data_size,
			evt_data,
			LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}
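
/*
 * Locking note for lpfc_work_list_done() above: hbalock protects only the
 * work_list manipulation. The lock is dropped before each event is
 * handled, so handlers are free to sleep or take hbalock themselves, and
 * it is re-taken to dequeue the next entry.
 */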
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = lpfc_phba_elsring(phba);
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			if (phba->link_state >= LPFC_LINK_UP) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								 HA_RXMASK));
			}
		}
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!list_empty(&pring->txq)))
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok:     cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt      = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
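
/*
 * Example usage (sketch): deferring an HBA reset from a context that must
 * not block. lpfc_work_list_done() above dispatches on evtp->evt; the
 * LPFC_EVT_DEV_LOSS and LPFC_EVT_ELS_RETRY cases instead carry an ndlp
 * pointer in arg1 with a node reference held.
 *
 *	lpfc_workq_post_event(phba, NULL, NULL, LPFC_EVT_RESET_HBA);
 */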
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		lpfc_disc_state_machine(vport, ndlp, NULL,
					remove
					? NLP_EVT_DEVICE_RM
					: NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t      *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);

			vports[i]->fc_myDID = 0;

			if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
			    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
				if (phba->nvmet_support)
					lpfc_nvmet_update_targetport(phba);
				else
					lpfc_nvme_update_localport(vports[i]);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}
	return 0;
}
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI3 only.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI)
		lpfc_initial_flogi(vport);
	else if (vport->fc_flag & FC_PT2PT)
		lpfc_disc_start(vport);
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}
/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 *
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}
static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}
/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA
 * FCF record. This routine is called with the hbalock held.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	lockdep_assert_held(&phba->hbalock);

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}
/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the hbalock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
			 struct fcf_record *new_fcf_record, uint32_t addr_mode,
			 uint16_t vlan_id, uint32_t flag)
{
	lockdep_assert_held(&phba->hbalock);

	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
		new_fcf_record);
}
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with the HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI &&
		    phba->pport->fc_flag & FC_FABRIC) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery, else it returns
 * zero. If this FCF record can be used for SAN discovery, boot_flag will
 * indicate if this FCF is used by boot bios and addr_mode will indicate the
 * addressing mode to be used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in the vlan_id on return of the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			 struct fcf_record *new_fcf_record,
			 uint32_t *boot_flag, uint32_t *addr_mode,
			 uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}
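
	/*
	 * Bitmap layout assumed by the scan above: byte i covers VLAN ids
	 * i*8 .. i*8+7, least-significant bit first. E.g.
	 * vlan_bitmap[0] == 0x08 yields fcf_vlan_id = 3, and
	 * vlan_bitmap[2] == 0x01 yields fcf_vlan_id = 16.
	 */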
	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, driver connects to
	 * all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record reports a vlan id use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for
			 * the vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not support
			 * it, continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not support
			 * it, continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				      FCFCNCT_AM_SPMA) ?
				      LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if FCF supports the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * of the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}
/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else return 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}
/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * This function makes a running random selection decision on the FCF record
 * to use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 * from prandom_u32() are taken as the random number generated.
 *
 * Returns true when the outcome is that the newly read FCF record should
 * be chosen; otherwise, returns false when the outcome is to keep the
 * previously chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = 0xFFFF & prandom_u32();

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}
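
/*
 * The check above implements single-slot reservoir sampling: the k-th
 * eligible record seen during a scan replaces the currently held one with
 * probability roughly 1/k, so after N eligible records each one has been
 * kept with probability ~1/N. E.g. with fcf_cnt == 3 the newly read record
 * wins only when 3 * rand_num < 0xFFFF, about one time in three.
 */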
/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 **/
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status, if_type;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	lpfc_sli_pcimem_bcopy(shdr, shdr,
			      sizeof(union lpfc_sli4_cfg_shdr));
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
		    if_type == LPFC_SLI_INTF_IF_TYPE_2)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}
/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFCF_SOL       : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}
/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to an existing FCF record.
 * @new_fcf_record: pointer to a new FCF record.
 * @new_vlan_id: vlan id from the new FCF record.
 *
 * This function performs matching test of a new FCF record against an existing
 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
 * will not be used as part of the FCF record matching criteria.
 *
 * Returns true if all the fields match, otherwise returns false.
 */
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}
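
/*
 * Usage note (illustrative): passing LPFC_FCOE_IGNORE_VID lets a caller ask
 * "is this the same FCF, whatever VLAN it reported?", which is how the scan
 * completion handler below re-identifies the in-use FCF. A hypothetical
 * call site:
 *
 *	if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
 *				       new_fcf_record, LPFC_FCOE_IGNORE_VID))
 *		... treat new_fcf_record as the in-use FCF ...
 */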
/**
 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
 * @vport: Pointer to vport object.
 * @fcf_index: index to next fcf.
 *
 * This function processes the roundrobin FCF failover to the next FCF index.
 * When this function is invoked, there will be a current fcf registered
 * for flogi.
 * Return: 0 for continue retrying flogi on currently registered fcf;
 *         1 for stop flogi on currently registered fcf;
 */
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	int rc = 0;
	struct lpfc_hba *phba = vport->phba;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		goto stop_flogi_current_fcf;
	}
	/* Otherwise, try to read the FCF record indexed by fcf_index */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
			"2794 Try FLOGI roundrobin FCF failover to "
			"(x%x)\n", fcf_index);
	rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
	if (rc)
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
				"2761 FLOGI roundrobin FCF failover "
				"failed (rc:x%x) to read FCF (x%x)\n",
				rc, phba->fcf.current_rec.fcf_indx);
	else
		goto stop_flogi_current_fcf;

	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}
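
/*
 * Caller contract sketch (illustrative, not driver code): the 0/1 return
 * distinguishes "keep FLOGI retries on the currently registered FCF" from
 * "stop, the failover path has taken over". A hypothetical caller:
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (lpfc_sli4_fcf_rr_next_proc(vport, fcf_index))
 *		return;		// roundrobin moved on or gave up
 *	// else: retry FLOGI against the FCF still registered
 */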
/**
 * lpfc_sli4_fcf_pri_list_del
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete
 * This routine checks the on list flag of the fcf_index to be deleted.
 * If it is on the list then it is removed from the list, and the flag
 * is cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
 **/
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
			uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
		"3058 deleting idx x%x pri x%x flg x%x\n",
		fcf_index, new_fcf_pri->fcf_rec.priority,
		new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
				new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_set_fcf_flogi_fail
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so that the round robin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
 **/
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_fcf_pri_list_add
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 * @new_fcf_record: pointer to the new fcf record.
 * This routine checks the priority of the fcf_index to be added.
 * If it is a lower priority than the current head of the fcf_pri list
 * then it is added to the list in the right order.
 * If it is the same priority as the current head of the list then it
 * is added to the head of the list and its bit in the rr_bmask is set.
 * If the fcf_index to be added is of a higher priority than the current
 * head of the list then the rr_bmask is cleared, its bit is set in the
 * rr_bmask and it is added to the head of the list.
 * returns:
 * 0=success 1=failure
 **/
static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
	uint16_t fcf_index,
	struct fcf_record *new_fcf_record)
{
	uint16_t current_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	struct lpfc_fcf_pri *new_fcf_pri;
	int ret;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
		"3059 adding idx x%x pri x%x flg x%x\n",
		fcf_index, new_fcf_record->fip_priority,
		new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
		list_del_init(&new_fcf_pri->list);
	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				LPFC_SLI4_FCF_TBL_INDX_MAX);
	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		ret = 0; /* Empty rr list */
		goto out;
	}
	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
			memset(phba->fcf.fcf_rr_bmask, 0,
				sizeof(*phba->fcf.fcf_rr_bmask));
			/* fcfs_at_this_priority_level = 1; */
			phba->fcf.eligible_fcf_cnt = 1;
		} else
			/* fcfs_at_this_priority_level++; */
			phba->fcf.eligible_fcf_cnt++;
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				&phba->fcf.fcf_pri_list, list) {
		if (new_fcf_pri->fcf_rec.priority <=
				fcf_pri->fcf_rec.priority) {
			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
				list_add(&new_fcf_pri->list,
						&phba->fcf.fcf_pri_list);
			else
				list_add(&new_fcf_pri->list,
					&((struct lpfc_fcf_pri *)
					fcf_pri->list.prev)->list);
			ret = 0;
			goto out;
		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
			|| new_fcf_pri->fcf_rec.priority <
				next_fcf_pri->fcf_rec.priority) {
			list_add(&new_fcf_pri->list, &fcf_pri->list);
			ret = 0;
			goto out;
		}
		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
			continue;
	}
	ret = 1;
out:
	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
	spin_unlock_irq(&phba->hbalock);
	return ret;
}
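
/*
 * Worked example (illustrative): starting from an empty fcf_pri_list and
 * adding FCF records with fip_priority 5, 5, 3, 7 in that order:
 *   add pri 5 -> list {5};      bit set in rr_bmask, eligible_fcf_cnt = 1
 *   add pri 5 -> list {5,5};    bit set, eligible_fcf_cnt = 2
 *   add pri 3 -> better (lower) priority: rr_bmask wiped, list {3,5,5},
 *                only the new record's bit set, eligible_fcf_cnt = 1
 *   add pri 7 -> worse priority: appended after the pri-5 entries, with
 *                the rr_bmask and eligible count left untouched
 * Lower fip_priority values win; the rr_bmask only ever carries indexes at
 * the best priority level currently known.
 */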
/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kick-starts
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
void
lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	struct lpfc_fcf_rec *fcf_rec = NULL;
	uint16_t vlan_id;
	bool select_new_fcf;
	int rc;

	/* If there is pending FCoE event restart FCF table scan */
	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2765 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		/* Let next new FCF event trigger fast failover */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Check the FCF record against the connection list */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry; otherwise, this is an eligible FCF
	 * record for roundrobin FCF failover.
	 */
	if (!rc) {
		lpfc_sli4_fcf_pri_list_del(phba,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2781 FCF (x%x) failed connection "
				"list check: (x%x/x%x/%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_avail,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_valid,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_sol,
				       new_fcf_record));
		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
			    phba->fcf.current_rec.fcf_indx) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2862 FCF (x%x) matches property "
					"of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
				goto read_next_fcf;
			}
			/*
			 * In case the current in-use FCF record becomes
			 * invalid/unavailable during FCF discovery that
			 * was not triggered by fast FCF failover process,
			 * treat it as fast FCF failover.
			 */
			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"2835 Invalid in-use FCF "
						"(x%x), enter FCF failover "
						"mode scanning FCF.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_mbox_cmd_free(phba, mboxq);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
		}
		goto read_next_fcf;
	} else {
		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
						new_fcf_record);
		if (rc)
			goto read_next_fcf;
	}
	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery. The condition that a rescan
	 * matches the in-use FCF record: fabric name, switch name, mac
	 * address, and vlan_id.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, vlan_id)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
			    phba->fcf.current_rec.fcf_indx) {
				phba->fcf.fcf_flag |= FCF_AVAILABLE;
				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
					/* Stop FCF redisc wait timer */
					__lpfc_sli4_stop_fcf_redisc_wait_timer(
									phba);
				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
					/* Fast failover, mark completed */
					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2836 New FCF matches in-use "
						"FCF (x%x), port_state:x%x, "
						"fc_flag:x%x\n",
						phba->fcf.current_rec.fcf_indx,
						phba->pport->port_state,
						phba->pport->fc_flag);
				goto out;
			} else
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2863 New FCF (x%x) matches "
					"property of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
		}
		/*
		 * Read next FCF record from HBA searching for the matching
		 * with in-use record only if not during the fast failover
		 * period. In case of fast failover period, it shall try to
		 * determine whether the FCF record just read should be the
		 * next candidate.
		 */
		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
	}
	/*
	 * Update on failover FCF record only if it's in FCF fast-failover
	 * period; otherwise, update on current FCF record.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
		fcf_rec = &phba->fcf.failover_rec;
	else
		fcf_rec = &phba->fcf.current_rec;

	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the driver FCF record does not have boot flag
		 * set and new hba fcf record has boot flag set, use
		 * the new hba fcf record.
		 */
		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
			/* Choose this FCF record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2837 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, BOOT_ENABLE);
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the driver FCF record has boot flag set and the
		 * new hba FCF record does not have boot flag, read
		 * the next FCF record.
		 */
		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the new hba FCF record has lower priority value
		 * than the driver FCF record, use the new record.
		 */
		if (new_fcf_record->fip_priority < fcf_rec->priority) {
			/* Choose the new FCF record with lower priority */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2838 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, 0);
			/* Reset running random FCF selection count */
			phba->fcf.eligible_fcf_cnt = 1;
		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
			/* Update running random FCF selection count */
			phba->fcf.eligible_fcf_cnt++;
			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
						phba->fcf.eligible_fcf_cnt);
			if (select_new_fcf) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2839 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
				/* Choose the new FCF by random selection */
				__lpfc_update_fcf_record(phba, fcf_rec,
							 new_fcf_record,
							 addr_mode, vlan_id, 0);
			}
		}
		spin_unlock_irq(&phba->hbalock);
		goto read_next_fcf;
	}
	/*
	 * This is the first suitable FCF record, choose this record for
	 * initial best-fit FCF.
	 */
	if (fcf_rec) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2840 Update initial FCF candidate "
				"with FCF (x%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record));
		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					 addr_mode, vlan_id, (boot_flag ?
					 BOOT_ENABLE : 0));
		phba->fcf.fcf_flag |= FCF_AVAILABLE;
		/* Setup initial running random FCF selection count */
		phba->fcf.eligible_fcf_cnt = 1;
	}
	spin_unlock_irq(&phba->hbalock);
	goto read_next_fcf;

read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Case of FCF fast failover scan
			 */

			/*
			 * It has not found any suitable FCF record, cancel
			 * FCF scan inprogress, and do nothing
			 */
			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					       "2782 No suitable FCF found: "
					       "(x%x/x%x)\n",
					       phba->fcoe_eventtag_at_fcf_scan,
					       bf_get(lpfc_fcf_record_fcf_index,
						      new_fcf_record));
				spin_lock_irq(&phba->hbalock);
				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
					phba->hba_flag &= ~FCF_TS_INPROG;
					spin_unlock_irq(&phba->hbalock);
					/* Unregister in-use FCF and rescan */
					lpfc_printf_log(phba, KERN_INFO,
							LOG_FIP,
							"2864 On devloss tmo "
							"unreg in-use FCF and "
							"rescan FCF table\n");
					lpfc_unregister_fcf_rescan(phba);
					return;
				}
				/*
				 * Let next new FCF event trigger fast failover
				 */
				phba->hba_flag &= ~FCF_TS_INPROG;
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			/*
			 * It has found a suitable FCF record that is not
			 * the same as in-use FCF record, unregister the
			 * in-use FCF record, replace the in-use FCF record
			 * with the new FCF record, mark FCF fast failover
			 * completed, and then start register the new FCF
			 * record.
			 */

			/* Unregister the current in-use FCF record */
			lpfc_unregister_fcf(phba);

			/* Replace in-use record with the new record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2842 Replace in-use FCF (x%x) "
					"with failover FCF (x%x)\n",
					phba->fcf.current_rec.fcf_indx,
					phba->fcf.failover_rec.fcf_indx);
			memcpy(&phba->fcf.current_rec,
			       &phba->fcf.failover_rec,
			       sizeof(struct lpfc_fcf_rec));
			/*
			 * Mark the fast FCF failover rediscovery completed
			 * and the start of the first round of the roundrobin
			 * failover.
			 */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irq(&phba->hbalock);
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		} else {
			/*
			 * In case of transaction period to fast FCF failover,
			 * do nothing when search to the end of the FCF table.
			 */
			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
				return;

			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
			    phba->fcf.fcf_flag & FCF_IN_USE) {
				/*
				 * In case the current in-use FCF record no
				 * longer existed during FCF discovery that
				 * was not triggered by fast FCF failover
				 * process, treat it as fast FCF failover.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2841 In-use FCF record (x%x) "
						"not reported, entering fast "
						"FCF failover mode scanning.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		}
	} else
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}
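
/*
 * Note on the equal-priority tie-break above: accepting the n-th equally
 * good FCF with probability 1/n is classic reservoir sampling, which keeps
 * every candidate seen so far equally likely to be the current pick. A
 * minimal standalone sketch of the idea (the guard macro, never defined,
 * and the helper are hypothetical, assuming lpfc_sli4_new_fcf_random_select()
 * behaves this way):
 */
#ifdef LPFC_FCF_RANDOM_SELECT_EXAMPLE
#include <linux/random.h>
static bool
lpfc_example_reservoir_pick(uint32_t candidates_seen)
{
	/* true with probability 1/candidates_seen */
	return (prandom_u32() % candidates_seen) == 0;
}
#endif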
/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function for FLOGI failure roundrobin FCF failover
 * read FCF record mailbox command from the eligible FCF record bmask for
 * performing the failover. If the FCF read back is not valid/available, it
 * fails through to retrying FLOGI to the currently registered FCF again.
 * Otherwise, if the FCF read back is valid and available, it will set the
 * newly read FCF record to the failover FCF record, unregister currently
 * registered FCF record, copy the failover FCF record to the current
 * FCF record, and then register the current FCF record before proceeding
 * to trying FLOGI on the new failover FCF.
 */
void
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t next_fcf_index, fcf_index;
	uint16_t current_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, stop the roundrobin failover process */
	if (phba->link_state < LPFC_LINK_UP) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		phba->hba_flag &= ~FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		goto out;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record. "
				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
				phba->fcf.fcf_flag);
		lpfc_unregister_fcf_rescan(phba);
		goto out;
	}

	/* Get the needed parameters from FCF record */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	if (!rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2848 Remove ineligible FCF (x%x) from "
				"from roundrobin bmask\n", fcf_index);
		/* Clear roundrobin bmask bit for ineligible FCF */
		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
		/* Perform next round of roundrobin FCF failover */
		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
		if (rc)
			goto out;
		goto error_out;
	}

	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2760 Perform FLOGI roundrobin FCF failover: "
				"FCF (x%x) back to FCF (x%x)\n",
				phba->fcf.current_rec.fcf_indx, fcf_index);
		/* Wait 500 ms before retrying FLOGI to current FCF */
		msleep(500);
		lpfc_issue_init_vfi(phba->pport);
		goto out;
	}

	/* Upload new FCF record to the failover FCF record */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
			phba->fcf.failover_rec.fcf_indx, fcf_index);
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);

	current_fcf_index = phba->fcf.current_rec.fcf_indx;

	/* Unregister the current in-use FCF record */
	lpfc_unregister_fcf(phba);

	/* Replace in-use record with the new record */
	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
	       sizeof(struct lpfc_fcf_rec));

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2783 Perform FLOGI roundrobin FCF failover: FCF "
			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);

error_out:
	lpfc_register_fcf(phba);
out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function of read FCF record mailbox command for
 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
 * failover when a new FCF event happened. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask for the eligible FCF record for roundrobin failover.
 */
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);

	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vfi mailbox command.
 */
static void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	/*
	 * VFI not supported on interface type 0, just do the flogi
	 * Also continue if the VFI is in use - just use the same one.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
			LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX,
				"2891 Init VFI mailbox failed 0x%x\n",
				mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	lpfc_initial_flogi(vport);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vfi mailbox command to initialize the VFI and
 * VPI for the physical port.
 */
void
lpfc_issue_init_vfi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2892 Failed to allocate "
			"init_vfi mailbox\n");
		return;
	}
	lpfc_init_vfi(mboxq, vport);
	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
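
/*
 * The alloc/setup/issue/free-on-failure sequence above is the standard
 * non-blocking mailbox pattern in this file. A condensed sketch
 * (illustrative; lpfc_init_vfi() and the completion hook are the real APIs
 * used above):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return;
 *	lpfc_init_vfi(mboxq, vport);		// fill the command
 *	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;	// async completion
 *	if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *	// on success the completion handler owns and frees mboxq
 */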
/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 */
void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX,
				"2609 Init VPI mailbox failed 0x%x\n",
				mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR,
					LOG_DISCOVERY,
					"2731 Cannot find fabric "
					"controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the
 * VPI for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc, vpi;

	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
		vpi = lpfc_alloc_vpi(vport->phba);
		if (!vpi) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_MBOX,
					 "3303 Failed to obtain vport vpi\n");
			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
			return;
		}
		vport->vpi = vpi;
	}

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2607 Failed to allocate "
			"init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
static void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/*
	 * VFI not supported for interface type 0, so ignore any mailbox
	 * error (except VFI in use) and continue with the discovery.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
			LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2018 REG_VFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto out_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out_free_mem;
	}

	/* If the VFI is already registered, there is nothing else to do
	 * Unless this was a VFI update and we are in PT2PT mode, then
	 * we should drop through to set the port state to ready.
	 */
	if (vport->fc_flag & FC_VFI_REGISTERED)
		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
		      vport->fc_flag & FC_PT2PT))
			goto out_free_mem;

	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* In case SLI4 FC loopback test, we are ready */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->link_flag & LS_LOOPBACK_MODE)) {
		phba->link_state = LPFC_HBA_READY;
		goto out_free_mem;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
			 "alpacnt:%d LinkState:%x topology:%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_myDID,
			 vport->phba->alpa_map[0],
			 phba->link_state, phba->fc_topology);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/*
		 * For private loop or for NPort pt2pt,
		 * just start discovery and we are done.
		 */
		if ((vport->fc_flag & FC_PT2PT) ||
		    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
		    !(vport->fc_flag & FC_PUBLIC_LOOP))) {

			/* Use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			if (vport->fc_flag & FC_PT2PT)
				vport->port_state = LPFC_VPORT_READY;
			else
				lpfc_disc_start(vport);
		} else {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		}
	}

out_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	return;
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;
	struct serv_parm *sp = &vport->fc_sparam;
	uint32_t ed_tov;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));

	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		ed_tov = (ed_tov + 999999) / 1000000;

	phba->fc_edtov = ed_tov;
	phba->fc_ratov = (2 * ed_tov) / 1000;
	if (phba->fc_ratov < FF_DEF_RATOV) {
		/* RA_TOV should be at least 10 sec for initial flogi */
		phba->fc_ratov = FF_DEF_RATOV;
	}

	lpfc_update_vport_wwn(vport);
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
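
/*
 * Worked example for the timeout math above (illustrative): with the common
 * fabric value E_D_TOV = 2000 ms (edtovResolution clear), fc_edtov = 2000
 * and fc_ratov = (2 * 2000) / 1000 = 4 s, which is below FF_DEF_RATOV and
 * therefore clamped to the 10 s default. If edtovResolution is set the same
 * field carries nanoseconds, so 2000000 ns rounds up to
 * (2000000 + 999999) / 1000000 = 2 ms before the R_A_TOV derivation.
 */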
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	struct Scsi_Host *shost;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;
	struct fcf_record *fcf_record;
	uint32_t fc_flags = 0;

	spin_lock_irq(&phba->hbalock);
	phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
		case LPFC_LINK_SPEED_1GHZ:
		case LPFC_LINK_SPEED_2GHZ:
		case LPFC_LINK_SPEED_4GHZ:
		case LPFC_LINK_SPEED_8GHZ:
		case LPFC_LINK_SPEED_10GHZ:
		case LPFC_LINK_SPEED_16GHZ:
		case LPFC_LINK_SPEED_32GHZ:
			break;
		default:
			phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
			break;
		}
	}

	if (phba->fc_topology &&
	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3314 Toplogy changed was 0x%x is 0x%x\n",
				phba->fc_topology,
				bf_get(lpfc_mbx_read_top_topology, la));
		phba->fc_topology_changed = 1;
	}

	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	shost = lpfc_shost_from_vport(vport);
	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* if npiv is enabled and this adapter supports npiv log
		 * a message that npiv is not supported in this topology
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1309 Link Up Event npiv not supported in loop "
					"topology\n");
		/* Get Loop Map information */
		if (bf_get(lpfc_mbx_read_top_il, la))
			fc_flags |= FC_LBIT;

		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
		i = la->lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			   (phba->sli_rev >= LPFC_SLI_REV3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		fc_flags |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	if (fc_flags) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= fc_flags;
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_linkup(phba);
	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that support FCF index 0 and driver
		 * defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2554 Could not allocate memory for "
					"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2013 Could not manually add FCF "
					"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		/* Reset FCF roundrobin bmask for new discovery */
		lpfc_sli4_clear_fcf_rr_bmask(phba);
	}

	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}
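
/*
 * Note on the ALPA map logging above (illustrative): phba->alpa_map[0]
 * holds the number of ALPAs granted on the loop and alpa_map[1..] the
 * ALPA values themselves. The loop packs them 16 bytes at a time into the
 * four 32-bit words of the local union so each "1304" message can dump one
 * 16-entry chunk; a loop with, say, 20 participants therefore produces two
 * such log lines.
 */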
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}
/*
 * This routine handles processing a READ_TOPOLOGY mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI4 only.
 */
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_mbx_read_top *la;
	struct lpfc_sli_ring *pring;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Unblock ELS traffic */
	pring = lpfc_phba_elsring(phba);
	pring->flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
	}

	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (bf_get(lpfc_mbx_read_top_pb, la))
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if (phba->fc_eventTag <= la->eventTag) {
		phba->fc_stat.LinkMultiEvent++;
		if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	if (phba->sli_rev < LPFC_SLI_REV4) {
		spin_lock_irq(&phba->hbalock);
		if (bf_get(lpfc_mbx_read_top_mm, la))
			phba->sli.sli_flag |= LPFC_MENLO_MAINT;
		else
			phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
		spin_unlock_irq(&phba->hbalock);
	}

	phba->link_events++;
	if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
	    !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x x%x %d\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0],
					bf_get(lpfc_mbx_read_top_mm, la),
					bf_get(lpfc_mbx_read_top_fa, la),
					phba->wait_4_mlo_maint_flg);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
		   LPFC_ATT_LINK_DOWN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1308 Link Down Event in loop back mode "
				"x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				bf_get(lpfc_mbx_read_top_mm, la),
				bf_get(lpfc_mbx_read_top_fa, la));
		lpfc_mbx_issue_link_down(phba);
	}
	if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
	    ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) {
		if (phba->link_state != LPFC_LINK_DOWN) {
			phba->fc_stat.LinkDown++;
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1312 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
			lpfc_mbx_issue_link_down(phba);
		} else
			lpfc_enable_la(phba);

		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1310 Menlo Maint Mode Link up Event x%x rcvd "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		/*
		 * The cmnd that triggered this will be waiting for this
		 * signal.
		 */
		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
		if (phba->wait_4_mlo_maint_flg) {
			phba->wait_4_mlo_maint_flg = 0;
			wake_up_interruptible(&phba->wait_4_mlo_m_q);
		}
	}

	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    bf_get(lpfc_mbx_read_top_fa, la)) {
		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
			lpfc_issue_clear_la(phba, vport);
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n",
				bf_get(lpfc_mbx_read_top_fa, la));
	}

lpfc_mbx_cmpl_read_topology_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);
	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We rcvd a rscn after issuing this
		 * mbox reg login, we may have cycled
		 * back through the state and be
		 * back at reg login state so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(shost->host_lock);

		/*
		 * We cannot leave the RPI registered because
		 * if we go thru discovery again for this ndlp
		 * a subsequent REG_RPI will fail.
		 */
		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
		lpfc_unreg_rpi(vport, ndlp);
	}

	/* Call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
			vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
				LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_cleanup_vports_rrqs(vport, NULL);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;

		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
			if (phba->nvmet_support)
				lpfc_nvmet_update_targetport(phba);
			else
				lpfc_nvme_update_localport(vport);
		}
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
3624 * lpfc_create_static_vport - Read HBA config region to create static vports.
3625 * @phba: pointer to lpfc hba data structure.
3627 * This routine issue a DUMP mailbox command for config region 22 to get
3628 * the list of static vports to be created. The function create vports
 * based on the information returned from the HBA.
 */
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int mbx_wait_rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *)vport_info;
	do {
		/* free dma buffer from previous round */
		if (pmb->context1) {
			mp = (struct lpfc_dmabuf *)pmb->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
							LPFC_MBOX_TMO);

		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0544 lpfc_create_static_vport failed to"
				" issue dump mailbox command ret 0x%x "
				"status 0x%x\n",
				mbx_wait_rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *)pmb->context1;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
				sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				vport_buff + offset,
				byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
			!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0545 lpfc_create_static_vport bad"
			" information header 0x%x 0x%x\n",
			le32_to_cpu(vport_info->signature),
			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0546 lpfc_create_static_vport failed to"
				" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	if (mbx_wait_rc != MBX_TIMEOUT) {
		if (pmb->context1) {
			mp = (struct lpfc_dmabuf *)pmb->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(pmb, phba->mbox_mem_pool);
	}
}
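
/*
 * Note: the dump loop above reads the static vport table out of adapter
 * flash in chunks via repeated DUMP mailbox commands.  On SLI4 each round
 * returns data in a separate DMA buffer (pmb->context1) with the byte
 * count reported in mb_words[5]; on SLI3 the data is embedded in the
 * mailbox response area at DMP_RSP_OFFSET.  The loop terminates when a
 * round returns no data or the static_vport_info structure is full.
 */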

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count to ndlp after the
			 * references to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the reference count to ndlp after the references
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* when physical port receives a logo, do not start
		 * vport discovery */
		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
			lpfc_start_fdiscs(phba);
		else {
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
			spin_unlock_irq(shost->host_lock);
		}
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end after
	 * all the current references to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}

/*
 * This routine will issue a GID_FT for each FC4 Type supported
 * by the driver. ALL GID_FTs must complete before discovery is started.
 */
int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;

	/* Good status, issue CT Request to NameServer */
	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
			/* Cannot issue NameServer FCP Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
					 "0604 %s FC TYPE %x %s\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_FCP,
					 "Finishing discovery.");
			return 0;
		}
		vport->gidft_inp++;
	}

	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
			/* Cannot issue NameServer NVME Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
					 "0605 %s FC_TYPE %x %s %d\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_NVME,
					 "Finishing discovery: gidftinp ",
					 vport->gidft_inp);
			if (vport->gidft_inp == 0)
				return 0;
		} else
			vport->gidft_inp++;
	}
	return vport->gidft_inp;
}
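
/*
 * Note: the return value is the number of GID_FT requests now outstanding
 * (vport->gidft_inp).  A return of 0 tells the caller that no NameServer
 * query could be started, so discovery has to be wrapped up immediately.
 */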

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;
	vport->gidft_inp = 0;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);

out:
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);

		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
				    FC_TYPE_NVME);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	if (lpfc_issue_gidft(vport) == 0)
		goto out;

	/*
	 * At this point in time we may need to wait for multiple
	 * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
	 *
	 * decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport  *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
		return;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port.  But fc_remote_port_add zeros the space to which
	 * rport->dd_data points.  So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	rport = ndlp->rport;
	if (rport) {
		rdata = rport->dd_data;
		/* break the link before dropping the ref */
		ndlp->rport = NULL;
		if (rdata && rdata->pnode == ndlp)
			lpfc_nlp_put(ndlp);
		rdata->pnode = NULL;
		/* drop reference for earlier registration */
		put_device(&rport->dev);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add:       did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	/* Don't add the remote port if unloading. */
	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3183 rport register x%06x, rport %p role x%x\n",
			 ndlp->nlp_DID, rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}

static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_hba  *phba = vport->phba;

	if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport delete:    did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3184 rport unregister x%06x, rport %p\n",
			 ndlp->nlp_DID, rport);

	fc_remote_port_delete(rport);

	return;
}

static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		if (vport->fc_npr_cnt == 0 && count == -1)
			vport->fc_npr_cnt = 0;
		else
			vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
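
/*
 * Note: callers invoke this in matched pairs around a state change, e.g.
 * lpfc_nlp_set_state() decrements the old state's counter with count == -1
 * and increments the new state's counter with count == 1, so the per-state
 * counts track the fc_nodes list exactly.
 */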

static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* FCP and NVME Transport interface */
	if ((old_state == NLP_STE_MAPPED_NODE ||
	     old_state == NLP_STE_UNMAPPED_NODE)) {
		if (ndlp->rport) {
			vport->phba->nport_event_cnt++;
			lpfc_unregister_remote_port(ndlp);
		}

		/* Notify the NVME transport of this rport's loss */
		if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		     (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
		    (vport->phba->nvmet_support == 0) &&
		    ((ndlp->nlp_fc4_type & NLP_FC4_NVME) ||
		     (ndlp->nlp_DID == Fabric_DID))) {
			vport->phba->nport_event_cnt++;
			lpfc_nvme_unregister_port(vport, ndlp);
		}
	}

	/* FCP and NVME Transport interfaces */

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		if ((ndlp->nlp_fc4_type & NLP_FC4_FCP) ||
		    (ndlp->nlp_DID == Fabric_DID)) {
			vport->phba->nport_event_cnt++;
			/*
			 * Tell the fc transport about the port, if we haven't
			 * already. If we have, and it's a scsi entity, be
			 * sure to unblock any attached scsi devices
			 */
			lpfc_register_remote_port(vport, ndlp);
		}
		/* Notify the NVME transport of this new rport. */
		if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
			if (vport->phba->nvmet_support == 0) {
				/* Register this rport with the transport.
				 * Initiators take the NDLP ref count in
				 * the register.
				 */
				vport->phba->nport_event_cnt++;
				lpfc_nvme_register_port(vport, ndlp);
			} else {
				/* Just take an NDLP ref count since the
				 * target does not register rports.
				 */
				lpfc_nlp_get(ndlp);
			}
		}
	}

	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * If the node just added to Mapped list was an FCP target,
	 * but the remote port registration failed or assigned a target
	 * id outside the presentable range - move the node to the
	 * Unmapped List.
	 */
	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (ndlp->nlp_type & NLP_FCP_TARGET) &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}

static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_LOGO_ISSUE] = "LOGO",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}

void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
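
/*
 * Note: this is the single entry point for node state transitions; a
 * typical discovery path moves a node NPR -> PLOGI -> REG_LOGIN -> PRLI
 * -> MAPPED, one call per step, e.g.:
 *
 *	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
 *
 * Each call fixes up the per-state counters and lets
 * lpfc_nlp_state_cleanup() register or unregister the node with the FC
 * and NVME transports as needed.
 */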

void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}

void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}

void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}

/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called whenever a node object needs to be
 * initialized. It initializes all the fields of the node object. Although
 * the reference to phba from @ndlp can be obtained indirectly through its
 * reference to @vport, a direct reference to phba is taken here by @ndlp.
 * This is because the life-span of @ndlp might go beyond the existence of
 * @vport, as the final release of ndlp is determined by its reference
 * count, and operations on @ndlp need the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	ndlp->nlp_fc4_type = NLP_FC4_NONE;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
}

struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;
	unsigned long *active_rrqs_xri_bitmap = NULL;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;
	if (phba->sli_rev == LPFC_SLI_REV4)
		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;

	/* re-initialize ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	if (phba->sli_rev == LPFC_SLI_REV4)
		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
				 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);
	}

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable:     did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}

void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp stays marked as UNUSED on the
	 * list until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_cleanup_vports_rrqs(vport, ndlp);
		lpfc_unreg_rpi(vport, ndlp);
	}

	lpfc_nlp_put(ndlp);
	return;
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer:  tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}

/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
static int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == LPFC_FCP_RING) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}

		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	}
	return 0;
}
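
/*
 * Note: the case labels in the ELS switch above intentionally fall
 * through - a GEN_REQUEST64 iocb that does not match by its ndlp context
 * pointer is still checked against the ELS remote ID and response
 * context tests below it, so a matching iocb is found by any of the
 * three criteria.
 */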

static void
__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
		struct list_head *dequeue_list)
{
	struct lpfc_iocbq *iocb, *next_iocb;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
			/* match, dequeue */
			list_move_tail(&iocb->list, dequeue_list);
	}
}

static void
lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t i;

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++)
		__lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
						dequeue_list);
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *qp = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
		spin_unlock_irq(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		if (phba->sli_rev != LPFC_SLI_REV4)
			lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
		else
			lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}

/**
 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function will issue an ELS LOGO command after completing
 * the UNREG_RPI.
 **/
static void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *)(pmb->context1);
	if (!ndlp)
		return;
	lpfc_issue_els_logo(vport, ndlp, 0);
	mempool_free(pmb, phba->mbox_mem_pool);
}

/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc, acc_plogi = 1;
	uint16_t rpi;

	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
			lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
					 "3366 RPI x%x needs to be "
					 "unregistered nlp_flag x%x "
					 "did x%x\n",
					 ndlp->nlp_rpi, ndlp->nlp_flag,
					 ndlp->nlp_DID);
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			/* SLI4 ports require the physical rpi value. */
			rpi = ndlp->nlp_rpi;
			if (phba->sli_rev == LPFC_SLI_REV4)
				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
			mbox->vport = vport;
			if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
				mbox->context1 = ndlp;
				mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
			} else
				if (phba->sli_rev == LPFC_SLI_REV4 &&
				    (!(vport->load_flag & FC_UNLOADING)) &&
				    (bf_get(lpfc_sli_intf_if_type,
				     &phba->sli4_hba.sli_intf) ==
				      LPFC_SLI_INTF_IF_TYPE_2) &&
				    (kref_read(&ndlp->kref) > 0)) {
					mbox->context1 = lpfc_nlp_get(ndlp);
					mbox->mbox_cmpl =
						lpfc_sli4_unreg_rpi_cmpl_clr;
					/*
					 * accept PLOGIs after unreg_rpi_cmpl
					 */
					acc_plogi = 0;
				} else
					mbox->mbox_cmpl =
						lpfc_sli_def_mbox_cmpl;

			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				acc_plogi = 1;
			}
		}
		lpfc_no_rpi(phba, ndlp);

		if (phba->sli_rev != LPFC_SLI_REV4)
			ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		if (acc_plogi)
			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
		return 1;
	}
	ndlp->nlp_flag &= ~NLP_LOGO_ACC;
	return 0;
}
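
/*
 * Note: acc_plogi gates the NLP_LOGO_ACC clearing above.  When the
 * UNREG_RPI is issued with the lpfc_sli4_unreg_rpi_cmpl_clr completion
 * (SLI4, IF_TYPE_2), the flag is left set so that PLOGIs are only
 * accepted again once the unreg actually completes; on any failure path
 * acc_plogi reverts to 1 and the flag is cleared immediately.
 */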

/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2884 Vport array allocation failed\n");
		return;
	}
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_unreg_all_rpis(vport);
		return;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
				 mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1836 Could not issue "
					 "unreg_login(all_rpis) status %d\n",
					 rc);
	}
}

void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
			       mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}

/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		lpfc_disable_node(vport, ndlp);
	}

	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Cleanup REG_LOGIN completions which are not yet processed */
	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
			(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
			(ndlp != (struct lpfc_nodelist *) mb->context2))
			continue;

		mb->context2 = NULL;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}

	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);
	lpfc_cleanup_vports_rrqs(vport, ndlp);
	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}

/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = ndlp;
				rc = lpfc_sli_issue_mbox(phba, mbox,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox,
						     phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * ndlp->rport must be set to NULL before it reaches here
	 * i.e. break rport/node link before doing lpfc_nlp_put for
	 * registered rport and then drop the reference of rport.
	 */
	if (ndlp->rport) {
		/*
		 * extra lpfc_nlp_put dropped the reference of ndlp
		 * for registered rport so need to cleanup rport
		 */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0940 removed node x%p DID x%x "
				" rport not null %p\n",
				ndlp, ndlp->nlp_DID, ndlp->rport);
		rport = ndlp->rport;
		rdata = rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		put_device(&rport->dev);
	}
}

static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			/* This code is supposed to match the ID
			 * for a private loop device that is
			 * connected to an fl_port. But we need to
			 * check that the port did not just go
			 * from pt2pt to fabric or we could end
			 * up matching ndlp->nlp_DID 000001 to
			 * fabric DID 0x20101
			 */
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id &&
				    vport->phba->fc_topology ==
				    LPFC_TOPOLOGY_LOOP)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
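
/*
 * Note: a D_ID breaks down as domain:area:id, one byte each, e.g. a
 * fabric DID of 0x020101 has domain 0x02, area 0x01, and AL_PA/id 0x01,
 * while a private-loop device carries only the low id byte with domain
 * and area both zero.  That asymmetry is what the two inner checks
 * above are guarding against.
 */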

/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x %p\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1,
					 ndlp->active_rrqs_xri_bitmap);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}

struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irqrestore(shost->host_lock, iflags);
	return ndlp;
}

struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		if (vport->phba->nvmet_support)
			return ndlp;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		if (vport->phba->nvmet_support)
			return ndlp;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			if (vport->phba->nvmet_support)
				return ndlp;
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		if (vport->phba->nvmet_support)
			return ndlp;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}

/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
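
/*
 * Note: alpa_map[0] holds the number of valid entries that follow, so a
 * firmware-supplied loop map is walked as alpa_map[1..alpa_map[0]].
 * When no map is available, every ALPA in lpfcAlpaArray is probed
 * instead, honoring cfg_scan_down for the scan direction.
 */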

void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
	struct lpfc_sli_ring *fcp_ring   = &psli->sli3_ring[LPFC_FCP_RING];
	int rc;

	/*
	 * if it's not a physical port or if we have already sent
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
	    (phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}

/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}

/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
				 "3315 Link is not up %x\n",
				 phba->link_state);
		return;
	}

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/* Register the VPI for SLI3, NPIV only. */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_clear_la(phba, vport);
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}

/*
 *  Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 *  ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = lpfc_phba_elsring(phba);

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}

void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
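
/*
 * Note: lpfc_disc_timeout() runs in timer (interrupt) context, so it only
 * marks WORKER_DISC_TMO and kicks the worker thread; the actual timeout
 * handling is done later in process context by the handler below.
 */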

static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
		/*
		 * port_state is identically LPFC_LOCAL_CFG_LINK while
		 * waiting for FAN timeout
		 */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");

		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			if (phba->sli_rev <= LPFC_SLI_REV3)
				lpfc_initial_flogi(vport);
			else
				lpfc_issue_init_vfi(vport);
			return;
		}
		break;

	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
		/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			vport->gidft_inp = 0;
			rc = lpfc_issue_gidft(vport);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
				/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		if (phba->sli_rev != LPFC_SLI_REV4) {
			psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->sli3_ring[LPFC_FCP_RING].flag &=
				~LPFC_STOP_IOCB_EVENT;
		}
		vport->port_state = LPFC_VPORT_READY;
	}
	return;
}

/*
 * This routine handles processing an FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);
	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port).
	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
	 * DPRT -> RPRT (vports)
	 */
	if (vport->port_type == LPFC_PHYSICAL_PORT)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
	else
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	/* check for active node */
	if (!NLP_CHK_NODE_ACT(ndlp))
		return 0;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "3185 FIND node filter %p DID "
					 "Data: x%p x%x x%x\n",
					 filter, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag);
			return ndlp;
		}
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3186 FIND node filter %p NOT FOUND.\n", filter);
	return NULL;
}
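
/*
 * Note: __lpfc_find_node() takes any node_filter predicate, so a new
 * lookup only needs a small filter function.  A minimal sketch
 * (hypothetical filter, not part of the driver) matching on the SCSI
 * target id would look like:
 *
 *	static int lpfc_filter_by_sid(struct lpfc_nodelist *ndlp, void *param)
 *	{
 *		return ndlp->nlp_sid == *(uint16_t *)param;
 *	}
 *
 * Callers must hold the host_lock, as the _rpi/_wwpn wrappers below do.
 */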

/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi is
 * found, it returns the node list element pointer; otherwise it returns
 * NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the node list element pointer; otherwise it returns
 * NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi is
 * found, the routine returns the node element list pointer; otherwise it
 * returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
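
/*
 * Usage note (sketch): the double-underscore lookups assume the caller
 * already holds the SCSI host lock, while the plain wrappers take and
 * release it themselves. Code that does not hold the lock would write:
 *
 *	ndlp = lpfc_findnode_rpi(vport, rpi);
 *
 * whereas code already running under shost->host_lock must call:
 *
 *	ndlp = __lpfc_findnode_rpi(vport, rpi);
 *
 * to avoid self-deadlock on the host lock.
 */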

/**
 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: the physical host virtual N_Port identifier.
 *
 * This routine finds a vport on a HBA (referred by @phba) through a
 * @vpi. The function walks the HBA's vport list and returns the address
 * of the vport with the matching @vpi.
 *
 * Return code
 *    NULL - No vport with the matching @vpi found
 *    Otherwise - Address to the vport with the matching @vpi.
 **/
struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
	struct lpfc_vport *vport;
	unsigned long flags;
	int i = 0;

	/* The physical ports are always vpi 0 - translate is unnecessary. */
	if (vpi > 0) {
		/*
		 * Translate the physical vpi to the logical vpi. The
		 * vport stores the logical vpi.
		 */
		for (i = 0; i < phba->max_vpi; i++) {
			if (vpi == phba->vpi_ids[i])
				break;
		}

		if (i >= phba->max_vpi) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"2936 Could not find Vport mapped "
					"to vpi %d\n", vpi);
			return NULL;
		}
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->vpi == i) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return vport;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return NULL;
}
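
/*
 * Worked example (values are assumptions chosen for illustration): with
 * phba->max_vpi == 4 and phba->vpi_ids[] == { 0, 34, 35, 36 }, a call
 * with vpi == 35 matches at logical index i == 2, so the port_list walk
 * returns the vport whose vport->vpi is 2. This is the physical-to-logical
 * translation described in the comment above.
 */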

void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0007 rpi:%x DID:%x flg:%x refcnt:%d "
				 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);

		ndlp->active_rrqs_xri_bitmap =
				mempool_alloc(vport->phba->active_rrq_pool,
					      GFP_KERNEL);
		if (ndlp->active_rrqs_xri_bitmap)
			memset(ndlp->active_rrqs_xri_bitmap, 0,
			       ndlp->phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);
}
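
/*
 * Usage sketch (hedged; exact call sites vary): callers typically carve a
 * nodelist entry out of the driver's mempool and then initialize it for a
 * destination ID, roughly:
 *
 *	ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
 *	if (!ndlp)
 *		return NULL;
 *	lpfc_nlp_init(vport, ndlp, did);
 *
 * lpfc_initialize_node() (called above) is what seeds the kref, the delay
 * timer, and the vport/phba back-pointers for the new node.
 */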

/* This routine releases all resources associated with a specific NPort's
 * ndlp and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p did %x "
			"usgmap:x%x refcnt:%d rpi:%x\n",
			(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
			kref_read(&ndlp->kref), ndlp->nlp_rpi);

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		if (phba->sli_rev == LPFC_SLI_REV4)
			mempool_free(ndlp->active_rrqs_xri_bitmap,
				     ndlp->phba->active_rrq_pool);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}

/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery
 * thread is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			kref_read(&ndlp->kref));
		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
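
/*
 * Usage sketch (illustrative): a discovery path that hands an ndlp to an
 * asynchronous operation takes a reference before queuing the work and
 * drops it from the completion path, roughly:
 *
 *	if (!lpfc_nlp_get(ndlp))
 *		return;		 (node is already being released)
 *	... issue the asynchronous command that references ndlp ...
 *	... and in its completion handler: lpfc_nlp_put(ndlp); ...
 *
 * The NULL return from lpfc_nlp_get() is what keeps a racing release from
 * resurrecting a dying node.
 */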

/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put:        did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		kref_read(&ndlp->kref));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (kref_read(&ndlp->kref) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note, kref_put returns 1 when decrementing a reference count
	 * that was 1: it invokes the release callback function, but it
	 * still leaves the reference count at 1 (it does not actually
	 * perform the last decrement). Otherwise, it actually decrements
	 * the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}

/* This routine frees the specified nodelist if it is not in use by any
 * other discovery thread. It returns 1 if the ndlp has been freed; a
 * return value of 0 indicates the ndlp has not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		kref_read(&ndlp->kref));
	if (kref_read(&ndlp->kref) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}
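
/*
 * Semantics sketch: when the only remaining reference is the initial one,
 * lpfc_nlp_not_used() performs the final put and reports the free, so a
 * caller would typically clear its pointer on success:
 *
 *	if (lpfc_nlp_not_used(ndlp))
 *		ndlp = NULL;	 (node was released; pointer is stale)
 */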

/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 **/
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	vports = lpfc_create_vport_work_array(phba);

	/* If driver cannot allocate memory, indicate fcf is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		/*
		 * If the CVL_RCVD bit is not set then we have sent the
		 * flogi.
		 * If dev_loss fires while we are waiting we do not want to
		 * unreg the fcf.
		 */
		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
			spin_unlock_irq(shost->host_lock);
			ret = 1;
			goto out;
		}
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				ret = 1;
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
						"2624 RPI %x DID %x flag %x "
						"still logged in\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_flag);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}

/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 **/
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2555 UNREG_VFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 **/
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2550 UNREG_FCFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
}
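
/*
 * Wiring sketch: these completion handlers run asynchronously once the
 * SLI layer finishes the mailbox command; the issuing path attaches them
 * before submission, as lpfc_sli4_unregister_fcf() below does:
 *
 *	mbox->vport = phba->pport;
 *	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 *
 * Because the handler frees the mailbox, the issuer must not touch it
 * after a successful MBX_NOWAIT submission.
 */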

/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
 * the VFI.
 **/
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i = 0, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_unreg_all_rpis(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
		if (ndlp)
			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
		lpfc_cleanup_pending_mbox(phba->pport);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(phba->pport);
		lpfc_mbx_unreg_vpi(phba->pport);
		shost = lpfc_shost_from_vport(phba->pport);
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister the physical port VFI */
	rc = lpfc_issue_unreg_vfi(phba->pport);
	return rc;
}

/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues an unregister FCF mailbox command to the HBA to
 * unregister the currently registered FCF record. The driver does not
 * reset the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 **/
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}

/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 **/
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}

/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 **/
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there are any connected remote ports for the FCF
 * and, if all the devices are disconnected, unregisters the FCFI.
 * It also tries to use another FCF for discovery.
 **/
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}

/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structure for the FCF connection
 * record table read from config region 23.
 **/
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
		       uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t) /
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
				     GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2566 Failed to allocate connection"
					" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
		       sizeof(struct lpfc_fcf_conn_rec));
		list_add_tail(&conn_entry->list,
			      &phba->fcf_conn_rec_list);
	}

	if (!list_empty(&phba->fcf_conn_rec_list)) {
		i = 0;
		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
				    list) {
			conn_rec = &conn_entry->conn_rec;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3345 FCF connection list rec[%02d]: "
					"flags:x%04x, vtag:x%04x, "
					"fabric_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x, "
					"switch_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x\n", i++,
					conn_rec->flags, conn_rec->vlan_tag,
					conn_rec->fabric_name[0],
					conn_rec->fabric_name[1],
					conn_rec->fabric_name[2],
					conn_rec->fabric_name[3],
					conn_rec->fabric_name[4],
					conn_rec->fabric_name[5],
					conn_rec->fabric_name[6],
					conn_rec->fabric_name[7],
					conn_rec->switch_name[0],
					conn_rec->switch_name[1],
					conn_rec->switch_name[2],
					conn_rec->switch_name[3],
					conn_rec->switch_name[4],
					conn_rec->switch_name[5],
					conn_rec->switch_name[6],
					conn_rec->switch_name[7]);
		}
	}
}
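
/*
 * Worked example (the struct size is an assumption for illustration): the
 * header's length field counts 32-bit words, so with a 20-byte
 * lpfc_fcf_conn_rec a header length of 10 words gives
 * record_count = 10 * 4 / 20 = 2 connection records after the header.
 */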

/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 **/
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
		     uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
	    (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
}
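
/*
 * Worked example: the low 12 bits of the tag are the VLAN ID; the upper
 * bits carry the 802.1Q priority/CFI. A little-endian tag value of 0x6123
 * (priority 3, VLAN 0x123) therefore stores phba->vlan_id = 0x123 after
 * the & 0xFFF mask above.
 */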

/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If found, it returns a
 * pointer to the record; otherwise it returns NULL.
 **/
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
	    (size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
	       <= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
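
/*
 * Worked example (record types are made-up values): each TLV record is a
 * one-word header (type in byte 0, rec_length in byte 1, counted in
 * 32-bit data words) followed by rec_length data words. For a buffer
 * laid out as { 0xA0, 2, .., eight data bytes, 0xB0, 1, .. }, a search
 * for rec_type 0xB0 skips the first record by advancing offset by
 * 2 * 4 + 4 = 12 bytes and returns &buff[12].
 */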

/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 **/
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		     uint8_t *buff,
		     uint32_t size)
{
	uint32_t offset = 0;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words, the signature and version
	 * cannot be verified.
	 */
	if (size < 2 * sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}