1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23 /* See Fibre Channel protocol T11 FC-LS for details */
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h>
37 #include "lpfc_sli4.h"
39 #include "lpfc_disc.h"
40 #include "lpfc_scsi.h"
42 #include "lpfc_logmsg.h"
43 #include "lpfc_crtn.h"
44 #include "lpfc_vport.h"
45 #include "lpfc_debugfs.h"
47 static int lpfc_els_retry(struct lpfc_hba
*, struct lpfc_iocbq
*,
49 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba
*, struct lpfc_iocbq
*,
51 static void lpfc_fabric_abort_vport(struct lpfc_vport
*vport
);
52 static int lpfc_issue_els_fdisc(struct lpfc_vport
*vport
,
53 struct lpfc_nodelist
*ndlp
, uint8_t retry
);
54 static int lpfc_issue_fabric_iocb(struct lpfc_hba
*phba
,
55 struct lpfc_iocbq
*iocb
);
57 static int lpfc_max_els_tries
= 3;
60 * lpfc_els_chk_latt - Check host link attention event for a vport
61 * @vport: pointer to a host virtual N_Port data structure.
63 * This routine checks whether there is an outstanding host link
64 * attention event during the discovery process with the @vport. It is done
65 * by reading the HBA's Host Attention (HA) register. If there is any host
66 * link attention events during this @vport's discovery process, the @vport
67 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
68 * be issued if the link state is not already in host link cleared state,
69 * and a return code shall indicate whether the host link attention event
72 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
73 * state in LPFC_VPORT_READY, the request for checking host link attention
74 * event will be ignored and a return code shall indicate no host link
75 * attention event had happened.
78 * 0 - no host link attention event happened
79 * 1 - host link attention event happened
82 lpfc_els_chk_latt(struct lpfc_vport
*vport
)
84 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
85 struct lpfc_hba
*phba
= vport
->phba
;
88 if (vport
->port_state
>= LPFC_VPORT_READY
||
89 phba
->link_state
== LPFC_LINK_DOWN
||
90 phba
->sli_rev
> LPFC_SLI_REV3
)
93 /* Read the HBA Host Attention Register */
94 if (lpfc_readl(phba
->HAregaddr
, &ha_copy
))
97 if (!(ha_copy
& HA_LATT
))
100 /* Pending Link Event during Discovery */
101 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
102 "0237 Pending Link Event during "
103 "Discovery: State x%x\n",
104 phba
->pport
->port_state
);
106 /* CLEAR_LA should re-enable link attention events and
107 * we should then immediately take a LATT event. The
108 * LATT processing should call lpfc_linkdown() which
109 * will cleanup any left over in-progress discovery
112 spin_lock_irq(shost
->host_lock
);
113 vport
->fc_flag
|= FC_ABORT_DISCOVERY
;
114 spin_unlock_irq(shost
->host_lock
);
116 if (phba
->link_state
!= LPFC_CLEAR_LA
)
117 lpfc_issue_clear_la(phba
, vport
);
123 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
124 * @vport: pointer to a host virtual N_Port data structure.
125 * @expectRsp: flag indicating whether response is expected.
126 * @cmdSize: size of the ELS command.
127 * @retry: number of retries to the command IOCB when it fails.
128 * @ndlp: pointer to a node-list data structure.
129 * @did: destination identifier.
130 * @elscmd: the ELS command code.
132 * This routine is used for allocating a lpfc-IOCB data structure from
133 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
134 * passed into the routine for discovery state machine to issue an Extended
135 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
136 * and preparation routine that is used by all the discovery state machine
137 * routines and the ELS command-specific fields will be later set up by
138 * the individual discovery machine routines after calling this routine
139 * allocating and preparing a generic IOCB data structure. It fills in the
140 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
141 * payload and response payload (if expected). The reference count on the
142 * ndlp is incremented by 1 and the reference to the ndlp is put into
143 * context1 of the IOCB data structure for this IOCB to hold the ndlp
144 * reference for the command's callback function to access later.
147 * Pointer to the newly allocated/prepared els iocb data structure
148 * NULL - when els iocb data structure allocation/preparation failed
151 lpfc_prep_els_iocb(struct lpfc_vport
*vport
, uint8_t expectRsp
,
152 uint16_t cmdSize
, uint8_t retry
,
153 struct lpfc_nodelist
*ndlp
, uint32_t did
,
156 struct lpfc_hba
*phba
= vport
->phba
;
157 struct lpfc_iocbq
*elsiocb
;
158 struct lpfc_dmabuf
*pcmd
, *prsp
, *pbuflist
;
159 struct ulp_bde64
*bpl
;
163 if (!lpfc_is_link_up(phba
))
166 /* Allocate buffer for command iocb */
167 elsiocb
= lpfc_sli_get_iocbq(phba
);
173 * If this command is for fabric controller and HBA running
174 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
176 if ((did
== Fabric_DID
) &&
177 (phba
->hba_flag
& HBA_FIP_SUPPORT
) &&
178 ((elscmd
== ELS_CMD_FLOGI
) ||
179 (elscmd
== ELS_CMD_FDISC
) ||
180 (elscmd
== ELS_CMD_LOGO
)))
183 elsiocb
->iocb_flag
|=
184 ((LPFC_ELS_ID_FLOGI
<< LPFC_FIP_ELS_ID_SHIFT
)
185 & LPFC_FIP_ELS_ID_MASK
);
188 elsiocb
->iocb_flag
|=
189 ((LPFC_ELS_ID_FDISC
<< LPFC_FIP_ELS_ID_SHIFT
)
190 & LPFC_FIP_ELS_ID_MASK
);
193 elsiocb
->iocb_flag
|=
194 ((LPFC_ELS_ID_LOGO
<< LPFC_FIP_ELS_ID_SHIFT
)
195 & LPFC_FIP_ELS_ID_MASK
);
199 elsiocb
->iocb_flag
&= ~LPFC_FIP_ELS_ID_MASK
;
201 icmd
= &elsiocb
->iocb
;
203 /* fill in BDEs for command */
204 /* Allocate buffer for command payload */
205 pcmd
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
207 pcmd
->virt
= lpfc_mbuf_alloc(phba
, MEM_PRI
, &pcmd
->phys
);
208 if (!pcmd
|| !pcmd
->virt
)
209 goto els_iocb_free_pcmb_exit
;
211 INIT_LIST_HEAD(&pcmd
->list
);
213 /* Allocate buffer for response payload */
215 prsp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
217 prsp
->virt
= lpfc_mbuf_alloc(phba
, MEM_PRI
,
219 if (!prsp
|| !prsp
->virt
)
220 goto els_iocb_free_prsp_exit
;
221 INIT_LIST_HEAD(&prsp
->list
);
225 /* Allocate buffer for Buffer ptr list */
226 pbuflist
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
228 pbuflist
->virt
= lpfc_mbuf_alloc(phba
, MEM_PRI
,
230 if (!pbuflist
|| !pbuflist
->virt
)
231 goto els_iocb_free_pbuf_exit
;
233 INIT_LIST_HEAD(&pbuflist
->list
);
236 icmd
->un
.elsreq64
.bdl
.addrHigh
= putPaddrHigh(pbuflist
->phys
);
237 icmd
->un
.elsreq64
.bdl
.addrLow
= putPaddrLow(pbuflist
->phys
);
238 icmd
->un
.elsreq64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
239 icmd
->un
.elsreq64
.bdl
.bdeSize
= (2 * sizeof(struct ulp_bde64
));
241 icmd
->un
.elsreq64
.remoteID
= did
; /* DID */
242 icmd
->ulpCommand
= CMD_ELS_REQUEST64_CR
;
243 if (elscmd
== ELS_CMD_FLOGI
)
244 icmd
->ulpTimeout
= FF_DEF_RATOV
* 2;
246 icmd
->ulpTimeout
= phba
->fc_ratov
* 2;
248 icmd
->un
.xseq64
.bdl
.addrHigh
= putPaddrHigh(pbuflist
->phys
);
249 icmd
->un
.xseq64
.bdl
.addrLow
= putPaddrLow(pbuflist
->phys
);
250 icmd
->un
.xseq64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
251 icmd
->un
.xseq64
.bdl
.bdeSize
= sizeof(struct ulp_bde64
);
252 icmd
->un
.xseq64
.xmit_els_remoteID
= did
; /* DID */
253 icmd
->ulpCommand
= CMD_XMIT_ELS_RSP64_CX
;
255 icmd
->ulpBdeCount
= 1;
257 icmd
->ulpClass
= CLASS3
;
260 * If we have NPIV enabled, we want to send ELS traffic by VPI.
261 * For SLI4, since the driver controls VPIs we also want to include
262 * all ELS pt2pt protocol traffic as well.
264 if ((phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
) ||
265 ((phba
->sli_rev
== LPFC_SLI_REV4
) &&
266 (vport
->fc_flag
& FC_PT2PT
))) {
269 icmd
->un
.elsreq64
.myID
= vport
->fc_myDID
;
271 /* For ELS_REQUEST64_CR, use the VPI by default */
272 icmd
->ulpContext
= phba
->vpi_ids
[vport
->vpi
];
276 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
277 if (elscmd
== ELS_CMD_ECHO
)
278 icmd
->ulpCt_l
= 0; /* context = invalid RPI */
280 icmd
->ulpCt_l
= 1; /* context = VPI */
283 bpl
= (struct ulp_bde64
*) pbuflist
->virt
;
284 bpl
->addrLow
= le32_to_cpu(putPaddrLow(pcmd
->phys
));
285 bpl
->addrHigh
= le32_to_cpu(putPaddrHigh(pcmd
->phys
));
286 bpl
->tus
.f
.bdeSize
= cmdSize
;
287 bpl
->tus
.f
.bdeFlags
= 0;
288 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
292 bpl
->addrLow
= le32_to_cpu(putPaddrLow(prsp
->phys
));
293 bpl
->addrHigh
= le32_to_cpu(putPaddrHigh(prsp
->phys
));
294 bpl
->tus
.f
.bdeSize
= FCELSSIZE
;
295 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
296 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
299 /* prevent preparing iocb with NULL ndlp reference */
300 elsiocb
->context1
= lpfc_nlp_get(ndlp
);
301 if (!elsiocb
->context1
)
302 goto els_iocb_free_pbuf_exit
;
303 elsiocb
->context2
= pcmd
;
304 elsiocb
->context3
= pbuflist
;
305 elsiocb
->retry
= retry
;
306 elsiocb
->vport
= vport
;
307 elsiocb
->drvrTimeout
= (phba
->fc_ratov
<< 1) + LPFC_DRVR_TIMEOUT
;
310 list_add(&prsp
->list
, &pcmd
->list
);
313 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
314 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
315 "0116 Xmit ELS command x%x to remote "
316 "NPORT x%x I/O tag: x%x, port state:x%x"
318 elscmd
, did
, elsiocb
->iotag
,
322 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
323 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
324 "0117 Xmit ELS response x%x to remote "
325 "NPORT x%x I/O tag: x%x, size: x%x "
326 "port_state x%x fc_flag x%x\n",
327 elscmd
, ndlp
->nlp_DID
, elsiocb
->iotag
,
328 cmdSize
, vport
->port_state
,
333 els_iocb_free_pbuf_exit
:
335 lpfc_mbuf_free(phba
, prsp
->virt
, prsp
->phys
);
338 els_iocb_free_prsp_exit
:
339 lpfc_mbuf_free(phba
, pcmd
->virt
, pcmd
->phys
);
342 els_iocb_free_pcmb_exit
:
344 lpfc_sli_release_iocbq(phba
, elsiocb
);
349 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
350 * @vport: pointer to a host virtual N_Port data structure.
352 * This routine issues a fabric registration login for a @vport. An
353 * active ndlp node with Fabric_DID must already exist for this @vport.
354 * The routine invokes two mailbox commands to carry out fabric registration
355 * login through the HBA firmware: the first mailbox command requests the
356 * HBA to perform link configuration for the @vport; and the second mailbox
357 * command requests the HBA to perform the actual fabric registration login
361 * 0 - successfully issued fabric registration login for @vport
362 * -ENXIO -- failed to issue fabric registration login for @vport
365 lpfc_issue_fabric_reglogin(struct lpfc_vport
*vport
)
367 struct lpfc_hba
*phba
= vport
->phba
;
369 struct lpfc_dmabuf
*mp
;
370 struct lpfc_nodelist
*ndlp
;
371 struct serv_parm
*sp
;
375 sp
= &phba
->fc_fabparam
;
376 ndlp
= lpfc_findnode_did(vport
, Fabric_DID
);
377 if (!ndlp
|| !NLP_CHK_NODE_ACT(ndlp
)) {
382 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
388 vport
->port_state
= LPFC_FABRIC_CFG_LINK
;
389 lpfc_config_link(phba
, mbox
);
390 mbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
393 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
394 if (rc
== MBX_NOT_FINISHED
) {
399 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
404 rc
= lpfc_reg_rpi(phba
, vport
->vpi
, Fabric_DID
, (uint8_t *)sp
, mbox
,
411 mbox
->mbox_cmpl
= lpfc_mbx_cmpl_fabric_reg_login
;
413 /* increment the reference count on ndlp to hold reference
414 * for the callback routine.
416 mbox
->context2
= lpfc_nlp_get(ndlp
);
418 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
419 if (rc
== MBX_NOT_FINISHED
) {
421 goto fail_issue_reg_login
;
426 fail_issue_reg_login
:
427 /* decrement the reference count on ndlp just incremented
428 * for the failed mbox command.
431 mp
= (struct lpfc_dmabuf
*) mbox
->context1
;
432 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
435 mempool_free(mbox
, phba
->mbox_mem_pool
);
438 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
439 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
440 "0249 Cannot issue Register Fabric login: Err %d\n", err
);
445 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
446 * @vport: pointer to a host virtual N_Port data structure.
448 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
449 * the @vport. This mailbox command is necessary for SLI4 port only.
452 * 0 - successfully issued REG_VFI for @vport
453 * A failure code otherwise.
456 lpfc_issue_reg_vfi(struct lpfc_vport
*vport
)
458 struct lpfc_hba
*phba
= vport
->phba
;
459 LPFC_MBOXQ_t
*mboxq
= NULL
;
460 struct lpfc_nodelist
*ndlp
;
461 struct lpfc_dmabuf
*dmabuf
= NULL
;
464 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
465 if ((phba
->sli_rev
== LPFC_SLI_REV4
) &&
466 !(phba
->link_flag
& LS_LOOPBACK_MODE
) &&
467 !(vport
->fc_flag
& FC_PT2PT
)) {
468 ndlp
= lpfc_findnode_did(vport
, Fabric_DID
);
469 if (!ndlp
|| !NLP_CHK_NODE_ACT(ndlp
)) {
475 mboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
481 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
482 if ((vport
->fc_flag
& FC_FABRIC
) || (vport
->fc_flag
& FC_PT2PT
)) {
483 dmabuf
= kzalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
488 dmabuf
->virt
= lpfc_mbuf_alloc(phba
, MEM_PRI
, &dmabuf
->phys
);
493 memcpy(dmabuf
->virt
, &phba
->fc_fabparam
,
494 sizeof(struct serv_parm
));
497 vport
->port_state
= LPFC_FABRIC_CFG_LINK
;
499 lpfc_reg_vfi(mboxq
, vport
, dmabuf
->phys
);
501 lpfc_reg_vfi(mboxq
, vport
, 0);
503 mboxq
->mbox_cmpl
= lpfc_mbx_cmpl_reg_vfi
;
504 mboxq
->vport
= vport
;
505 mboxq
->context1
= dmabuf
;
506 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_NOWAIT
);
507 if (rc
== MBX_NOT_FINISHED
) {
515 mempool_free(mboxq
, phba
->mbox_mem_pool
);
518 lpfc_mbuf_free(phba
, dmabuf
->virt
, dmabuf
->phys
);
522 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
523 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
524 "0289 Issue Register VFI failed: Err %d\n", rc
);
529 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
530 * @vport: pointer to a host virtual N_Port data structure.
532 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
533 * the @vport. This mailbox command is necessary for SLI4 port only.
536 * 0 - successfully issued REG_VFI for @vport
537 * A failure code otherwise.
540 lpfc_issue_unreg_vfi(struct lpfc_vport
*vport
)
542 struct lpfc_hba
*phba
= vport
->phba
;
543 struct Scsi_Host
*shost
;
547 mboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
549 lpfc_printf_log(phba
, KERN_ERR
, LOG_DISCOVERY
|LOG_MBOX
,
550 "2556 UNREG_VFI mbox allocation failed"
551 "HBA state x%x\n", phba
->pport
->port_state
);
555 lpfc_unreg_vfi(mboxq
, vport
);
556 mboxq
->vport
= vport
;
557 mboxq
->mbox_cmpl
= lpfc_unregister_vfi_cmpl
;
559 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_NOWAIT
);
560 if (rc
== MBX_NOT_FINISHED
) {
561 lpfc_printf_log(phba
, KERN_ERR
, LOG_DISCOVERY
|LOG_MBOX
,
562 "2557 UNREG_VFI issue mbox failed rc x%x "
564 rc
, phba
->pport
->port_state
);
565 mempool_free(mboxq
, phba
->mbox_mem_pool
);
569 shost
= lpfc_shost_from_vport(vport
);
570 spin_lock_irq(shost
->host_lock
);
571 vport
->fc_flag
&= ~FC_VFI_REGISTERED
;
572 spin_unlock_irq(shost
->host_lock
);
577 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
578 * @vport: pointer to a host virtual N_Port data structure.
579 * @sp: pointer to service parameter data structure.
581 * This routine is called from FLOGI/FDISC completion handler functions.
582 * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric
583 * node nodename is changed in the completion service parameter else return
584 * 0. This function also set flag in the vport data structure to delay
585 * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit
586 * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric
587 * node nodename is changed in the completion service parameter.
590 * 0 - FCID and Fabric Nodename and Fabric portname is not changed.
591 * 1 - FCID or Fabric Nodename or Fabric portname is changed.
595 lpfc_check_clean_addr_bit(struct lpfc_vport
*vport
,
596 struct serv_parm
*sp
)
598 struct lpfc_hba
*phba
= vport
->phba
;
599 uint8_t fabric_param_changed
= 0;
600 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
602 if ((vport
->fc_prevDID
!= vport
->fc_myDID
) ||
603 memcmp(&vport
->fabric_portname
, &sp
->portName
,
604 sizeof(struct lpfc_name
)) ||
605 memcmp(&vport
->fabric_nodename
, &sp
->nodeName
,
606 sizeof(struct lpfc_name
)) ||
607 (vport
->vport_flag
& FAWWPN_PARAM_CHG
)) {
608 fabric_param_changed
= 1;
609 vport
->vport_flag
&= ~FAWWPN_PARAM_CHG
;
612 * Word 1 Bit 31 in common service parameter is overloaded.
613 * Word 1 Bit 31 in FLOGI request is multiple NPort request
614 * Word 1 Bit 31 in FLOGI response is clean address bit
616 * If fabric parameter is changed and clean address bit is
617 * cleared delay nport discovery if
618 * - vport->fc_prevDID != 0 (not initial discovery) OR
619 * - lpfc_delay_discovery module parameter is set.
621 if (fabric_param_changed
&& !sp
->cmn
.clean_address_bit
&&
622 (vport
->fc_prevDID
|| phba
->cfg_delay_discovery
)) {
623 spin_lock_irq(shost
->host_lock
);
624 vport
->fc_flag
|= FC_DISC_DELAYED
;
625 spin_unlock_irq(shost
->host_lock
);
628 return fabric_param_changed
;
633 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
634 * @vport: pointer to a host virtual N_Port data structure.
635 * @ndlp: pointer to a node-list data structure.
636 * @sp: pointer to service parameter data structure.
637 * @irsp: pointer to the IOCB within the lpfc response IOCB.
639 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
640 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
641 * port in a fabric topology. It properly sets up the parameters to the @ndlp
642 * from the IOCB response. It also check the newly assigned N_Port ID to the
643 * @vport against the previously assigned N_Port ID. If it is different from
644 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
645 * is invoked on all the remaining nodes with the @vport to unregister the
646 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
647 * is invoked to register login to the fabric.
650 * 0 - Success (currently, always return 0)
653 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
654 struct serv_parm
*sp
, IOCB_t
*irsp
)
656 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
657 struct lpfc_hba
*phba
= vport
->phba
;
658 struct lpfc_nodelist
*np
;
659 struct lpfc_nodelist
*next_np
;
660 uint8_t fabric_param_changed
;
662 spin_lock_irq(shost
->host_lock
);
663 vport
->fc_flag
|= FC_FABRIC
;
664 spin_unlock_irq(shost
->host_lock
);
666 phba
->fc_edtov
= be32_to_cpu(sp
->cmn
.e_d_tov
);
667 if (sp
->cmn
.edtovResolution
) /* E_D_TOV ticks are in nanoseconds */
668 phba
->fc_edtov
= (phba
->fc_edtov
+ 999999) / 1000000;
670 phba
->fc_edtovResol
= sp
->cmn
.edtovResolution
;
671 phba
->fc_ratov
= (be32_to_cpu(sp
->cmn
.w2
.r_a_tov
) + 999) / 1000;
673 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
674 spin_lock_irq(shost
->host_lock
);
675 vport
->fc_flag
|= FC_PUBLIC_LOOP
;
676 spin_unlock_irq(shost
->host_lock
);
679 vport
->fc_myDID
= irsp
->un
.ulpWord
[4] & Mask_DID
;
680 memcpy(&ndlp
->nlp_portname
, &sp
->portName
, sizeof(struct lpfc_name
));
681 memcpy(&ndlp
->nlp_nodename
, &sp
->nodeName
, sizeof(struct lpfc_name
));
682 ndlp
->nlp_class_sup
= 0;
683 if (sp
->cls1
.classValid
)
684 ndlp
->nlp_class_sup
|= FC_COS_CLASS1
;
685 if (sp
->cls2
.classValid
)
686 ndlp
->nlp_class_sup
|= FC_COS_CLASS2
;
687 if (sp
->cls3
.classValid
)
688 ndlp
->nlp_class_sup
|= FC_COS_CLASS3
;
689 if (sp
->cls4
.classValid
)
690 ndlp
->nlp_class_sup
|= FC_COS_CLASS4
;
691 ndlp
->nlp_maxframe
= ((sp
->cmn
.bbRcvSizeMsb
& 0x0F) << 8) |
692 sp
->cmn
.bbRcvSizeLsb
;
694 fabric_param_changed
= lpfc_check_clean_addr_bit(vport
, sp
);
695 if (fabric_param_changed
) {
696 /* Reset FDMI attribute masks based on config parameter */
697 if (phba
->cfg_enable_SmartSAN
||
698 (phba
->cfg_fdmi_on
== LPFC_FDMI_SUPPORT
)) {
699 /* Setup appropriate attribute masks */
700 vport
->fdmi_hba_mask
= LPFC_FDMI2_HBA_ATTR
;
701 if (phba
->cfg_enable_SmartSAN
)
702 vport
->fdmi_port_mask
= LPFC_FDMI2_SMART_ATTR
;
704 vport
->fdmi_port_mask
= LPFC_FDMI2_PORT_ATTR
;
706 vport
->fdmi_hba_mask
= 0;
707 vport
->fdmi_port_mask
= 0;
711 memcpy(&vport
->fabric_portname
, &sp
->portName
,
712 sizeof(struct lpfc_name
));
713 memcpy(&vport
->fabric_nodename
, &sp
->nodeName
,
714 sizeof(struct lpfc_name
));
715 memcpy(&phba
->fc_fabparam
, sp
, sizeof(struct serv_parm
));
717 if (phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
) {
718 if (sp
->cmn
.response_multiple_NPort
) {
719 lpfc_printf_vlog(vport
, KERN_WARNING
,
721 "1816 FLOGI NPIV supported, "
722 "response data 0x%x\n",
723 sp
->cmn
.response_multiple_NPort
);
724 spin_lock_irq(&phba
->hbalock
);
725 phba
->link_flag
|= LS_NPIV_FAB_SUPPORTED
;
726 spin_unlock_irq(&phba
->hbalock
);
728 /* Because we asked f/w for NPIV it still expects us
729 to call reg_vnpid atleast for the physcial host */
730 lpfc_printf_vlog(vport
, KERN_WARNING
,
732 "1817 Fabric does not support NPIV "
733 "- configuring single port mode.\n");
734 spin_lock_irq(&phba
->hbalock
);
735 phba
->link_flag
&= ~LS_NPIV_FAB_SUPPORTED
;
736 spin_unlock_irq(&phba
->hbalock
);
741 * For FC we need to do some special processing because of the SLI
742 * Port's default settings of the Common Service Parameters.
744 if ((phba
->sli_rev
== LPFC_SLI_REV4
) &&
745 (phba
->sli4_hba
.lnk_info
.lnk_tp
== LPFC_LNK_TYPE_FC
)) {
746 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
747 if (fabric_param_changed
)
748 lpfc_unregister_fcf_prep(phba
);
750 /* This should just update the VFI CSPs*/
751 if (vport
->fc_flag
& FC_VFI_REGISTERED
)
752 lpfc_issue_reg_vfi(vport
);
755 if (fabric_param_changed
&&
756 !(vport
->fc_flag
& FC_VPORT_NEEDS_REG_VPI
)) {
758 /* If our NportID changed, we need to ensure all
759 * remaining NPORTs get unreg_login'ed.
761 list_for_each_entry_safe(np
, next_np
,
762 &vport
->fc_nodes
, nlp_listp
) {
763 if (!NLP_CHK_NODE_ACT(np
))
765 if ((np
->nlp_state
!= NLP_STE_NPR_NODE
) ||
766 !(np
->nlp_flag
& NLP_NPR_ADISC
))
768 spin_lock_irq(shost
->host_lock
);
769 np
->nlp_flag
&= ~NLP_NPR_ADISC
;
770 spin_unlock_irq(shost
->host_lock
);
771 lpfc_unreg_rpi(vport
, np
);
773 lpfc_cleanup_pending_mbox(vport
);
775 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
776 lpfc_sli4_unreg_all_rpis(vport
);
777 lpfc_mbx_unreg_vpi(vport
);
778 spin_lock_irq(shost
->host_lock
);
779 vport
->fc_flag
|= FC_VPORT_NEEDS_INIT_VPI
;
780 spin_unlock_irq(shost
->host_lock
);
784 * For SLI3 and SLI4, the VPI needs to be reregistered in
785 * response to this fabric parameter change event.
787 spin_lock_irq(shost
->host_lock
);
788 vport
->fc_flag
|= FC_VPORT_NEEDS_REG_VPI
;
789 spin_unlock_irq(shost
->host_lock
);
790 } else if ((phba
->sli_rev
== LPFC_SLI_REV4
) &&
791 !(vport
->fc_flag
& FC_VPORT_NEEDS_REG_VPI
)) {
793 * Driver needs to re-reg VPI in order for f/w
794 * to update the MAC address.
796 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
797 lpfc_register_new_vport(phba
, vport
, ndlp
);
801 if (phba
->sli_rev
< LPFC_SLI_REV4
) {
802 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_REG_LOGIN_ISSUE
);
803 if (phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
&&
804 vport
->fc_flag
& FC_VPORT_NEEDS_REG_VPI
)
805 lpfc_register_new_vport(phba
, vport
, ndlp
);
807 lpfc_issue_fabric_reglogin(vport
);
809 ndlp
->nlp_type
|= NLP_FABRIC
;
810 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
811 if ((!(vport
->fc_flag
& FC_VPORT_NEEDS_REG_VPI
)) &&
812 (vport
->vpi_state
& LPFC_VPI_REGISTERED
)) {
813 lpfc_start_fdiscs(phba
);
814 lpfc_do_scr_ns_plogi(phba
, vport
);
815 } else if (vport
->fc_flag
& FC_VFI_REGISTERED
)
816 lpfc_issue_init_vpi(vport
);
818 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
819 "3135 Need register VFI: (x%x/%x)\n",
820 vport
->fc_prevDID
, vport
->fc_myDID
);
821 lpfc_issue_reg_vfi(vport
);
828 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
829 * @vport: pointer to a host virtual N_Port data structure.
830 * @ndlp: pointer to a node-list data structure.
831 * @sp: pointer to service parameter data structure.
833 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
834 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
835 * in a point-to-point topology. First, the @vport's N_Port Name is compared
836 * with the received N_Port Name: if the @vport's N_Port Name is greater than
837 * the received N_Port Name lexicographically, this node shall assign local
838 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
839 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
840 * this node shall just wait for the remote node to issue PLOGI and assign
848 lpfc_cmpl_els_flogi_nport(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
849 struct serv_parm
*sp
)
851 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
852 struct lpfc_hba
*phba
= vport
->phba
;
856 spin_lock_irq(shost
->host_lock
);
857 vport
->fc_flag
&= ~(FC_FABRIC
| FC_PUBLIC_LOOP
);
858 vport
->fc_flag
|= FC_PT2PT
;
859 spin_unlock_irq(shost
->host_lock
);
861 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
862 if ((phba
->sli_rev
== LPFC_SLI_REV4
) && phba
->fc_topology_changed
) {
863 lpfc_unregister_fcf_prep(phba
);
865 spin_lock_irq(shost
->host_lock
);
866 vport
->fc_flag
&= ~FC_VFI_REGISTERED
;
867 spin_unlock_irq(shost
->host_lock
);
868 phba
->fc_topology_changed
= 0;
871 rc
= memcmp(&vport
->fc_portname
, &sp
->portName
,
872 sizeof(vport
->fc_portname
));
875 /* This side will initiate the PLOGI */
876 spin_lock_irq(shost
->host_lock
);
877 vport
->fc_flag
|= FC_PT2PT_PLOGI
;
878 spin_unlock_irq(shost
->host_lock
);
881 * N_Port ID cannot be 0, set our Id to LocalID
882 * the other side will be RemoteID.
887 vport
->fc_myDID
= PT2PT_LocalID
;
889 /* Decrement ndlp reference count indicating that ndlp can be
890 * safely released when other references to it are done.
894 ndlp
= lpfc_findnode_did(vport
, PT2PT_RemoteID
);
897 * Cannot find existing Fabric ndlp, so allocate a
900 ndlp
= lpfc_nlp_init(vport
, PT2PT_RemoteID
);
903 } else if (!NLP_CHK_NODE_ACT(ndlp
)) {
904 ndlp
= lpfc_enable_node(vport
, ndlp
,
905 NLP_STE_UNUSED_NODE
);
910 memcpy(&ndlp
->nlp_portname
, &sp
->portName
,
911 sizeof(struct lpfc_name
));
912 memcpy(&ndlp
->nlp_nodename
, &sp
->nodeName
,
913 sizeof(struct lpfc_name
));
914 /* Set state will put ndlp onto node list if not already done */
915 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
916 spin_lock_irq(shost
->host_lock
);
917 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
918 spin_unlock_irq(shost
->host_lock
);
920 /* This side will wait for the PLOGI, decrement ndlp reference
921 * count indicating that ndlp can be released when other
922 * references to it are done.
926 /* If we are pt2pt with another NPort, force NPIV off! */
927 phba
->sli3_options
&= ~LPFC_SLI3_NPIV_ENABLED
;
929 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
933 lpfc_config_link(phba
, mbox
);
935 mbox
->mbox_cmpl
= lpfc_mbx_cmpl_local_config_link
;
937 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
938 if (rc
== MBX_NOT_FINISHED
) {
939 mempool_free(mbox
, phba
->mbox_mem_pool
);
949 * lpfc_cmpl_els_flogi - Completion callback function for flogi
950 * @phba: pointer to lpfc hba data structure.
951 * @cmdiocb: pointer to lpfc command iocb data structure.
952 * @rspiocb: pointer to lpfc response iocb data structure.
954 * This routine is the top-level completion callback function for issuing
955 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
956 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
957 * retry has been made (either immediately or delayed with lpfc_els_retry()
958 * returning 1), the command IOCB will be released and function returned.
959 * If the retry attempt has been given up (possibly reach the maximum
960 * number of retries), one additional decrement of ndlp reference shall be
961 * invoked before going out after releasing the command IOCB. This will
962 * actually release the remote node (Note, lpfc_els_free_iocb() will also
963 * invoke one decrement of ndlp reference count). If no error reported in
964 * the IOCB status, the command Port ID field is used to determine whether
965 * this is a point-to-point topology or a fabric topology: if the Port ID
966 * field is assigned, it is a fabric topology; otherwise, it is a
967 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
968 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
969 * specific topology completion conditions.
972 lpfc_cmpl_els_flogi(struct lpfc_hba
*phba
, struct lpfc_iocbq
*cmdiocb
,
973 struct lpfc_iocbq
*rspiocb
)
975 struct lpfc_vport
*vport
= cmdiocb
->vport
;
976 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
977 IOCB_t
*irsp
= &rspiocb
->iocb
;
978 struct lpfc_nodelist
*ndlp
= cmdiocb
->context1
;
979 struct lpfc_dmabuf
*pcmd
= cmdiocb
->context2
, *prsp
;
980 struct serv_parm
*sp
;
984 /* Check to see if link went down during discovery */
985 if (lpfc_els_chk_latt(vport
)) {
986 /* One additional decrement on node reference count to
987 * trigger the release of the node
993 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
994 "FLOGI cmpl: status:x%x/x%x state:x%x",
995 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
998 if (irsp
->ulpStatus
) {
1000 * In case of FIP mode, perform roundrobin FCF failover
1001 * due to new FCF discovery
1003 if ((phba
->hba_flag
& HBA_FIP_SUPPORT
) &&
1004 (phba
->fcf
.fcf_flag
& FCF_DISCOVERY
)) {
1005 if (phba
->link_state
< LPFC_LINK_UP
)
1006 goto stop_rr_fcf_flogi
;
1007 if ((phba
->fcoe_cvl_eventtag_attn
==
1008 phba
->fcoe_cvl_eventtag
) &&
1009 (irsp
->ulpStatus
== IOSTAT_LOCAL_REJECT
) &&
1010 ((irsp
->un
.ulpWord
[4] & IOERR_PARAM_MASK
) ==
1012 goto stop_rr_fcf_flogi
;
1014 phba
->fcoe_cvl_eventtag_attn
=
1015 phba
->fcoe_cvl_eventtag
;
1016 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
| LOG_ELS
,
1017 "2611 FLOGI failed on FCF (x%x), "
1018 "status:x%x/x%x, tmo:x%x, perform "
1019 "roundrobin FCF failover\n",
1020 phba
->fcf
.current_rec
.fcf_indx
,
1021 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
1023 lpfc_sli4_set_fcf_flogi_fail(phba
,
1024 phba
->fcf
.current_rec
.fcf_indx
);
1025 fcf_index
= lpfc_sli4_fcf_rr_next_index_get(phba
);
1026 rc
= lpfc_sli4_fcf_rr_next_proc(vport
, fcf_index
);
1033 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
1034 "2858 FLOGI failure Status:x%x/x%x TMO:x%x "
1036 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
1037 irsp
->ulpTimeout
, phba
->hba_flag
,
1038 phba
->fcf
.fcf_flag
);
1040 /* Check for retry */
1041 if (lpfc_els_retry(phba
, cmdiocb
, rspiocb
))
1045 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
1046 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
1047 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
1051 /* If this is not a loop open failure, bail out */
1052 if (!(irsp
->ulpStatus
== IOSTAT_LOCAL_REJECT
&&
1053 ((irsp
->un
.ulpWord
[4] & IOERR_PARAM_MASK
) ==
1054 IOERR_LOOP_OPEN_FAILURE
)))
1057 /* FLOGI failed, so there is no fabric */
1058 spin_lock_irq(shost
->host_lock
);
1059 vport
->fc_flag
&= ~(FC_FABRIC
| FC_PUBLIC_LOOP
);
1060 spin_unlock_irq(shost
->host_lock
);
1062 /* If private loop, then allow max outstanding els to be
1063 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
1064 * alpa map would take too long otherwise.
1066 if (phba
->alpa_map
[0] == 0)
1067 vport
->cfg_discovery_threads
= LPFC_MAX_DISC_THREADS
;
1068 if ((phba
->sli_rev
== LPFC_SLI_REV4
) &&
1069 (!(vport
->fc_flag
& FC_VFI_REGISTERED
) ||
1070 (vport
->fc_prevDID
!= vport
->fc_myDID
) ||
1071 phba
->fc_topology_changed
)) {
1072 if (vport
->fc_flag
& FC_VFI_REGISTERED
) {
1073 if (phba
->fc_topology_changed
) {
1074 lpfc_unregister_fcf_prep(phba
);
1075 spin_lock_irq(shost
->host_lock
);
1076 vport
->fc_flag
&= ~FC_VFI_REGISTERED
;
1077 spin_unlock_irq(shost
->host_lock
);
1078 phba
->fc_topology_changed
= 0;
1080 lpfc_sli4_unreg_all_rpis(vport
);
1084 /* Do not register VFI if the driver aborted FLOGI */
1085 if (!lpfc_error_lost_link(irsp
))
1086 lpfc_issue_reg_vfi(vport
);
1092 spin_lock_irq(shost
->host_lock
);
1093 vport
->fc_flag
&= ~FC_VPORT_CVL_RCVD
;
1094 vport
->fc_flag
&= ~FC_VPORT_LOGO_RCVD
;
1095 spin_unlock_irq(shost
->host_lock
);
1098 * The FLogI succeeded. Sync the data for the CPU before
1101 prsp
= list_get_first(&pcmd
->list
, struct lpfc_dmabuf
, list
);
1104 sp
= prsp
->virt
+ sizeof(uint32_t);
1106 /* FLOGI completes successfully */
1107 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
1108 "0101 FLOGI completes successfully, I/O tag:x%x, "
1109 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb
->iotag
,
1110 irsp
->un
.ulpWord
[4], sp
->cmn
.e_d_tov
,
1111 sp
->cmn
.w2
.r_a_tov
, sp
->cmn
.edtovResolution
,
1112 vport
->port_state
, vport
->fc_flag
);
1114 if (vport
->port_state
== LPFC_FLOGI
) {
1116 * If Common Service Parameters indicate Nport
1117 * we are point to point, if Fport we are Fabric.
1120 rc
= lpfc_cmpl_els_flogi_fabric(vport
, ndlp
, sp
, irsp
);
1121 else if (!(phba
->hba_flag
& HBA_FCOE_MODE
))
1122 rc
= lpfc_cmpl_els_flogi_nport(vport
, ndlp
, sp
);
1124 lpfc_printf_vlog(vport
, KERN_ERR
,
1126 "2831 FLOGI response with cleared Fabric "
1127 "bit fcf_index 0x%x "
1128 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
1130 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
1131 phba
->fcf
.current_rec
.fcf_indx
,
1132 phba
->fcf
.current_rec
.switch_name
[0],
1133 phba
->fcf
.current_rec
.switch_name
[1],
1134 phba
->fcf
.current_rec
.switch_name
[2],
1135 phba
->fcf
.current_rec
.switch_name
[3],
1136 phba
->fcf
.current_rec
.switch_name
[4],
1137 phba
->fcf
.current_rec
.switch_name
[5],
1138 phba
->fcf
.current_rec
.switch_name
[6],
1139 phba
->fcf
.current_rec
.switch_name
[7],
1140 phba
->fcf
.current_rec
.fabric_name
[0],
1141 phba
->fcf
.current_rec
.fabric_name
[1],
1142 phba
->fcf
.current_rec
.fabric_name
[2],
1143 phba
->fcf
.current_rec
.fabric_name
[3],
1144 phba
->fcf
.current_rec
.fabric_name
[4],
1145 phba
->fcf
.current_rec
.fabric_name
[5],
1146 phba
->fcf
.current_rec
.fabric_name
[6],
1147 phba
->fcf
.current_rec
.fabric_name
[7]);
1149 spin_lock_irq(&phba
->hbalock
);
1150 phba
->fcf
.fcf_flag
&= ~FCF_DISCOVERY
;
1151 phba
->hba_flag
&= ~(FCF_RR_INPROG
| HBA_DEVLOSS_TMO
);
1152 spin_unlock_irq(&phba
->hbalock
);
1156 /* Mark the FCF discovery process done */
1157 if (phba
->hba_flag
& HBA_FIP_SUPPORT
)
1158 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_FIP
|
1160 "2769 FLOGI to FCF (x%x) "
1161 "completed successfully\n",
1162 phba
->fcf
.current_rec
.fcf_indx
);
1163 spin_lock_irq(&phba
->hbalock
);
1164 phba
->fcf
.fcf_flag
&= ~FCF_DISCOVERY
;
1165 phba
->hba_flag
&= ~(FCF_RR_INPROG
| HBA_DEVLOSS_TMO
);
1166 spin_unlock_irq(&phba
->hbalock
);
1172 spin_lock_irq(&phba
->hbalock
);
1173 phba
->fcf
.fcf_flag
&= ~FCF_DISCOVERY
;
1174 spin_unlock_irq(&phba
->hbalock
);
1178 if (!lpfc_error_lost_link(irsp
)) {
1179 /* FLOGI failed, so just use loop map to make discovery list */
1180 lpfc_disc_list_loopmap(vport
);
1182 /* Start discovery */
1183 lpfc_disc_start(vport
);
1184 } else if (((irsp
->ulpStatus
!= IOSTAT_LOCAL_REJECT
) ||
1185 (((irsp
->un
.ulpWord
[4] & IOERR_PARAM_MASK
) !=
1186 IOERR_SLI_ABORTED
) &&
1187 ((irsp
->un
.ulpWord
[4] & IOERR_PARAM_MASK
) !=
1188 IOERR_SLI_DOWN
))) &&
1189 (phba
->link_state
!= LPFC_CLEAR_LA
)) {
1190 /* If FLOGI failed enable link interrupt. */
1191 lpfc_issue_clear_la(phba
, vport
);
1194 lpfc_els_free_iocb(phba
, cmdiocb
);
1198 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1199 * @vport: pointer to a host virtual N_Port data structure.
1200 * @ndlp: pointer to a node-list data structure.
1201 * @retry: number of retries to the command IOCB.
1203 * This routine issues a Fabric Login (FLOGI) Request ELS command
1204 * for a @vport. The initiator service parameters are put into the payload
1205 * of the FLOGI Request IOCB and the top-level callback function pointer
1206 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
1207 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
1208 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
1210 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1211 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1212 * will be stored into the context1 field of the IOCB for the completion
1213 * callback function to the FLOGI ELS command.
1216 * 0 - successfully issued flogi iocb for @vport
1217 * 1 - failed to issue flogi iocb for @vport
1220 lpfc_issue_els_flogi(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1223 struct lpfc_hba
*phba
= vport
->phba
;
1224 struct serv_parm
*sp
;
1226 struct lpfc_iocbq
*elsiocb
;
1232 cmdsize
= (sizeof(uint32_t) + sizeof(struct serv_parm
));
1233 elsiocb
= lpfc_prep_els_iocb(vport
, 1, cmdsize
, retry
, ndlp
,
1234 ndlp
->nlp_DID
, ELS_CMD_FLOGI
);
1239 icmd
= &elsiocb
->iocb
;
1240 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
1242 /* For FLOGI request, remainder of payload is service parameters */
1243 *((uint32_t *) (pcmd
)) = ELS_CMD_FLOGI
;
1244 pcmd
+= sizeof(uint32_t);
1245 memcpy(pcmd
, &vport
->fc_sparam
, sizeof(struct serv_parm
));
1246 sp
= (struct serv_parm
*) pcmd
;
1248 /* Setup CSPs accordingly for Fabric */
1249 sp
->cmn
.e_d_tov
= 0;
1250 sp
->cmn
.w2
.r_a_tov
= 0;
1251 sp
->cmn
.virtual_fabric_support
= 0;
1252 sp
->cls1
.classValid
= 0;
1253 if (sp
->cmn
.fcphLow
< FC_PH3
)
1254 sp
->cmn
.fcphLow
= FC_PH3
;
1255 if (sp
->cmn
.fcphHigh
< FC_PH3
)
1256 sp
->cmn
.fcphHigh
= FC_PH3
;
1258 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
1259 if (bf_get(lpfc_sli_intf_if_type
, &phba
->sli4_hba
.sli_intf
) ==
1260 LPFC_SLI_INTF_IF_TYPE_0
) {
1261 elsiocb
->iocb
.ulpCt_h
= ((SLI4_CT_FCFI
>> 1) & 1);
1262 elsiocb
->iocb
.ulpCt_l
= (SLI4_CT_FCFI
& 1);
1263 /* FLOGI needs to be 3 for WQE FCFI */
1264 /* Set the fcfi to the fcfi we registered with */
1265 elsiocb
->iocb
.ulpContext
= phba
->fcf
.fcfi
;
1267 /* Can't do SLI4 class2 without support sequence coalescing */
1268 sp
->cls2
.classValid
= 0;
1269 sp
->cls2
.seqDelivery
= 0;
1271 /* Historical, setting sequential-delivery bit for SLI3 */
1272 sp
->cls2
.seqDelivery
= (sp
->cls2
.classValid
) ? 1 : 0;
1273 sp
->cls3
.seqDelivery
= (sp
->cls3
.classValid
) ? 1 : 0;
1274 if (phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
) {
1275 sp
->cmn
.request_multiple_Nport
= 1;
1276 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1280 sp
->cmn
.request_multiple_Nport
= 0;
1283 if (phba
->fc_topology
!= LPFC_TOPOLOGY_LOOP
) {
1284 icmd
->un
.elsreq64
.myID
= 0;
1285 icmd
->un
.elsreq64
.fl
= 1;
1288 tmo
= phba
->fc_ratov
;
1289 phba
->fc_ratov
= LPFC_DISC_FLOGI_TMO
;
1290 lpfc_set_disctmo(vport
);
1291 phba
->fc_ratov
= tmo
;
1293 phba
->fc_stat
.elsXmitFLOGI
++;
1294 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_flogi
;
1296 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
1297 "Issue FLOGI: opt:x%x",
1298 phba
->sli3_options
, 0, 0);
1300 rc
= lpfc_issue_fabric_iocb(phba
, elsiocb
);
1301 if (rc
== IOCB_ERROR
) {
1302 lpfc_els_free_iocb(phba
, elsiocb
);
1309 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1310 * @phba: pointer to lpfc hba data structure.
1312 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1313 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1314 * list and issues an abort IOCB command on each outstanding IOCB that
1315 * contains an active Fabric_DID ndlp. Note that this function is to issue
1316 * the abort IOCB command on all the outstanding IOCBs, thus when this
1317 * function returns, it does not guarantee all the IOCBs are actually aborted.
1320 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
1323 lpfc_els_abort_flogi(struct lpfc_hba
*phba
)
1325 struct lpfc_sli_ring
*pring
;
1326 struct lpfc_iocbq
*iocb
, *next_iocb
;
1327 struct lpfc_nodelist
*ndlp
;
1330 /* Abort outstanding I/O on NPort <nlp_DID> */
1331 lpfc_printf_log(phba
, KERN_INFO
, LOG_DISCOVERY
,
1332 "0201 Abort outstanding I/O on NPort x%x\n",
1335 pring
= lpfc_phba_elsring(phba
);
1338 * Check the txcmplq for an iocb that matches the nport the driver is
1341 spin_lock_irq(&phba
->hbalock
);
1342 list_for_each_entry_safe(iocb
, next_iocb
, &pring
->txcmplq
, list
) {
1344 if (icmd
->ulpCommand
== CMD_ELS_REQUEST64_CR
) {
1345 ndlp
= (struct lpfc_nodelist
*)(iocb
->context1
);
1346 if (ndlp
&& NLP_CHK_NODE_ACT(ndlp
) &&
1347 (ndlp
->nlp_DID
== Fabric_DID
))
1348 lpfc_sli_issue_abort_iotag(phba
, pring
, iocb
);
1351 spin_unlock_irq(&phba
->hbalock
);
1357 * lpfc_initial_flogi - Issue an initial fabric login for a vport
1358 * @vport: pointer to a host virtual N_Port data structure.
1360 * This routine issues an initial Fabric Login (FLOGI) for the @vport
1361 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1362 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1363 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1364 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1365 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1369 * 0 - failed to issue initial flogi for @vport
1370 * 1 - successfully issued initial flogi for @vport
1373 lpfc_initial_flogi(struct lpfc_vport
*vport
)
1375 struct lpfc_nodelist
*ndlp
;
1377 vport
->port_state
= LPFC_FLOGI
;
1378 lpfc_set_disctmo(vport
);
1380 /* First look for the Fabric ndlp */
1381 ndlp
= lpfc_findnode_did(vport
, Fabric_DID
);
1383 /* Cannot find existing Fabric ndlp, so allocate a new one */
1384 ndlp
= lpfc_nlp_init(vport
, Fabric_DID
);
1387 /* Set the node type */
1388 ndlp
->nlp_type
|= NLP_FABRIC
;
1389 /* Put ndlp onto node list */
1390 lpfc_enqueue_node(vport
, ndlp
);
1391 } else if (!NLP_CHK_NODE_ACT(ndlp
)) {
1392 /* re-setup ndlp without removing from node list */
1393 ndlp
= lpfc_enable_node(vport
, ndlp
, NLP_STE_UNUSED_NODE
);
1398 if (lpfc_issue_els_flogi(vport
, ndlp
, 0)) {
1399 /* This decrement of reference count to node shall kick off
1400 * the release of the node.
1409 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1410 * @vport: pointer to a host virtual N_Port data structure.
1412 * This routine issues an initial Fabric Discover (FDISC) for the @vport
1413 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1414 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1415 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1416 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1417 * is then invoked with the @vport and the ndlp to perform the FDISC for the
1421 * 0 - failed to issue initial fdisc for @vport
1422 * 1 - successfully issued initial fdisc for @vport
1425 lpfc_initial_fdisc(struct lpfc_vport
*vport
)
1427 struct lpfc_nodelist
*ndlp
;
1429 /* First look for the Fabric ndlp */
1430 ndlp
= lpfc_findnode_did(vport
, Fabric_DID
);
1432 /* Cannot find existing Fabric ndlp, so allocate a new one */
1433 ndlp
= lpfc_nlp_init(vport
, Fabric_DID
);
1436 /* Put ndlp onto node list */
1437 lpfc_enqueue_node(vport
, ndlp
);
1438 } else if (!NLP_CHK_NODE_ACT(ndlp
)) {
1439 /* re-setup ndlp without removing from node list */
1440 ndlp
= lpfc_enable_node(vport
, ndlp
, NLP_STE_UNUSED_NODE
);
1445 if (lpfc_issue_els_fdisc(vport
, ndlp
, 0)) {
1446 /* decrement node reference count to trigger the release of
1456 * lpfc_more_plogi - Check and issue remaining plogis for a vport
1457 * @vport: pointer to a host virtual N_Port data structure.
1459 * This routine checks whether there are more remaining Port Logins
1460 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1461 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1462 * to issue ELS PLOGIs up to the configured discover threads with the
1463 * @vport (@vport->cfg_discovery_threads). The function also decrements
1464 * the @vport's num_disc_nodes by 1 if it is not already 0.
1467 lpfc_more_plogi(struct lpfc_vport
*vport
)
1469 if (vport
->num_disc_nodes
)
1470 vport
->num_disc_nodes
--;
1472 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
1473 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
1474 "0232 Continue discovery with %d PLOGIs to go "
1475 "Data: x%x x%x x%x\n",
1476 vport
->num_disc_nodes
, vport
->fc_plogi_cnt
,
1477 vport
->fc_flag
, vport
->port_state
);
1478 /* Check to see if there are more PLOGIs to be sent */
1479 if (vport
->fc_flag
& FC_NLP_MORE
)
1480 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
1481 lpfc_els_disc_plogi(vport
);
1487 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
1488 * @phba: pointer to lpfc hba data structure.
1489 * @prsp: pointer to response IOCB payload.
1490 * @ndlp: pointer to a node-list data structure.
1492 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1493 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
1494 * The following cases are considered N_Port confirmed:
1495 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1496 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1497 * it does not have WWPN assigned either. If the WWPN is confirmed, the
1498 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1499 * 1) if there is a node on vport list other than the @ndlp with the same
1500 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1501 * on that node to release the RPI associated with the node; 2) if there is
1502 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1503 * into, a new node shall be allocated (or activated). In either case, the
1504 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1505 * be released and the new_ndlp shall be put on to the vport node list and
1506 * its pointer returned as the confirmed node.
1508 * Note that before the @ndlp got "released", the keepDID from not-matching
1509 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1510 * of the @ndlp. This is because the release of @ndlp is actually to put it
1511 * into an inactive state on the vport node list and the vport node list
1512 * management algorithm does not allow two nodes with the same DID.
1515 * pointer to the PLOGI N_Port @ndlp
1517 static struct lpfc_nodelist
*
1518 lpfc_plogi_confirm_nport(struct lpfc_hba
*phba
, uint32_t *prsp
,
1519 struct lpfc_nodelist
*ndlp
)
1521 struct lpfc_vport
*vport
= ndlp
->vport
;
1522 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1523 struct lpfc_nodelist
*new_ndlp
;
1524 struct lpfc_rport_data
*rdata
;
1525 struct fc_rport
*rport
;
1526 struct serv_parm
*sp
;
1527 uint8_t name
[sizeof(struct lpfc_name
)];
1528 uint32_t rc
, keepDID
= 0, keep_nlp_flag
= 0;
1529 uint16_t keep_nlp_state
;
1530 struct lpfc_nvme_rport
*keep_nrport
= NULL
;
1533 unsigned long *active_rrqs_xri_bitmap
= NULL
;
1535 /* Fabric nodes can have the same WWPN so we don't bother searching
1536 * by WWPN. Just return the ndlp that was given to us.
1538 if (ndlp
->nlp_type
& NLP_FABRIC
)
1541 sp
= (struct serv_parm
*) ((uint8_t *) prsp
+ sizeof(uint32_t));
1542 memset(name
, 0, sizeof(struct lpfc_name
));
1544 /* Now we find out if the NPort we are logging into, matches the WWPN
1545 * we have for that ndlp. If not, we have some work to do.
1547 new_ndlp
= lpfc_findnode_wwpn(vport
, &sp
->portName
);
1549 if (new_ndlp
== ndlp
&& NLP_CHK_NODE_ACT(new_ndlp
))
1551 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
1552 active_rrqs_xri_bitmap
= mempool_alloc(phba
->active_rrq_pool
,
1554 if (active_rrqs_xri_bitmap
)
1555 memset(active_rrqs_xri_bitmap
, 0,
1556 phba
->cfg_rrq_xri_bitmap_sz
);
1559 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
1560 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
1561 ndlp
, ndlp
->nlp_DID
, new_ndlp
);
1564 rc
= memcmp(&ndlp
->nlp_portname
, name
,
1565 sizeof(struct lpfc_name
));
1567 if (active_rrqs_xri_bitmap
)
1568 mempool_free(active_rrqs_xri_bitmap
,
1569 phba
->active_rrq_pool
);
1572 new_ndlp
= lpfc_nlp_init(vport
, ndlp
->nlp_DID
);
1574 if (active_rrqs_xri_bitmap
)
1575 mempool_free(active_rrqs_xri_bitmap
,
1576 phba
->active_rrq_pool
);
1579 } else if (!NLP_CHK_NODE_ACT(new_ndlp
)) {
1580 rc
= memcmp(&ndlp
->nlp_portname
, name
,
1581 sizeof(struct lpfc_name
));
1583 if (active_rrqs_xri_bitmap
)
1584 mempool_free(active_rrqs_xri_bitmap
,
1585 phba
->active_rrq_pool
);
1588 new_ndlp
= lpfc_enable_node(vport
, new_ndlp
,
1589 NLP_STE_UNUSED_NODE
);
1591 if (active_rrqs_xri_bitmap
)
1592 mempool_free(active_rrqs_xri_bitmap
,
1593 phba
->active_rrq_pool
);
1596 keepDID
= new_ndlp
->nlp_DID
;
1597 if ((phba
->sli_rev
== LPFC_SLI_REV4
) && active_rrqs_xri_bitmap
)
1598 memcpy(active_rrqs_xri_bitmap
,
1599 new_ndlp
->active_rrqs_xri_bitmap
,
1600 phba
->cfg_rrq_xri_bitmap_sz
);
1602 keepDID
= new_ndlp
->nlp_DID
;
1603 if (phba
->sli_rev
== LPFC_SLI_REV4
&&
1604 active_rrqs_xri_bitmap
)
1605 memcpy(active_rrqs_xri_bitmap
,
1606 new_ndlp
->active_rrqs_xri_bitmap
,
1607 phba
->cfg_rrq_xri_bitmap_sz
);
1610 lpfc_unreg_rpi(vport
, new_ndlp
);
1611 new_ndlp
->nlp_DID
= ndlp
->nlp_DID
;
1612 new_ndlp
->nlp_prev_state
= ndlp
->nlp_prev_state
;
1613 if (phba
->sli_rev
== LPFC_SLI_REV4
)
1614 memcpy(new_ndlp
->active_rrqs_xri_bitmap
,
1615 ndlp
->active_rrqs_xri_bitmap
,
1616 phba
->cfg_rrq_xri_bitmap_sz
);
1618 spin_lock_irq(shost
->host_lock
);
1619 keep_nlp_flag
= new_ndlp
->nlp_flag
;
1620 new_ndlp
->nlp_flag
= ndlp
->nlp_flag
;
1621 ndlp
->nlp_flag
= keep_nlp_flag
;
1622 spin_unlock_irq(shost
->host_lock
);
1624 /* Set nlp_states accordingly */
1625 keep_nlp_state
= new_ndlp
->nlp_state
;
1626 lpfc_nlp_set_state(vport
, new_ndlp
, ndlp
->nlp_state
);
1628 /* interchange the nvme remoteport structs */
1629 keep_nrport
= new_ndlp
->nrport
;
1630 new_ndlp
->nrport
= ndlp
->nrport
;
1632 /* Move this back to NPR state */
1633 if (memcmp(&ndlp
->nlp_portname
, name
, sizeof(struct lpfc_name
)) == 0) {
1634 /* The new_ndlp is replacing ndlp totally, so we need
1635 * to put ndlp on UNUSED list and try to free it.
1637 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
1638 "3179 PLOGI confirm NEW: %x %x\n",
1639 new_ndlp
->nlp_DID
, keepDID
);
1641 /* Fix up the rport accordingly */
1642 rport
= ndlp
->rport
;
1644 rdata
= rport
->dd_data
;
1645 if (rdata
->pnode
== ndlp
) {
1646 /* break the link before dropping the ref */
1649 rdata
->pnode
= lpfc_nlp_get(new_ndlp
);
1650 new_ndlp
->rport
= rport
;
1652 new_ndlp
->nlp_type
= ndlp
->nlp_type
;
1655 /* Fix up the nvme rport */
1657 ndlp
->nrport
= NULL
;
1661 /* We shall actually free the ndlp with both nlp_DID and
1662 * nlp_portname fields equals 0 to avoid any ndlp on the
1663 * nodelist never to be used.
1665 if (ndlp
->nlp_DID
== 0) {
1666 spin_lock_irq(&phba
->ndlp_lock
);
1667 NLP_SET_FREE_REQ(ndlp
);
1668 spin_unlock_irq(&phba
->ndlp_lock
);
1671 /* Two ndlps cannot have the same did on the nodelist */
1672 ndlp
->nlp_DID
= keepDID
;
1673 if (phba
->sli_rev
== LPFC_SLI_REV4
&&
1674 active_rrqs_xri_bitmap
)
1675 memcpy(ndlp
->active_rrqs_xri_bitmap
,
1676 active_rrqs_xri_bitmap
,
1677 phba
->cfg_rrq_xri_bitmap_sz
);
1679 if (!NLP_CHK_NODE_ACT(ndlp
))
1680 lpfc_drop_node(vport
, ndlp
);
1683 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
1684 "3180 PLOGI confirm SWAP: %x %x\n",
1685 new_ndlp
->nlp_DID
, keepDID
);
1687 lpfc_unreg_rpi(vport
, ndlp
);
1689 /* Two ndlps cannot have the same did */
1690 ndlp
->nlp_DID
= keepDID
;
1691 if (phba
->sli_rev
== LPFC_SLI_REV4
&&
1692 active_rrqs_xri_bitmap
)
1693 memcpy(ndlp
->active_rrqs_xri_bitmap
,
1694 active_rrqs_xri_bitmap
,
1695 phba
->cfg_rrq_xri_bitmap_sz
);
1697 /* Since we are switching over to the new_ndlp,
1698 * reset the old ndlp state
1700 if ((ndlp
->nlp_state
== NLP_STE_UNMAPPED_NODE
) ||
1701 (ndlp
->nlp_state
== NLP_STE_MAPPED_NODE
))
1702 keep_nlp_state
= NLP_STE_NPR_NODE
;
1703 lpfc_nlp_set_state(vport
, ndlp
, keep_nlp_state
);
1705 /* Previous ndlp no longer active with nvme host transport.
1706 * Remove reference from earlier registration unless the
1707 * nvme host took care of it.
1711 ndlp
->nrport
= keep_nrport
;
1713 /* Fix up the rport accordingly */
1714 rport
= ndlp
->rport
;
1716 rdata
= rport
->dd_data
;
1717 put_node
= rdata
->pnode
!= NULL
;
1718 put_rport
= ndlp
->rport
!= NULL
;
1719 rdata
->pnode
= NULL
;
1724 put_device(&rport
->dev
);
1727 if (phba
->sli_rev
== LPFC_SLI_REV4
&&
1728 active_rrqs_xri_bitmap
)
1729 mempool_free(active_rrqs_xri_bitmap
,
1730 phba
->active_rrq_pool
);
1735 * lpfc_end_rscn - Check and handle more rscn for a vport
1736 * @vport: pointer to a host virtual N_Port data structure.
1738 * This routine checks whether more Registration State Change
1739 * Notifications (RSCNs) came in while the discovery state machine was in
1740 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1741 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1742 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1743 * handling the RSCNs.
1746 lpfc_end_rscn(struct lpfc_vport
*vport
)
1748 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1750 if (vport
->fc_flag
& FC_RSCN_MODE
) {
1752 * Check to see if more RSCNs came in while we were
1753 * processing this one.
1755 if (vport
->fc_rscn_id_cnt
||
1756 (vport
->fc_flag
& FC_RSCN_DISCOVERY
) != 0)
1757 lpfc_els_handle_rscn(vport
);
1759 spin_lock_irq(shost
->host_lock
);
1760 vport
->fc_flag
&= ~FC_RSCN_MODE
;
1761 spin_unlock_irq(shost
->host_lock
);
1767 * lpfc_cmpl_els_rrq - Completion handled for els RRQs.
1768 * @phba: pointer to lpfc hba data structure.
1769 * @cmdiocb: pointer to lpfc command iocb data structure.
1770 * @rspiocb: pointer to lpfc response iocb data structure.
1772 * This routine will call the clear rrq function to free the rrq and
1773 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1774 * exist then the clear_rrq is still called because the rrq needs to
1779 lpfc_cmpl_els_rrq(struct lpfc_hba
*phba
, struct lpfc_iocbq
*cmdiocb
,
1780 struct lpfc_iocbq
*rspiocb
)
1782 struct lpfc_vport
*vport
= cmdiocb
->vport
;
1784 struct lpfc_nodelist
*ndlp
;
1785 struct lpfc_node_rrq
*rrq
;
1787 /* we pass cmdiocb to state machine which needs rspiocb as well */
1788 rrq
= cmdiocb
->context_un
.rrq
;
1789 cmdiocb
->context_un
.rsp_iocb
= rspiocb
;
1791 irsp
= &rspiocb
->iocb
;
1792 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
1793 "RRQ cmpl: status:x%x/x%x did:x%x",
1794 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
1795 irsp
->un
.elsreq64
.remoteID
);
1797 ndlp
= lpfc_findnode_did(vport
, irsp
->un
.elsreq64
.remoteID
);
1798 if (!ndlp
|| !NLP_CHK_NODE_ACT(ndlp
) || ndlp
!= rrq
->ndlp
) {
1799 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
1800 "2882 RRQ completes to NPort x%x "
1801 "with no ndlp. Data: x%x x%x x%x\n",
1802 irsp
->un
.elsreq64
.remoteID
,
1803 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
1808 /* rrq completes to NPort <nlp_DID> */
1809 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
1810 "2880 RRQ completes to NPort x%x "
1811 "Data: x%x x%x x%x x%x x%x\n",
1812 ndlp
->nlp_DID
, irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
1813 irsp
->ulpTimeout
, rrq
->xritag
, rrq
->rxid
);
1815 if (irsp
->ulpStatus
) {
1816 /* Check for retry */
1817 /* RRQ failed Don't print the vport to vport rjts */
1818 if (irsp
->ulpStatus
!= IOSTAT_LS_RJT
||
1819 (((irsp
->un
.ulpWord
[4]) >> 16 != LSRJT_INVALID_CMD
) &&
1820 ((irsp
->un
.ulpWord
[4]) >> 16 != LSRJT_UNABLE_TPC
)) ||
1821 (phba
)->pport
->cfg_log_verbose
& LOG_ELS
)
1822 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
1823 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
1824 ndlp
->nlp_DID
, irsp
->ulpStatus
,
1825 irsp
->un
.ulpWord
[4]);
1829 lpfc_clr_rrq_active(phba
, rrq
->xritag
, rrq
);
1830 lpfc_els_free_iocb(phba
, cmdiocb
);
1834 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1835 * @phba: pointer to lpfc hba data structure.
1836 * @cmdiocb: pointer to lpfc command iocb data structure.
1837 * @rspiocb: pointer to lpfc response iocb data structure.
1839 * This routine is the completion callback function for issuing the Port
1840 * Login (PLOGI) command. For PLOGI completion, there must be an active
1841 * ndlp on the vport node list that matches the remote node ID from the
1842 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
1843 * ignored and command IOCB released. The PLOGI response IOCB status is
1844 * checked for error conditions. If there is an error status reported, PLOGI
1845 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1846 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1847 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
1848 * (DSM) is set for this PLOGI completion. Finally, it checks whether
1849 * there are additional N_Port nodes with the vport that need to perform
1850 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1854 lpfc_cmpl_els_plogi(struct lpfc_hba
*phba
, struct lpfc_iocbq
*cmdiocb
,
1855 struct lpfc_iocbq
*rspiocb
)
1857 struct lpfc_vport
*vport
= cmdiocb
->vport
;
1858 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1860 struct lpfc_nodelist
*ndlp
;
1861 struct lpfc_dmabuf
*prsp
;
1864 /* we pass cmdiocb to state machine which needs rspiocb as well */
1865 cmdiocb
->context_un
.rsp_iocb
= rspiocb
;
1867 irsp
= &rspiocb
->iocb
;
1868 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
1869 "PLOGI cmpl: status:x%x/x%x did:x%x",
1870 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
1871 irsp
->un
.elsreq64
.remoteID
);
1873 ndlp
= lpfc_findnode_did(vport
, irsp
->un
.elsreq64
.remoteID
);
1874 if (!ndlp
|| !NLP_CHK_NODE_ACT(ndlp
)) {
1875 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
1876 "0136 PLOGI completes to NPort x%x "
1877 "with no ndlp. Data: x%x x%x x%x\n",
1878 irsp
->un
.elsreq64
.remoteID
,
1879 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
1884 /* Since ndlp can be freed in the disc state machine, note if this node
1885 * is being used during discovery.
1887 spin_lock_irq(shost
->host_lock
);
1888 disc
= (ndlp
->nlp_flag
& NLP_NPR_2B_DISC
);
1889 ndlp
->nlp_flag
&= ~NLP_NPR_2B_DISC
;
1890 spin_unlock_irq(shost
->host_lock
);
1893 /* PLOGI completes to NPort <nlp_DID> */
1894 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
1895 "0102 PLOGI completes to NPort x%06x "
1896 "Data: x%x x%x x%x x%x x%x\n",
1897 ndlp
->nlp_DID
, ndlp
->nlp_fc4_type
,
1898 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
1899 disc
, vport
->num_disc_nodes
);
1901 /* Check to see if link went down during discovery */
1902 if (lpfc_els_chk_latt(vport
)) {
1903 spin_lock_irq(shost
->host_lock
);
1904 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
1905 spin_unlock_irq(shost
->host_lock
);
1909 if (irsp
->ulpStatus
) {
1910 /* Check for retry */
1911 if (lpfc_els_retry(phba
, cmdiocb
, rspiocb
)) {
1912 /* ELS command is being retried */
1914 spin_lock_irq(shost
->host_lock
);
1915 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
1916 spin_unlock_irq(shost
->host_lock
);
1920 /* PLOGI failed Don't print the vport to vport rjts */
1921 if (irsp
->ulpStatus
!= IOSTAT_LS_RJT
||
1922 (((irsp
->un
.ulpWord
[4]) >> 16 != LSRJT_INVALID_CMD
) &&
1923 ((irsp
->un
.ulpWord
[4]) >> 16 != LSRJT_UNABLE_TPC
)) ||
1924 (phba
)->pport
->cfg_log_verbose
& LOG_ELS
)
1925 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
1926 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1927 ndlp
->nlp_DID
, irsp
->ulpStatus
,
1928 irsp
->un
.ulpWord
[4]);
1929 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1930 if (lpfc_error_lost_link(irsp
))
1931 rc
= NLP_STE_FREED_NODE
;
1933 rc
= lpfc_disc_state_machine(vport
, ndlp
, cmdiocb
,
1934 NLP_EVT_CMPL_PLOGI
);
1936 /* Good status, call state machine */
1937 prsp
= list_entry(((struct lpfc_dmabuf
*)
1938 cmdiocb
->context2
)->list
.next
,
1939 struct lpfc_dmabuf
, list
);
1940 ndlp
= lpfc_plogi_confirm_nport(phba
, prsp
->virt
, ndlp
);
1941 rc
= lpfc_disc_state_machine(vport
, ndlp
, cmdiocb
,
1942 NLP_EVT_CMPL_PLOGI
);
1945 if (disc
&& vport
->num_disc_nodes
) {
1946 /* Check to see if there are more PLOGIs to be sent */
1947 lpfc_more_plogi(vport
);
1949 if (vport
->num_disc_nodes
== 0) {
1950 spin_lock_irq(shost
->host_lock
);
1951 vport
->fc_flag
&= ~FC_NDISC_ACTIVE
;
1952 spin_unlock_irq(shost
->host_lock
);
1954 lpfc_can_disctmo(vport
);
1955 lpfc_end_rscn(vport
);
1960 lpfc_els_free_iocb(phba
, cmdiocb
);
1965 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
1966 * @vport: pointer to a host virtual N_Port data structure.
1967 * @did: destination port identifier.
1968 * @retry: number of retries to the command IOCB.
1970 * This routine issues a Port Login (PLOGI) command to a remote N_Port
1971 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
1972 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
1973 * This routine constructs the proper fields of the PLOGI IOCB and invokes
1974 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
1976 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1977 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1978 * will be stored into the context1 field of the IOCB for the completion
1979 * callback function to the PLOGI ELS command.
1982 * 0 - Successfully issued a plogi for @vport
1983 * 1 - failed to issue a plogi for @vport
1986 lpfc_issue_els_plogi(struct lpfc_vport
*vport
, uint32_t did
, uint8_t retry
)
1988 struct lpfc_hba
*phba
= vport
->phba
;
1989 struct Scsi_Host
*shost
;
1990 struct serv_parm
*sp
;
1991 struct lpfc_nodelist
*ndlp
;
1992 struct lpfc_iocbq
*elsiocb
;
1997 ndlp
= lpfc_findnode_did(vport
, did
);
1998 if (ndlp
&& !NLP_CHK_NODE_ACT(ndlp
))
2001 /* If ndlp is not NULL, we will bump the reference count on it */
2002 cmdsize
= (sizeof(uint32_t) + sizeof(struct serv_parm
));
2003 elsiocb
= lpfc_prep_els_iocb(vport
, 1, cmdsize
, retry
, ndlp
, did
,
2008 shost
= lpfc_shost_from_vport(vport
);
2009 spin_lock_irq(shost
->host_lock
);
2010 ndlp
->nlp_flag
&= ~NLP_FCP_PRLI_RJT
;
2011 spin_unlock_irq(shost
->host_lock
);
2013 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
2015 /* For PLOGI request, remainder of payload is service parameters */
2016 *((uint32_t *) (pcmd
)) = ELS_CMD_PLOGI
;
2017 pcmd
+= sizeof(uint32_t);
2018 memcpy(pcmd
, &vport
->fc_sparam
, sizeof(struct serv_parm
));
2019 sp
= (struct serv_parm
*) pcmd
;
2022 * If we are a N-port connected to a Fabric, fix-up paramm's so logins
2023 * to device on remote loops work.
2025 if ((vport
->fc_flag
& FC_FABRIC
) && !(vport
->fc_flag
& FC_PUBLIC_LOOP
))
2026 sp
->cmn
.altBbCredit
= 1;
2028 if (sp
->cmn
.fcphLow
< FC_PH_4_3
)
2029 sp
->cmn
.fcphLow
= FC_PH_4_3
;
2031 if (sp
->cmn
.fcphHigh
< FC_PH3
)
2032 sp
->cmn
.fcphHigh
= FC_PH3
;
2034 sp
->cmn
.valid_vendor_ver_level
= 0;
2035 memset(sp
->un
.vendorVersion
, 0, sizeof(sp
->un
.vendorVersion
));
2036 sp
->cmn
.bbRcvSizeMsb
&= 0xF;
2038 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
2039 "Issue PLOGI: did:x%x",
2042 /* If our firmware supports this feature, convey that
2043 * information to the target using the vendor specific field.
2045 if (phba
->sli
.sli_flag
& LPFC_SLI_SUPPRESS_RSP
) {
2046 sp
->cmn
.valid_vendor_ver_level
= 1;
2047 sp
->un
.vv
.vid
= cpu_to_be32(LPFC_VV_EMLX_ID
);
2048 sp
->un
.vv
.flags
= cpu_to_be32(LPFC_VV_SUPPRESS_RSP
);
2051 phba
->fc_stat
.elsXmitPLOGI
++;
2052 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_plogi
;
2053 ret
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
2055 if (ret
== IOCB_ERROR
) {
2056 lpfc_els_free_iocb(phba
, elsiocb
);
2063 * lpfc_cmpl_els_prli - Completion callback function for prli
2064 * @phba: pointer to lpfc hba data structure.
2065 * @cmdiocb: pointer to lpfc command iocb data structure.
2066 * @rspiocb: pointer to lpfc response iocb data structure.
2068 * This routine is the completion callback function for a Process Login
2069 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
2070 * status. If there is error status reported, PRLI retry shall be attempted
2071 * by invoking the lpfc_els_retry() routine. Otherwise, the state
2072 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
2073 * ndlp to mark the PRLI completion.
2076 lpfc_cmpl_els_prli(struct lpfc_hba
*phba
, struct lpfc_iocbq
*cmdiocb
,
2077 struct lpfc_iocbq
*rspiocb
)
2079 struct lpfc_vport
*vport
= cmdiocb
->vport
;
2080 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2082 struct lpfc_nodelist
*ndlp
;
2084 /* we pass cmdiocb to state machine which needs rspiocb as well */
2085 cmdiocb
->context_un
.rsp_iocb
= rspiocb
;
2087 irsp
= &(rspiocb
->iocb
);
2088 ndlp
= (struct lpfc_nodelist
*) cmdiocb
->context1
;
2089 spin_lock_irq(shost
->host_lock
);
2090 ndlp
->nlp_flag
&= ~NLP_PRLI_SND
;
2091 spin_unlock_irq(shost
->host_lock
);
2093 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
2094 "PRLI cmpl: status:x%x/x%x did:x%x",
2095 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
2098 /* Ddriver supports multiple FC4 types. Counters matter. */
2099 vport
->fc_prli_sent
--;
2101 /* PRLI completes to NPort <nlp_DID> */
2102 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
2103 "0103 PRLI completes to NPort x%06x "
2104 "Data: x%x x%x x%x x%x\n",
2105 ndlp
->nlp_DID
, irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
2106 vport
->num_disc_nodes
, ndlp
->fc4_prli_sent
);
2108 /* Check to see if link went down during discovery */
2109 if (lpfc_els_chk_latt(vport
))
2112 if (irsp
->ulpStatus
) {
2113 /* Check for retry */
2114 ndlp
->fc4_prli_sent
--;
2115 if (lpfc_els_retry(phba
, cmdiocb
, rspiocb
)) {
2116 /* ELS command is being retried */
2121 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
2122 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
2124 ndlp
->nlp_DID
, irsp
->ulpStatus
,
2125 irsp
->un
.ulpWord
[4], ndlp
->fc4_prli_sent
);
2127 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2128 if (lpfc_error_lost_link(irsp
))
2131 lpfc_disc_state_machine(vport
, ndlp
, cmdiocb
,
2134 /* Good status, call state machine. However, if another
2135 * PRLI is outstanding, don't call the state machine
2136 * because final disposition to Mapped or Unmapped is
2139 lpfc_disc_state_machine(vport
, ndlp
, cmdiocb
,
2143 lpfc_els_free_iocb(phba
, cmdiocb
);
2148 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
2149 * @vport: pointer to a host virtual N_Port data structure.
2150 * @ndlp: pointer to a node-list data structure.
2151 * @retry: number of retries to the command IOCB.
2153 * This routine issues a Process Login (PRLI) ELS command for the
2154 * @vport. The PRLI service parameters are set up in the payload of the
2155 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
2156 * is put to the IOCB completion callback func field before invoking the
2157 * routine lpfc_sli_issue_iocb() to send out PRLI command.
2159 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2160 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2161 * will be stored into the context1 field of the IOCB for the completion
2162 * callback function to the PRLI ELS command.
2165 * 0 - successfully issued prli iocb command for @vport
2166 * 1 - failed to issue prli iocb command for @vport
2169 lpfc_issue_els_prli(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2172 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2173 struct lpfc_hba
*phba
= vport
->phba
;
2175 struct lpfc_nvme_prli
*npr_nvme
;
2176 struct lpfc_iocbq
*elsiocb
;
2179 u32 local_nlp_type
, elscmd
;
2182 * If we are in RSCN mode, the FC4 types supported from a
2183 * previous GFT_ID command may not be accurate. So, if we
2184 * are a NVME Initiator, always look for the possibility of
2185 * the remote NPort beng a NVME Target.
2187 if (phba
->sli_rev
== LPFC_SLI_REV4
&&
2188 vport
->fc_flag
& FC_RSCN_MODE
&&
2189 vport
->nvmei_support
)
2190 ndlp
->nlp_fc4_type
|= NLP_FC4_NVME
;
2191 local_nlp_type
= ndlp
->nlp_fc4_type
;
2194 if (local_nlp_type
& NLP_FC4_FCP
) {
2195 /* Payload is 4 + 16 = 20 x14 bytes. */
2196 cmdsize
= (sizeof(uint32_t) + sizeof(PRLI
));
2197 elscmd
= ELS_CMD_PRLI
;
2198 } else if (local_nlp_type
& NLP_FC4_NVME
) {
2199 /* Payload is 4 + 20 = 24 x18 bytes. */
2200 cmdsize
= (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli
));
2201 elscmd
= ELS_CMD_NVMEPRLI
;
2203 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
2204 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
2205 ndlp
->nlp_fc4_type
, ndlp
->nlp_DID
);
2209 /* SLI3 ports don't support NVME. If this rport is a strict NVME
2210 * FC4 type, implicitly LOGO.
2212 if (phba
->sli_rev
== LPFC_SLI_REV3
&&
2213 ndlp
->nlp_fc4_type
== NLP_FC4_NVME
) {
2214 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
2215 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
2217 lpfc_disc_state_machine(vport
, ndlp
, NULL
, NLP_EVT_DEVICE_RM
);
2221 elsiocb
= lpfc_prep_els_iocb(vport
, 1, cmdsize
, retry
, ndlp
,
2222 ndlp
->nlp_DID
, elscmd
);
2226 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
2228 /* For PRLI request, remainder of payload is service parameters */
2229 memset(pcmd
, 0, cmdsize
);
2231 if (local_nlp_type
& NLP_FC4_FCP
) {
2232 /* Remainder of payload is FCP PRLI parameter page.
2233 * Note: this data structure is defined as
2234 * BE/LE in the structure definition so no
2235 * byte swap call is made.
2237 *((uint32_t *)(pcmd
)) = ELS_CMD_PRLI
;
2238 pcmd
+= sizeof(uint32_t);
2242 * If our firmware version is 3.20 or later,
2243 * set the following bits for FC-TAPE support.
2245 if (phba
->vpd
.rev
.feaLevelHigh
>= 0x02) {
2246 npr
->ConfmComplAllowed
= 1;
2248 npr
->TaskRetryIdReq
= 1;
2250 npr
->estabImagePair
= 1;
2251 npr
->readXferRdyDis
= 1;
2252 if (vport
->cfg_first_burst_size
)
2253 npr
->writeXferRdyDis
= 1;
2255 /* For FCP support */
2256 npr
->prliType
= PRLI_FCP_TYPE
;
2257 npr
->initiatorFunc
= 1;
2258 elsiocb
->iocb_flag
|= LPFC_PRLI_FCP_REQ
;
2260 /* Remove FCP type - processed. */
2261 local_nlp_type
&= ~NLP_FC4_FCP
;
2262 } else if (local_nlp_type
& NLP_FC4_NVME
) {
2263 /* Remainder of payload is NVME PRLI parameter page.
2264 * This data structure is the newer definition that
2265 * uses bf macros so a byte swap is required.
2267 *((uint32_t *)(pcmd
)) = ELS_CMD_NVMEPRLI
;
2268 pcmd
+= sizeof(uint32_t);
2269 npr_nvme
= (struct lpfc_nvme_prli
*)pcmd
;
2270 bf_set(prli_type_code
, npr_nvme
, PRLI_NVME_TYPE
);
2271 bf_set(prli_estabImagePair
, npr_nvme
, 0); /* Should be 0 */
2273 /* Only initiators request first burst. */
2274 if ((phba
->cfg_nvme_enable_fb
) &&
2275 !phba
->nvmet_support
)
2276 bf_set(prli_fba
, npr_nvme
, 1);
2278 if (phba
->nvmet_support
) {
2279 bf_set(prli_tgt
, npr_nvme
, 1);
2280 bf_set(prli_disc
, npr_nvme
, 1);
2283 bf_set(prli_init
, npr_nvme
, 1);
2285 npr_nvme
->word1
= cpu_to_be32(npr_nvme
->word1
);
2286 npr_nvme
->word4
= cpu_to_be32(npr_nvme
->word4
);
2287 elsiocb
->iocb_flag
|= LPFC_PRLI_NVME_REQ
;
2289 /* Remove NVME type - processed. */
2290 local_nlp_type
&= ~NLP_FC4_NVME
;
2293 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
2294 "Issue PRLI: did:x%x",
2295 ndlp
->nlp_DID
, 0, 0);
2297 phba
->fc_stat
.elsXmitPRLI
++;
2298 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_prli
;
2299 spin_lock_irq(shost
->host_lock
);
2300 ndlp
->nlp_flag
|= NLP_PRLI_SND
;
2301 spin_unlock_irq(shost
->host_lock
);
2302 if (lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0) ==
2304 spin_lock_irq(shost
->host_lock
);
2305 ndlp
->nlp_flag
&= ~NLP_PRLI_SND
;
2306 spin_unlock_irq(shost
->host_lock
);
2307 lpfc_els_free_iocb(phba
, elsiocb
);
2311 /* The vport counters are used for lpfc_scan_finished, but
2312 * the ndlp is used to track outstanding PRLIs for different
2315 vport
->fc_prli_sent
++;
2316 ndlp
->fc4_prli_sent
++;
2318 /* The driver supports 2 FC4 types. Make sure
2319 * a PRLI is issued for all types before exiting.
2321 if (phba
->sli_rev
== LPFC_SLI_REV4
&&
2322 local_nlp_type
& (NLP_FC4_FCP
| NLP_FC4_NVME
))
2323 goto send_next_prli
;
2329 * lpfc_rscn_disc - Perform rscn discovery for a vport
2330 * @vport: pointer to a host virtual N_Port data structure.
2332 * This routine performs Registration State Change Notification (RSCN)
2333 * discovery for a @vport. If the @vport's node port recovery count is not
2334 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
2335 * the nodes that need recovery. If none of the PLOGI were needed through
2336 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
2337 * invoked to check and handle possible more RSCN came in during the period
2338 * of processing the current ones.
2341 lpfc_rscn_disc(struct lpfc_vport
*vport
)
2343 lpfc_can_disctmo(vport
);
2345 /* RSCN discovery */
2346 /* go thru NPR nodes and issue ELS PLOGIs */
2347 if (vport
->fc_npr_cnt
)
2348 if (lpfc_els_disc_plogi(vport
))
2351 lpfc_end_rscn(vport
);
2355 * lpfc_adisc_done - Complete the adisc phase of discovery
2356 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2358 * This function is called when the final ADISC is completed during discovery.
2359 * This function handles clearing link attention or issuing reg_vpi depending
2360 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2362 * This function is called with no locks held.
2365 lpfc_adisc_done(struct lpfc_vport
*vport
)
2367 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2368 struct lpfc_hba
*phba
= vport
->phba
;
2371 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2372 * and continue discovery.
2374 if ((phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
) &&
2375 !(vport
->fc_flag
& FC_RSCN_MODE
) &&
2376 (phba
->sli_rev
< LPFC_SLI_REV4
)) {
2377 /* The ADISCs are complete. Doesn't matter if they
2378 * succeeded or failed because the ADISC completion
2379 * routine guarantees to call the state machine and
2380 * the RPI is either unregistered (failed ADISC response)
2381 * or the RPI is still valid and the node is marked
2382 * mapped for a target. The exchanges should be in the
2383 * correct state. This code is specific to SLI3.
2385 lpfc_issue_clear_la(phba
, vport
);
2386 lpfc_issue_reg_vpi(phba
, vport
);
2390 * For SLI2, we need to set port_state to READY
2391 * and continue discovery.
2393 if (vport
->port_state
< LPFC_VPORT_READY
) {
2394 /* If we get here, there is nothing to ADISC */
2395 lpfc_issue_clear_la(phba
, vport
);
2396 if (!(vport
->fc_flag
& FC_ABORT_DISCOVERY
)) {
2397 vport
->num_disc_nodes
= 0;
2398 /* go thru NPR list, issue ELS PLOGIs */
2399 if (vport
->fc_npr_cnt
)
2400 lpfc_els_disc_plogi(vport
);
2401 if (!vport
->num_disc_nodes
) {
2402 spin_lock_irq(shost
->host_lock
);
2403 vport
->fc_flag
&= ~FC_NDISC_ACTIVE
;
2404 spin_unlock_irq(shost
->host_lock
);
2405 lpfc_can_disctmo(vport
);
2406 lpfc_end_rscn(vport
);
2409 vport
->port_state
= LPFC_VPORT_READY
;
2411 lpfc_rscn_disc(vport
);
2415 * lpfc_more_adisc - Issue more adisc as needed
2416 * @vport: pointer to a host virtual N_Port data structure.
2418 * This routine determines whether there are more ndlps on a @vport
2419 * node list need to have Address Discover (ADISC) issued. If so, it will
2420 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2421 * remaining nodes which need to have ADISC sent.
2424 lpfc_more_adisc(struct lpfc_vport
*vport
)
2426 if (vport
->num_disc_nodes
)
2427 vport
->num_disc_nodes
--;
2428 /* Continue discovery with <num_disc_nodes> ADISCs to go */
2429 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
2430 "0210 Continue discovery with %d ADISCs to go "
2431 "Data: x%x x%x x%x\n",
2432 vport
->num_disc_nodes
, vport
->fc_adisc_cnt
,
2433 vport
->fc_flag
, vport
->port_state
);
2434 /* Check to see if there are more ADISCs to be sent */
2435 if (vport
->fc_flag
& FC_NLP_MORE
) {
2436 lpfc_set_disctmo(vport
);
2437 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2438 lpfc_els_disc_adisc(vport
);
2440 if (!vport
->num_disc_nodes
)
2441 lpfc_adisc_done(vport
);
2446 * lpfc_cmpl_els_adisc - Completion callback function for adisc
2447 * @phba: pointer to lpfc hba data structure.
2448 * @cmdiocb: pointer to lpfc command iocb data structure.
2449 * @rspiocb: pointer to lpfc response iocb data structure.
2451 * This routine is the completion function for issuing the Address Discover
2452 * (ADISC) command. It first checks to see whether link went down during
2453 * the discovery process. If so, the node will be marked as node port
2454 * recovery for issuing discover IOCB by the link attention handler and
2455 * exit. Otherwise, the response status is checked. If error was reported
2456 * in the response status, the ADISC command shall be retried by invoking
2457 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
2458 * the response status, the state machine is invoked to set transition
2459 * with respect to NLP_EVT_CMPL_ADISC event.
2462 lpfc_cmpl_els_adisc(struct lpfc_hba
*phba
, struct lpfc_iocbq
*cmdiocb
,
2463 struct lpfc_iocbq
*rspiocb
)
2465 struct lpfc_vport
*vport
= cmdiocb
->vport
;
2466 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2468 struct lpfc_nodelist
*ndlp
;
2471 /* we pass cmdiocb to state machine which needs rspiocb as well */
2472 cmdiocb
->context_un
.rsp_iocb
= rspiocb
;
2474 irsp
= &(rspiocb
->iocb
);
2475 ndlp
= (struct lpfc_nodelist
*) cmdiocb
->context1
;
2477 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
2478 "ADISC cmpl: status:x%x/x%x did:x%x",
2479 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
2482 /* Since ndlp can be freed in the disc state machine, note if this node
2483 * is being used during discovery.
2485 spin_lock_irq(shost
->host_lock
);
2486 disc
= (ndlp
->nlp_flag
& NLP_NPR_2B_DISC
);
2487 ndlp
->nlp_flag
&= ~(NLP_ADISC_SND
| NLP_NPR_2B_DISC
);
2488 spin_unlock_irq(shost
->host_lock
);
2489 /* ADISC completes to NPort <nlp_DID> */
2490 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
2491 "0104 ADISC completes to NPort x%x "
2492 "Data: x%x x%x x%x x%x x%x\n",
2493 ndlp
->nlp_DID
, irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
2494 irsp
->ulpTimeout
, disc
, vport
->num_disc_nodes
);
2495 /* Check to see if link went down during discovery */
2496 if (lpfc_els_chk_latt(vport
)) {
2497 spin_lock_irq(shost
->host_lock
);
2498 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
2499 spin_unlock_irq(shost
->host_lock
);
2503 if (irsp
->ulpStatus
) {
2504 /* Check for retry */
2505 if (lpfc_els_retry(phba
, cmdiocb
, rspiocb
)) {
2506 /* ELS command is being retried */
2508 spin_lock_irq(shost
->host_lock
);
2509 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
2510 spin_unlock_irq(shost
->host_lock
);
2511 lpfc_set_disctmo(vport
);
2516 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
2517 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2518 ndlp
->nlp_DID
, irsp
->ulpStatus
,
2519 irsp
->un
.ulpWord
[4]);
2520 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2521 if (!lpfc_error_lost_link(irsp
))
2522 lpfc_disc_state_machine(vport
, ndlp
, cmdiocb
,
2523 NLP_EVT_CMPL_ADISC
);
2525 /* Good status, call state machine */
2526 lpfc_disc_state_machine(vport
, ndlp
, cmdiocb
,
2527 NLP_EVT_CMPL_ADISC
);
2529 /* Check to see if there are more ADISCs to be sent */
2530 if (disc
&& vport
->num_disc_nodes
)
2531 lpfc_more_adisc(vport
);
2533 lpfc_els_free_iocb(phba
, cmdiocb
);
2538 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport
2539 * @vport: pointer to a virtual N_Port data structure.
2540 * @ndlp: pointer to a node-list data structure.
2541 * @retry: number of retries to the command IOCB.
2543 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2544 * @vport. It prepares the payload of the ADISC ELS command, updates the
2545 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2546 * to issue the ADISC ELS command.
2548 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2549 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2550 * will be stored into the context1 field of the IOCB for the completion
2551 * callback function to the ADISC ELS command.
2554 * 0 - successfully issued adisc
2555 * 1 - failed to issue adisc
2558 lpfc_issue_els_adisc(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2561 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2562 struct lpfc_hba
*phba
= vport
->phba
;
2564 struct lpfc_iocbq
*elsiocb
;
2568 cmdsize
= (sizeof(uint32_t) + sizeof(ADISC
));
2569 elsiocb
= lpfc_prep_els_iocb(vport
, 1, cmdsize
, retry
, ndlp
,
2570 ndlp
->nlp_DID
, ELS_CMD_ADISC
);
2574 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
2576 /* For ADISC request, remainder of payload is service parameters */
2577 *((uint32_t *) (pcmd
)) = ELS_CMD_ADISC
;
2578 pcmd
+= sizeof(uint32_t);
2580 /* Fill in ADISC payload */
2581 ap
= (ADISC
*) pcmd
;
2582 ap
->hardAL_PA
= phba
->fc_pref_ALPA
;
2583 memcpy(&ap
->portName
, &vport
->fc_portname
, sizeof(struct lpfc_name
));
2584 memcpy(&ap
->nodeName
, &vport
->fc_nodename
, sizeof(struct lpfc_name
));
2585 ap
->DID
= be32_to_cpu(vport
->fc_myDID
);
2587 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
2588 "Issue ADISC: did:x%x",
2589 ndlp
->nlp_DID
, 0, 0);
2591 phba
->fc_stat
.elsXmitADISC
++;
2592 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_adisc
;
2593 spin_lock_irq(shost
->host_lock
);
2594 ndlp
->nlp_flag
|= NLP_ADISC_SND
;
2595 spin_unlock_irq(shost
->host_lock
);
2596 if (lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0) ==
2598 spin_lock_irq(shost
->host_lock
);
2599 ndlp
->nlp_flag
&= ~NLP_ADISC_SND
;
2600 spin_unlock_irq(shost
->host_lock
);
2601 lpfc_els_free_iocb(phba
, elsiocb
);
2608 * lpfc_cmpl_els_logo - Completion callback function for logo
2609 * @phba: pointer to lpfc hba data structure.
2610 * @cmdiocb: pointer to lpfc command iocb data structure.
2611 * @rspiocb: pointer to lpfc response iocb data structure.
2613 * This routine is the completion function for issuing the ELS Logout (LOGO)
2614 * command. If no error status was reported from the LOGO response, the
2615 * state machine of the associated ndlp shall be invoked for transition with
2616 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2617 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2620 lpfc_cmpl_els_logo(struct lpfc_hba
*phba
, struct lpfc_iocbq
*cmdiocb
,
2621 struct lpfc_iocbq
*rspiocb
)
2623 struct lpfc_nodelist
*ndlp
= (struct lpfc_nodelist
*) cmdiocb
->context1
;
2624 struct lpfc_vport
*vport
= ndlp
->vport
;
2625 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2627 struct lpfcMboxq
*mbox
;
2628 unsigned long flags
;
2629 uint32_t skip_recovery
= 0;
2631 /* we pass cmdiocb to state machine which needs rspiocb as well */
2632 cmdiocb
->context_un
.rsp_iocb
= rspiocb
;
2634 irsp
= &(rspiocb
->iocb
);
2635 spin_lock_irq(shost
->host_lock
);
2636 ndlp
->nlp_flag
&= ~NLP_LOGO_SND
;
2637 spin_unlock_irq(shost
->host_lock
);
2639 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
2640 "LOGO cmpl: status:x%x/x%x did:x%x",
2641 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
2644 /* LOGO completes to NPort <nlp_DID> */
2645 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
2646 "0105 LOGO completes to NPort x%x "
2647 "Data: x%x x%x x%x x%x\n",
2648 ndlp
->nlp_DID
, irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
2649 irsp
->ulpTimeout
, vport
->num_disc_nodes
);
2651 if (lpfc_els_chk_latt(vport
)) {
2656 /* Check to see if link went down during discovery */
2657 if (ndlp
->nlp_flag
& NLP_TARGET_REMOVE
) {
2658 /* NLP_EVT_DEVICE_RM should unregister the RPI
2659 * which should abort all outstanding IOs.
2661 lpfc_disc_state_machine(vport
, ndlp
, cmdiocb
,
2667 if (irsp
->ulpStatus
) {
2668 /* Check for retry */
2669 if (lpfc_els_retry(phba
, cmdiocb
, rspiocb
)) {
2670 /* ELS command is being retried */
2675 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
2676 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2677 ndlp
->nlp_DID
, irsp
->ulpStatus
,
2678 irsp
->un
.ulpWord
[4]);
2679 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2680 if (lpfc_error_lost_link(irsp
)) {
2686 /* Call state machine. This will unregister the rpi if needed. */
2687 lpfc_disc_state_machine(vport
, ndlp
, cmdiocb
, NLP_EVT_CMPL_LOGO
);
2690 lpfc_els_free_iocb(phba
, cmdiocb
);
2691 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
2692 if ((vport
->fc_flag
& FC_PT2PT
) &&
2693 !(vport
->fc_flag
& FC_PT2PT_PLOGI
)) {
2694 phba
->pport
->fc_myDID
= 0;
2696 if ((phba
->cfg_enable_fc4_type
== LPFC_ENABLE_BOTH
) ||
2697 (phba
->cfg_enable_fc4_type
== LPFC_ENABLE_NVME
)) {
2698 if (phba
->nvmet_support
)
2699 lpfc_nvmet_update_targetport(phba
);
2701 lpfc_nvme_update_localport(phba
->pport
);
2704 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
2706 lpfc_config_link(phba
, mbox
);
2707 mbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
2708 mbox
->vport
= vport
;
2709 if (lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
) ==
2711 mempool_free(mbox
, phba
->mbox_mem_pool
);
2718 * If the node is a target, the handling attempts to recover the port.
2719 * For any other port type, the rpi is unregistered as an implicit
2722 if ((ndlp
->nlp_type
& NLP_FCP_TARGET
) && (skip_recovery
== 0)) {
2723 lpfc_cancel_retry_delay_tmo(vport
, ndlp
);
2724 spin_lock_irqsave(shost
->host_lock
, flags
);
2725 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
2726 spin_unlock_irqrestore(shost
->host_lock
, flags
);
2728 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
2729 "3187 LOGO completes to NPort x%x: Start "
2730 "Recovery Data: x%x x%x x%x x%x\n",
2731 ndlp
->nlp_DID
, irsp
->ulpStatus
,
2732 irsp
->un
.ulpWord
[4], irsp
->ulpTimeout
,
2733 vport
->num_disc_nodes
);
2734 lpfc_disc_start(vport
);
2740 * lpfc_issue_els_logo - Issue a logo to an node on a vport
2741 * @vport: pointer to a virtual N_Port data structure.
2742 * @ndlp: pointer to a node-list data structure.
2743 * @retry: number of retries to the command IOCB.
2745 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2746 * to a remote node, referred by an @ndlp on a @vport. It constructs the
2747 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2748 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2750 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2751 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2752 * will be stored into the context1 field of the IOCB for the completion
2753 * callback function to the LOGO ELS command.
2756 * 0 - successfully issued logo
2757 * 1 - failed to issue logo
2760 lpfc_issue_els_logo(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2763 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2764 struct lpfc_hba
*phba
= vport
->phba
;
2765 struct lpfc_iocbq
*elsiocb
;
2770 spin_lock_irq(shost
->host_lock
);
2771 if (ndlp
->nlp_flag
& NLP_LOGO_SND
) {
2772 spin_unlock_irq(shost
->host_lock
);
2775 spin_unlock_irq(shost
->host_lock
);
2777 cmdsize
= (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name
);
2778 elsiocb
= lpfc_prep_els_iocb(vport
, 1, cmdsize
, retry
, ndlp
,
2779 ndlp
->nlp_DID
, ELS_CMD_LOGO
);
2783 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
2784 *((uint32_t *) (pcmd
)) = ELS_CMD_LOGO
;
2785 pcmd
+= sizeof(uint32_t);
2787 /* Fill in LOGO payload */
2788 *((uint32_t *) (pcmd
)) = be32_to_cpu(vport
->fc_myDID
);
2789 pcmd
+= sizeof(uint32_t);
2790 memcpy(pcmd
, &vport
->fc_portname
, sizeof(struct lpfc_name
));
2792 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
2793 "Issue LOGO: did:x%x",
2794 ndlp
->nlp_DID
, 0, 0);
2797 * If we are issuing a LOGO, we may try to recover the remote NPort
2798 * by issuing a PLOGI later. Even though we issue ELS cmds by the
2799 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
2800 * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
2801 * for that ELS cmd. To avoid this situation, lets get rid of the
2802 * RPI right now, before any ELS cmds are sent.
2804 spin_lock_irq(shost
->host_lock
);
2805 ndlp
->nlp_flag
|= NLP_ISSUE_LOGO
;
2806 spin_unlock_irq(shost
->host_lock
);
2807 if (lpfc_unreg_rpi(vport
, ndlp
)) {
2808 lpfc_els_free_iocb(phba
, elsiocb
);
2812 phba
->fc_stat
.elsXmitLOGO
++;
2813 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_logo
;
2814 spin_lock_irq(shost
->host_lock
);
2815 ndlp
->nlp_flag
|= NLP_LOGO_SND
;
2816 ndlp
->nlp_flag
&= ~NLP_ISSUE_LOGO
;
2817 spin_unlock_irq(shost
->host_lock
);
2818 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
2820 if (rc
== IOCB_ERROR
) {
2821 spin_lock_irq(shost
->host_lock
);
2822 ndlp
->nlp_flag
&= ~NLP_LOGO_SND
;
2823 spin_unlock_irq(shost
->host_lock
);
2824 lpfc_els_free_iocb(phba
, elsiocb
);
2831 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
2832 * @phba: pointer to lpfc hba data structure.
2833 * @cmdiocb: pointer to lpfc command iocb data structure.
2834 * @rspiocb: pointer to lpfc response iocb data structure.
2836 * This routine is a generic completion callback function for ELS commands.
2837 * Specifically, it is the callback function which does not need to perform
2838 * any command specific operations. It is currently used by the ELS command
2839 * issuing routines for the ELS State Change Request (SCR),
2840 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2841 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
2842 * certain debug loggings, this callback function simply invokes the
2843 * lpfc_els_chk_latt() routine to check whether link went down during the
2844 * discovery process.
2847 lpfc_cmpl_els_cmd(struct lpfc_hba
*phba
, struct lpfc_iocbq
*cmdiocb
,
2848 struct lpfc_iocbq
*rspiocb
)
2850 struct lpfc_vport
*vport
= cmdiocb
->vport
;
2853 irsp
= &rspiocb
->iocb
;
2855 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
2856 "ELS cmd cmpl: status:x%x/x%x did:x%x",
2857 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
2858 irsp
->un
.elsreq64
.remoteID
);
2859 /* ELS cmd tag <ulpIoTag> completes */
2860 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
2861 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2862 irsp
->ulpIoTag
, irsp
->ulpStatus
,
2863 irsp
->un
.ulpWord
[4], irsp
->ulpTimeout
);
2864 /* Check to see if link went down during discovery */
2865 lpfc_els_chk_latt(vport
);
2866 lpfc_els_free_iocb(phba
, cmdiocb
);
2871 * lpfc_issue_els_scr - Issue a scr to an node on a vport
2872 * @vport: pointer to a host virtual N_Port data structure.
2873 * @nportid: N_Port identifier to the remote node.
2874 * @retry: number of retries to the command IOCB.
2876 * This routine issues a State Change Request (SCR) to a fabric node
2877 * on a @vport. The remote node @nportid is passed into the function. It
2878 * first search the @vport node list to find the matching ndlp. If no such
2879 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2880 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2881 * routine is invoked to send the SCR IOCB.
2883 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2884 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2885 * will be stored into the context1 field of the IOCB for the completion
2886 * callback function to the SCR ELS command.
2889 * 0 - Successfully issued scr command
2890 * 1 - Failed to issue scr command
2893 lpfc_issue_els_scr(struct lpfc_vport
*vport
, uint32_t nportid
, uint8_t retry
)
2895 struct lpfc_hba
*phba
= vport
->phba
;
2896 struct lpfc_iocbq
*elsiocb
;
2899 struct lpfc_nodelist
*ndlp
;
2901 cmdsize
= (sizeof(uint32_t) + sizeof(SCR
));
2903 ndlp
= lpfc_findnode_did(vport
, nportid
);
2905 ndlp
= lpfc_nlp_init(vport
, nportid
);
2908 lpfc_enqueue_node(vport
, ndlp
);
2909 } else if (!NLP_CHK_NODE_ACT(ndlp
)) {
2910 ndlp
= lpfc_enable_node(vport
, ndlp
, NLP_STE_UNUSED_NODE
);
2915 elsiocb
= lpfc_prep_els_iocb(vport
, 1, cmdsize
, retry
, ndlp
,
2916 ndlp
->nlp_DID
, ELS_CMD_SCR
);
2919 /* This will trigger the release of the node just
2926 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
2928 *((uint32_t *) (pcmd
)) = ELS_CMD_SCR
;
2929 pcmd
+= sizeof(uint32_t);
2931 /* For SCR, remainder of payload is SCR parameter page */
2932 memset(pcmd
, 0, sizeof(SCR
));
2933 ((SCR
*) pcmd
)->Function
= SCR_FUNC_FULL
;
2935 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
2936 "Issue SCR: did:x%x",
2937 ndlp
->nlp_DID
, 0, 0);
2939 phba
->fc_stat
.elsXmitSCR
++;
2940 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_cmd
;
2941 if (lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0) ==
2943 /* The additional lpfc_nlp_put will cause the following
2944 * lpfc_els_free_iocb routine to trigger the rlease of
2948 lpfc_els_free_iocb(phba
, elsiocb
);
2951 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2952 * trigger the release of node.
/**
 * lpfc_issue_els_farpr - Issue a farp to an node on a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @nportid: N_Port identifier to the remote node.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fibre Channel Address Resolution Response
 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
 * is passed into the function. It first searches the @vport node list to find
 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FARPR ELS command.
 *
 * Return code
 *   0 - Successfully issued farpr command
 *   1 - Failed to issue farpr command
 **/
/* NOTE(review): partial excerpt - some original lines (return type, local
 * declarations, error-return checks, braces) are elided from this view;
 * code tokens below are kept exactly as visible.
 */
lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_iocbq *elsiocb;
    struct lpfc_nodelist *ondlp;
    struct lpfc_nodelist *ndlp;

    /* Payload is one command word followed by the FARP parameter page */
    cmdsize = (sizeof(uint32_t) + sizeof(FARP));
    /* Reuse an existing node for this DID, or create/enable one */
    ndlp = lpfc_findnode_did(vport, nportid);
    ndlp = lpfc_nlp_init(vport, nportid);
    lpfc_enqueue_node(vport, ndlp);
    } else if (!NLP_CHK_NODE_ACT(ndlp)) {
    ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);

    /* NOTE(review): IOCB is prepared with ELS_CMD_RNID while the payload
     * below is stamped ELS_CMD_FARPR - verify against the full source.
     */
    elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
                                 ndlp->nlp_DID, ELS_CMD_RNID);
    /* This will trigger the release of the node just ... */
    pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
    *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
    pcmd += sizeof(uint32_t);

    /* For SCR, remainder of payload is SCR parameter page */
    /* Fill in FARPR payload */
    fp = (FARP *) (pcmd);
    memset(fp, 0, sizeof(FARP));
    lp = (uint32_t *) pcmd;
    /* Requesting and responding port IDs, big-endian on the wire */
    *lp++ = be32_to_cpu(nportid);
    *lp++ = be32_to_cpu(vport->fc_myDID);
    fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);

    memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
    memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
    /* If the originator node is known, include its WWPN/WWNN */
    ondlp = lpfc_findnode_did(vport, nportid);
    if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
        memcpy(&fp->OportName, &ondlp->nlp_portname,
               sizeof(struct lpfc_name));
        memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
               sizeof(struct lpfc_name));

    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                          "Issue FARPR: did:x%x",
                          ndlp->nlp_DID, 0, 0);

    phba->fc_stat.elsXmitFARPR++;
    elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
    /* NOTE(review): comparison target (error constant) elided in excerpt */
    if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
        /* The additional lpfc_nlp_put will cause the following
         * lpfc_els_free_iocb routine to trigger the release of ... */
        lpfc_els_free_iocb(phba, elsiocb);
    /* This will cause the callback-function lpfc_cmpl_els_cmd to
     * trigger the release of the node. ... */
/**
 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
 * @vport: pointer to a host virtual N_Port data structure.
 * @nlp: pointer to a node-list data structure.
 *
 * This routine cancels the timer with a delayed IOCB-command retry for
 * a @vport's @ndlp. It stops the timer for the delayed function retrial and
 * removes the ELS retry event if it presents. In addition, if the
 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
 * commands are sent for the @vport's nodes that require issuing discovery.
 **/
/* NOTE(review): partial excerpt - some braces/returns are elided from this
 * view; code tokens below are kept exactly as visible.
 */
lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_work_evt *evtp;

    /* Nothing to do unless the delayed-retry timer is armed */
    if (!(nlp->nlp_flag & NLP_DELAY_TMO))
    /* nlp_flag is protected by the SCSI host lock */
    spin_lock_irq(shost->host_lock);
    nlp->nlp_flag &= ~NLP_DELAY_TMO;
    spin_unlock_irq(shost->host_lock);
    del_timer_sync(&nlp->nlp_delayfunc);
    nlp->nlp_last_elscmd = 0;
    /* Remove any queued retry event for this node */
    if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
        list_del_init(&nlp->els_retry_evt.evt_listp);
        /* Decrement nlp reference count held for the delayed retry */
        evtp = &nlp->els_retry_evt;
        lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);

    if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
        spin_lock_irq(shost->host_lock);
        nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
        spin_unlock_irq(shost->host_lock);
        if (vport->num_disc_nodes) {
            if (vport->port_state < LPFC_VPORT_READY) {
                /* Check if there are more ADISCs to be sent */
                lpfc_more_adisc(vport);
            /* Check if there are more PLOGIs to be sent */
            lpfc_more_plogi(vport);
            /* Discovery drained: clear active flag and end RSCN processing */
            if (vport->num_disc_nodes == 0) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~FC_NDISC_ACTIVE;
                spin_unlock_irq(shost->host_lock);
                lpfc_can_disctmo(vport);
                lpfc_end_rscn(vport);
/**
 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
 * @t: pointer to the timer_list embedded in the ndlp (nlp_delayfunc).
 *
 * This routine is invoked by the ndlp delayed-function timer to check
 * whether there is any pending ELS retry event(s) with the node. If not, it
 * simply returns. Otherwise, if there is at least one ELS delayed event, it
 * adds the delayed events to the HBA work list and invokes the
 * lpfc_worker_wake_up() routine to wake up worker thread to process the
 * event. Note that lpfc_nlp_get() is called before posting the event to
 * the work list to hold reference count of ndlp so that it guarantees the
 * reference to ndlp will still be available when the worker thread gets
 * to the event associated with the ndlp.
 **/
/* NOTE(review): partial excerpt - return statements/braces are elided from
 * this view; code tokens below are kept exactly as visible.
 */
lpfc_els_retry_delay(struct timer_list *t)
    /* Recover the ndlp from the embedded timer */
    struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
    struct lpfc_vport *vport = ndlp->vport;
    struct lpfc_hba *phba = vport->phba;
    unsigned long flags;
    struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;

    /* work_list and the event list entry are protected by hbalock */
    spin_lock_irqsave(&phba->hbalock, flags);
    /* Event already queued - nothing more to do */
    if (!list_empty(&evtp->evt_listp)) {
        spin_unlock_irqrestore(&phba->hbalock, flags);

    /* We need to hold the node by incrementing the reference
     * count until the queued work is done
     */
    evtp->evt_arg1 = lpfc_nlp_get(ndlp);
    if (evtp->evt_arg1) {
        evtp->evt = LPFC_EVT_ELS_RETRY;
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        lpfc_worker_wake_up(phba);
    spin_unlock_irqrestore(&phba->hbalock, flags);
/**
 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine is the worker-thread handler for processing the @ndlp delayed
 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
 * the last ELS command from the associated ndlp and invokes the proper ELS
 * function according to the delayed ELS command to retry the command.
 **/
/* NOTE(review): partial excerpt - the switch(cmd) skeleton, case labels for
 * FLOGI/PLOGI/ADISC/PRLI/LOGO/FDISC, returns and braces are partially elided
 * from this view; code tokens below are kept exactly as visible.
 */
lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
    struct lpfc_vport *vport = ndlp->vport;
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    uint32_t cmd, retry;

    /* Consume the recorded last ELS command under the host lock */
    spin_lock_irq(shost->host_lock);
    cmd = ndlp->nlp_last_elscmd;
    ndlp->nlp_last_elscmd = 0;

    /* Timer was cancelled after firing - nothing to retry */
    if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
        spin_unlock_irq(shost->host_lock);

    ndlp->nlp_flag &= ~NLP_DELAY_TMO;
    spin_unlock_irq(shost->host_lock);
    /*
     * If a discovery event readded nlp_delayfunc after timer
     * firing and before processing the timer, cancel the ...
     */
    del_timer_sync(&ndlp->nlp_delayfunc);
    retry = ndlp->nlp_retry;
    ndlp->nlp_retry = 0;

    /* Re-issue the recorded ELS command; on success, transition the
     * node state to the matching *_ISSUE state.
     */
    lpfc_issue_els_flogi(vport, ndlp, retry);
    if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
        ndlp->nlp_prev_state = ndlp->nlp_state;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
    if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
        ndlp->nlp_prev_state = ndlp->nlp_state;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
    case ELS_CMD_NVMEPRLI:
    if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
        ndlp->nlp_prev_state = ndlp->nlp_state;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
    if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
        ndlp->nlp_prev_state = ndlp->nlp_state;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
    /* FDISC retry is skipped while the VPI still needs initialization */
    if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
        lpfc_issue_els_fdisc(vport, ndlp, retry);
/**
 * lpfc_els_retry - Make retry decision on an els command iocb
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine makes a retry decision on an ELS command IOCB, which has
 * failed. The following ELS IOCBs use this function for retrying the command
 * when previously issued command responsed with error status: FLOGI, PLOGI,
 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
 * returned error status, it makes the decision whether a retry shall be
 * issued for the command, and whether a retry shall be made immediately or
 * delayed. In the former case, the corresponding ELS command issuing-function
 * is called to retry the command. In the later case, the ELS command shall
 * be posted to the ndlp delayed event and delayed function timer set to the
 * ndlp for the delayed command issusing.
 *
 * Return code
 *   0 - No retry of els command is made
 *   1 - Immediate or delayed retry of els command is made
 **/
/* NOTE(review): partial excerpt - numerous lines (local declarations such as
 * cmd/did/stat/elscmd/logerr, "retry = 1"/"delay = ..." assignments, breaks,
 * goto labels, case labels and braces) are elided from this view; code
 * tokens below are kept exactly as visible.
 */
lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
               struct lpfc_iocbq *rspiocb)
    struct lpfc_vport *vport = cmdiocb->vport;
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    IOCB_t *irsp = &rspiocb->iocb;
    struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
    struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
    int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;

    /* Note: context2 may be 0 for internal driver abort
     * of delays ELS command.
     */
    if (pcmd && pcmd->virt) {
        elscmd = (uint32_t *) (pcmd->virt);

    /* Determine the DID under retry: prefer the active ndlp, else
     * recover it from the response (PLOGI retry path).
     */
    if (ndlp && NLP_CHK_NODE_ACT(ndlp))
        did = ndlp->nlp_DID;
    /* We should only hit this case for retrying PLOGI */
    did = irsp->un.elsreq64.remoteID;
    ndlp = lpfc_findnode_did(vport, did);
    if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
        && (cmd != ELS_CMD_PLOGI))

    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                          "Retry ELS: wd7:x%x wd4:x%x did:x%x",
                          *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);

    /* First dispatch on the IOCB completion status */
    switch (irsp->ulpStatus) {
    case IOSTAT_FCP_RSP_ERROR:
    case IOSTAT_REMOTE_STOP:
        if (phba->sli_rev == LPFC_SLI_REV4) {
            /* This IO was aborted by the target, we don't
             * know the rxid and because we did not send the
             * ABTS we cannot generate and RRQ.
             */
            lpfc_set_rrq_active(phba, ndlp,
                                cmdiocb->sli4_lxritag, 0, 0);
    case IOSTAT_LOCAL_REJECT:
        /* Second-level dispatch on the local-reject error code */
        switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
        case IOERR_LOOP_OPEN_FAILURE:
            if (cmd == ELS_CMD_FLOGI) {
                /* Hornet adapters fall back to loop topology */
                if (PCI_DEVICE_ID_HORNET ==
                    phba->pcidev->device) {
                    phba->fc_topology = LPFC_TOPOLOGY_LOOP;
                    phba->pport->fc_myDID = 0;
                    phba->alpa_map[0] = 0;
                    phba->alpa_map[1] = 0;
            if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
        case IOERR_ILLEGAL_COMMAND:
            lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                             "0124 Retry illegal cmd x%x "
                             "retry:x%x delay:x%x\n",
                             cmd, cmdiocb->retry, delay);
            /* All command's retry policy */
            if (cmdiocb->retry > 2)
        case IOERR_NO_RESOURCES:
            logerr = 1; /* HBA out of resources */
            if (cmdiocb->retry > 100)
        case IOERR_ILLEGAL_FRAME:
        case IOERR_SEQUENCE_TIMEOUT:
        case IOERR_INVALID_RPI:
            if (cmd == ELS_CMD_PLOGI &&
                did == NameServer_DID) {
                /* Continue forever if plogi to */
                /* the nameserver fails */
    case IOSTAT_NPORT_RJT:
    case IOSTAT_FABRIC_RJT:
        if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
    case IOSTAT_NPORT_BSY:
    case IOSTAT_FABRIC_BSY:
        logerr = 1; /* Fabric / Remote NPort out of resources */
        /* LS_RJT path: decode reason/explanation from word 4 */
        stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
        /* Added for Vendor specifc support
         * Just keep retrying for these Rsn / Exp codes
         */
        switch (stat.un.b.lsRjtRsnCode) {
        case LSRJT_UNABLE_TPC:
            /* The driver has a VALID PLOGI but the rport has
             * rejected the PRLI - can't do it now. Delay
             * for 1 second and try again - don't care about ...
             */
            if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) {
                maxretry = lpfc_max_els_tries + 1;
            /* Legacy bug fix code for targets with PLOGI delays. */
            if (stat.un.b.lsRjtRsnCodeExp ==
                LSEXP_CMD_IN_PROGRESS) {
                if (cmd == ELS_CMD_PLOGI) {
            if (stat.un.b.lsRjtRsnCodeExp ==
                LSEXP_CANT_GIVE_DATA) {
                if (cmd == ELS_CMD_PLOGI) {
            if (cmd == ELS_CMD_PLOGI) {
                maxretry = lpfc_max_els_tries + 1;
            /* NPIV FDISC rejected for lack of fabric resources */
            if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
                (cmd == ELS_CMD_FDISC) &&
                (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "0125 FDISC Failed (x%x). "
                                 "Fabric out of resources\n",
                                 stat.un.lsRjtError);
                lpfc_vport_set_state(vport,
                                     FC_VPORT_NO_FABRIC_RSCS);
        case LSRJT_LOGICAL_BSY:
            if ((cmd == ELS_CMD_PLOGI) ||
                (cmd == ELS_CMD_PRLI) ||
                (cmd == ELS_CMD_NVMEPRLI)) {
            } else if (cmd == ELS_CMD_FDISC) {
                /* FDISC retry policy */
                if (cmdiocb->retry >= 32)
        case LSRJT_LOGICAL_ERR:
            /* There are some cases where switches return this
             * error when they are not ready and should be returning
             * Logical Busy. We should delay every time.
             */
            if (cmd == ELS_CMD_FDISC &&
                stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
            } else if (cmd == ELS_CMD_FLOGI &&
                       stat.un.b.lsRjtRsnCodeExp ==
                       LSEXP_NOTHING_MORE) {
                /* Fabric rejects our BB credit: clamp advertised size */
                vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "0820 FLOGI Failed (x%x). "
                                 "BBCredit Not Supported\n",
                                 stat.un.lsRjtError);
        case LSRJT_PROTOCOL_ERR:
            /* Fabric rejected our WWN during NPIV FDISC */
            if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
                (cmd == ELS_CMD_FDISC) &&
                ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
                 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "0122 FDISC Failed (x%x). "
                                 "Fabric Detected Bad WWN\n",
                                 stat.un.lsRjtError);
                lpfc_vport_set_state(vport,
                                     FC_VPORT_FABRIC_REJ_WWN);
        case LSRJT_VENDOR_UNIQUE:
            if ((stat.un.b.vendorUnique == 0x45) &&
                (cmd == ELS_CMD_FLOGI)) {
        case LSRJT_CMD_UNSUPPORTED:
            /* lpfc nvmet returns this type of LS_RJT when it
             * receives an FCP PRLI because lpfc nvmet only
             * support NVME. ELS request is terminated for FCP4 ...
             */
            if (stat.un.b.lsRjtRsnCodeExp ==
                LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
                spin_unlock_irq(shost->host_lock);
    case IOSTAT_INTERMED_RSP:

    /* Management-server DID is never retried here */
    if (did == FDMI_DID)
    if ((cmd == ELS_CMD_FLOGI) &&
        (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
        !lpfc_error_lost_link(irsp)) {
        /* FLOGI retry policy */
        /* retry FLOGI forever */
        if (phba->link_flag != LS_LOOPBACK_MODE)
        if (cmdiocb->retry >= 100)
        else if (cmdiocb->retry >= 32)
    } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
        /* retry FDISCs every second up to devloss */
        maxretry = vport->cfg_devloss_tmo;

    /* Give up once the per-command retry budget is spent */
    if (maxretry && (cmdiocb->retry >= maxretry)) {
        phba->fc_stat.elsRetryExceeded++;
    /* No retries while the driver is unloading */
    if ((vport->load_flag & FC_UNLOADING) != 0)

    if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
        /* Stop retrying PLOGI and FDISC if in FCF discovery */
        if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
            lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                             "2849 Stop retry ELS command "
                             "x%x to remote NPORT x%x, "
                             "Data: x%x x%x\n", cmd, did,
                             cmdiocb->retry, delay);

    /* Retry ELS command <elsCmd> to remote NPORT <did> */
    lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                     "0107 Retry ELS command x%x to remote "
                     "NPORT x%x Data: x%x x%x\n",
                     cmd, did, cmdiocb->retry, delay);

    if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
        ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
         ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
          IOERR_NO_RESOURCES))) {
        /* Don't reset timer for no resources */
        /* If discovery / RSCN timer is running, reset it */
        if (timer_pending(&vport->fc_disctmo) ||
            (vport->fc_flag & FC_RSCN_MODE))
            lpfc_set_disctmo(vport);

    phba->fc_stat.elsXmitRetry++;
    /* Delayed retry: arm the ndlp timer and record the command */
    if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
        phba->fc_stat.elsDelayRetry++;
        ndlp->nlp_retry = cmdiocb->retry;

        /* delay is specified in milliseconds */
        mod_timer(&ndlp->nlp_delayfunc,
                  jiffies + msecs_to_jiffies(delay));
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);

        ndlp->nlp_prev_state = ndlp->nlp_state;
        if ((cmd == ELS_CMD_PRLI) ||
            (cmd == ELS_CMD_NVMEPRLI))
            lpfc_nlp_set_state(vport, ndlp,
                               NLP_STE_PRLI_ISSUE);
        /* NOTE(review): else-branch target state elided in excerpt */
        lpfc_nlp_set_state(vport, ndlp,
        ndlp->nlp_last_elscmd = cmd;

    /* Immediate retry: re-issue per command type */
    lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
    lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
    if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
        ndlp->nlp_prev_state = ndlp->nlp_state;
        lpfc_nlp_set_state(vport, ndlp,
                           NLP_STE_PLOGI_ISSUE);
    lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
    ndlp->nlp_prev_state = ndlp->nlp_state;
    lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
    lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
    case ELS_CMD_NVMEPRLI:
    ndlp->nlp_prev_state = ndlp->nlp_state;
    lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
    lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
    ndlp->nlp_prev_state = ndlp->nlp_state;
    lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
    lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);

    /* No retry ELS command <elsCmd> to remote NPORT <did> */
    lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                     "0137 No retry ELS command x%x to remote "
                     "NPORT x%x: Out of Resources: Error:x%x/%x\n",
                     cmd, did, irsp->ulpStatus,
                     irsp->un.ulpWord[4]);
    lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                     "0108 No retry ELS command x%x to remote "
                     "NPORT x%x Retried:%d Error:x%x/%x\n",
                     cmd, did, cmdiocb->retry, irsp->ulpStatus,
                     irsp->un.ulpWord[4]);
/**
 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
 * @phba: pointer to lpfc hba data structure.
 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
 *
 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
 * checks to see whether there is a lpfc DMA buffer associated with the
 * response of the command IOCB. If so, it will be released before releasing
 * the lpfc DMA buffer associated with the IOCB itself.
 *
 * Return code
 *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
 **/
/* NOTE(review): partial excerpt - kfree()/return lines and some
 * list_remove_head() arguments are elided from this view.
 */
lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
    struct lpfc_dmabuf *buf_ptr;

    /* Free the response before processing the command. */
    if (!list_empty(&buf_ptr1->list)) {
        list_remove_head(&buf_ptr1->list, buf_ptr,
        /* free the response buffer's DMA memory */
        lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
    /* then free the command buffer's DMA memory */
    lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
/**
 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
 * @phba: pointer to lpfc hba data structure.
 * @buf_ptr: pointer to the lpfc dma buffer data structure.
 *
 * This routine releases the lpfc Direct Memory Access (DMA) buffer
 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
 * pool.
 *
 * Return code
 *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
 **/
/* NOTE(review): partial excerpt - kfree()/return lines are elided from
 * this view.
 */
lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
    lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
/**
 * lpfc_els_free_iocb - Free a command iocb and its associated resources
 * @phba: pointer to lpfc hba data structure.
 * @elsiocb: pointer to lpfc els command iocb data structure.
 *
 * This routine frees a command IOCB and its associated resources. The
 * command IOCB data structure contains the reference to various associated
 * resources, these fields must be set to NULL if the associated reference
 * is freed:
 *   context1 - reference to ndlp
 *   context2 - reference to cmd
 *   context2->next - reference to rsp
 *   context3 - reference to bpl
 *
 * It first properly decrements the reference count held on ndlp for the
 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
 * set, it invokes the lpfc_els_free_data() routine to release the Direct
 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
 * adds the DMA buffer the @phba data structure for the delayed release.
 * If reference to the Buffer Pointer List (BPL) is present, the
 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
 * invoked to release the IOCB data structure back to @phba IOCBQ list.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
/* NOTE(review): partial excerpt - some conditionals, else-branches, list
 * arguments, returns and braces are elided from this view; code tokens
 * below are kept exactly as visible.
 */
lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
    struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
    struct lpfc_nodelist *ndlp;

    ndlp = (struct lpfc_nodelist *)elsiocb->context1;
    if (ndlp->nlp_flag & NLP_DEFER_RM) {
        /* If the ndlp is not being used by another discovery ... */
        if (!lpfc_nlp_not_used(ndlp)) {
            /* If ndlp is being used by another discovery
             * thread, just clear NLP_DEFER_RM
             */
            ndlp->nlp_flag &= ~NLP_DEFER_RM;
    /* IOCB no longer holds the node reference */
    elsiocb->context1 = NULL;

    /* context2 = cmd, context2->next = rsp, context3 = bpl */
    if (elsiocb->context2) {
        if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
            /* Firmware could still be in progress of DMAing
             * payload, so don't free data buffer till after ...
             */
            elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
            buf_ptr = elsiocb->context2;
            elsiocb->context2 = NULL;
            /* Queue the buffers on phba->elsbuf for deferred freeing;
             * the elsbuf list is protected by hbalock.
             */
            spin_lock_irq(&phba->hbalock);
            if (!list_empty(&buf_ptr->list)) {
                list_remove_head(&buf_ptr->list,
                                 buf_ptr1, struct lpfc_dmabuf,
                INIT_LIST_HEAD(&buf_ptr1->list);
                list_add_tail(&buf_ptr1->list,
            INIT_LIST_HEAD(&buf_ptr->list);
            list_add_tail(&buf_ptr->list, &phba->elsbuf);
            spin_unlock_irq(&phba->hbalock);
        /* Immediate free path */
        buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
        lpfc_els_free_data(phba, buf_ptr1);
        elsiocb->context2 = NULL;

    if (elsiocb->context3) {
        buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
        lpfc_els_free_bpl(phba, buf_ptr);
        elsiocb->context3 = NULL;
    /* Return the IOCB to the driver's free pool */
    lpfc_sli_release_iocbq(phba, elsiocb);
/**
 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function to the Logout (LOGO)
 * Accept (ACC) Response ELS command. This routine is invoked to indicate
 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
 * release the ndlp if it has the last reference remaining (reference count
 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
 * field to NULL to inform the following lpfc_els_free_iocb() routine no
 * ndlp reference count needs to be decremented. Otherwise, the ndlp
 * reference use-count shall be decremented by the lpfc_els_free_iocb()
 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
 * IOCB data structure.
 **/
/* NOTE(review): partial excerpt - irsp declaration, one vlog argument and
 * some braces are elided from this view; code tokens below are kept exactly
 * as visible.
 */
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                       struct lpfc_iocbq *rspiocb)
    struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
    struct lpfc_vport *vport = cmdiocb->vport;

    irsp = &rspiocb->iocb;
    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
                          "ACC LOGO cmpl: status:x%x/x%x did:x%x",
                          irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
    /* ACC to LOGO completes to NPort <nlp_DID> */
    lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                     "0109 ACC to LOGO completes to NPort x%x "
                     "Data: x%x x%x x%x\n",
                     ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,

    if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
        /* NPort Recovery mode or node is just allocated */
        if (!lpfc_nlp_not_used(ndlp)) {
            /* If the ndlp is being used by another discovery
             * thread, just unregister the RPI.
             */
            lpfc_unreg_rpi(vport, ndlp);
            /* Indicate the node has already released, should
             * not reference to it from within lpfc_els_free_iocb.
             */
            cmdiocb->context1 = NULL;

    /*
     * The driver received a LOGO from the rport and has ACK'd it.
     * At this point, the driver is done so release the IOCB ...
     */
    lpfc_els_free_iocb(phba, cmdiocb);
/**
 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * This routine is the completion callback function for unregister default
 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
 * decrements the ndlp reference count held for this completion callback
 * function. After that, it invokes the lpfc_nlp_not_used() to check
 * whether there is only one reference left on the ndlp. If so, it will
 * perform one more decrement and trigger the release of the ndlp.
 **/
/* NOTE(review): partial excerpt - NULL guards, an else-branch and braces
 * are elided from this view; code tokens below are kept exactly as visible.
 */
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
    struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
    struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

    /* Detach the resources from the mailbox before freeing them */
    pmb->context1 = NULL;
    pmb->context2 = NULL;
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    mempool_free(pmb, phba->mbox_mem_pool);
    lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
                     "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
                     ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
                     kref_read(&ndlp->kref),
                     ndlp->nlp_usg_map, ndlp);
    if (NLP_CHK_NODE_ACT(ndlp)) {
        /* This is the end of the default RPI cleanup logic for
         * this ndlp. If no other discovery threads are using
         * this ndlp, free all resources associated with it.
         */
        lpfc_nlp_not_used(ndlp);
    /* NOTE(review): appears to be the inactive-node branch - confirm
     * against full source.
     */
    lpfc_drop_node(ndlp->vport, ndlp);
/**
 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function for ELS Response IOCB
 * command. In normal case, this callback function just properly sets the
 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
 * field in the command IOCB is not NULL, the referred mailbox command will
 * be send out, and then invokes the lpfc_els_free_iocb() routine to release
 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
 * link down event occurred during the discovery, the lpfc_nlp_not_used()
 * routine shall be invoked trying to release the ndlp if no other threads
 * are currently referring it.
 **/
/* NOTE(review): partial excerpt - irsp/pcmd declarations, goto labels,
 * ls_rjt assignments, some vlog arguments, else-branches and braces are
 * elided from this view; code tokens below are kept exactly as visible.
 */
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                  struct lpfc_iocbq *rspiocb)
    struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
    /* ndlp may legitimately be NULL here - derive vport/shost defensively */
    struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
    struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
    LPFC_MBOXQ_t *mbox = NULL;
    struct lpfc_dmabuf *mp = NULL;
    uint32_t ls_rjt = 0;

    irsp = &rspiocb->iocb;

    if (cmdiocb->context_un.mbox)
        mbox = cmdiocb->context_un.mbox;

    /* First determine if this is a LS_RJT cmpl. Note, this callback
     * function can have cmdiocb->context1 (ndlp) field set to NULL.
     */
    pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
    if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
        (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
        /* A LS_RJT associated with Default RPI cleanup has its own
         * separate code path.
         */
        if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))

    /* Check to see if link went down during discovery */
    if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
        /* Release the mailbox and its DMA buffer on the error path */
        mp = (struct lpfc_dmabuf *) mbox->context1;
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        mempool_free(mbox, phba->mbox_mem_pool);
        if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
            (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
            if (lpfc_nlp_not_used(ndlp)) {
                /* Indicate the node has already released,
                 * should not reference to it from within
                 * the routine lpfc_els_free_iocb.
                 */
                cmdiocb->context1 = NULL;

    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
                          "ELS rsp cmpl: status:x%x/x%x did:x%x",
                          irsp->ulpStatus, irsp->un.ulpWord[4],
                          cmdiocb->iocb.un.elsreq64.remoteID);
    /* ELS response tag <ulpIoTag> completes */
    lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                     "0110 ELS response tag x%x completes "
                     "Data: x%x x%x x%x x%x x%x x%x x%x\n",
                     cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
                     rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
                     ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,

    /* Successful ACC that should trigger a REG_LOGIN mailbox */
    if ((rspiocb->iocb.ulpStatus == 0)
        && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
        if (!lpfc_unreg_rpi(vport, ndlp) &&
            (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
             ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
            lpfc_printf_vlog(vport, KERN_INFO,
                             "0314 PLOGI recov DID x%x "
                             "Data: x%x x%x x%x\n",
                             ndlp->nlp_DID, ndlp->nlp_state,
                             ndlp->nlp_rpi, ndlp->nlp_flag);
            mp = mbox->context1;
            lpfc_mbuf_free(phba, mp->virt,
            mempool_free(mbox, phba->mbox_mem_pool);

        /* Increment reference count to ndlp to hold the
         * reference to ndlp for the callback function.
         */
        mbox->context2 = lpfc_nlp_get(ndlp);
        mbox->vport = vport;
        if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
            /* Default-RPI cleanup path: immediate unreg on completion */
            mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
            mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
        mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
        ndlp->nlp_prev_state = ndlp->nlp_state;
        lpfc_nlp_set_state(vport, ndlp,
                           NLP_STE_REG_LOGIN_ISSUE);
        ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
        if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
            != MBX_NOT_FINISHED)
        /* Decrement the ndlp reference count we
         * set for this failed mailbox command.
         */
        ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

        /* ELS rsp: Cannot issue reg_login for <NPortid> */
        lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                         "0138 ELS rsp: Cannot issue reg_login for x%x "
                         "Data: x%x x%x x%x\n",
                         ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,

        if (lpfc_nlp_not_used(ndlp)) {
            /* Indicate node has already been released,
             * should not reference to it from within
             * the routine lpfc_els_free_iocb.
             */
            cmdiocb->context1 = NULL;
    /* Do not drop node for lpfc_els_abort'ed ELS cmds */
    if (!lpfc_error_lost_link(irsp) &&
        ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
        if (lpfc_nlp_not_used(ndlp)) {
            /* Indicate node has already been
             * released, should not reference
             * to it from within the routine
             * lpfc_els_free_iocb.
             */
            cmdiocb->context1 = NULL;
    /* Clean up any mailbox not handed off to the SLI layer */
    mp = (struct lpfc_dmabuf *) mbox->context1;
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    mempool_free(mbox, phba->mbox_mem_pool);

    if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
        spin_unlock_irq(shost->host_lock);

        /* If the node is not being used by another discovery thread,
         * and we are sending a reject, we are done with it.
         * Release driver reference count here and free associated ...
         */
        if (lpfc_nlp_not_used(ndlp))
            /* Indicate node has already been released,
             * should not reference to it from within
             * the routine lpfc_els_free_iocb.
             */
            cmdiocb->context1 = NULL;

    lpfc_els_free_iocb(phba, cmdiocb);
4104 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
4105 * @vport: pointer to a host virtual N_Port data structure.
4106 * @flag: the els command code to be accepted.
4107 * @oldiocb: pointer to the original lpfc command iocb data structure.
4108 * @ndlp: pointer to a node-list data structure.
4109 * @mbox: pointer to the driver internal queue element for mailbox command.
4111 * This routine prepares and issues an Accept (ACC) response IOCB
4112 * command. It uses the @flag to properly set up the IOCB field for the
4113 * specific ACC response command to be issued and invokes the
4114 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
4115 * @mbox pointer is passed in, it will be put into the context_un.mbox
4116 * field of the IOCB for the completion callback function to issue the
4117 * mailbox command to the HBA later when callback is invoked.
4119 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4120 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4121 * will be stored into the context1 field of the IOCB for the completion
4122 * callback function to the corresponding response ELS IOCB command.
4125 * 0 - Successfully issued acc response
4126 * 1 - Failed to issue acc response
4129 lpfc_els_rsp_acc(struct lpfc_vport
*vport
, uint32_t flag
,
4130 struct lpfc_iocbq
*oldiocb
, struct lpfc_nodelist
*ndlp
,
4133 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
4134 struct lpfc_hba
*phba
= vport
->phba
;
4137 struct lpfc_iocbq
*elsiocb
;
4139 struct serv_parm
*sp
;
4142 ELS_PKT
*els_pkt_ptr
;
4144 oldcmd
= &oldiocb
->iocb
;
4148 cmdsize
= sizeof(uint32_t);
4149 elsiocb
= lpfc_prep_els_iocb(vport
, 0, cmdsize
, oldiocb
->retry
,
4150 ndlp
, ndlp
->nlp_DID
, ELS_CMD_ACC
);
4152 spin_lock_irq(shost
->host_lock
);
4153 ndlp
->nlp_flag
&= ~NLP_LOGO_ACC
;
4154 spin_unlock_irq(shost
->host_lock
);
4158 icmd
= &elsiocb
->iocb
;
4159 icmd
->ulpContext
= oldcmd
->ulpContext
; /* Xri / rx_id */
4160 icmd
->unsli3
.rcvsli3
.ox_id
= oldcmd
->unsli3
.rcvsli3
.ox_id
;
4161 pcmd
= (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
4162 *((uint32_t *) (pcmd
)) = ELS_CMD_ACC
;
4163 pcmd
+= sizeof(uint32_t);
4165 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_RSP
,
4166 "Issue ACC: did:x%x flg:x%x",
4167 ndlp
->nlp_DID
, ndlp
->nlp_flag
, 0);
4171 cmdsize
= (sizeof(struct serv_parm
) + sizeof(uint32_t));
4172 elsiocb
= lpfc_prep_els_iocb(vport
, 0, cmdsize
, oldiocb
->retry
,
4173 ndlp
, ndlp
->nlp_DID
, ELS_CMD_ACC
);
4177 icmd
= &elsiocb
->iocb
;
4178 icmd
->ulpContext
= oldcmd
->ulpContext
; /* Xri / rx_id */
4179 icmd
->unsli3
.rcvsli3
.ox_id
= oldcmd
->unsli3
.rcvsli3
.ox_id
;
4180 pcmd
= (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
4183 elsiocb
->context_un
.mbox
= mbox
;
4185 *((uint32_t *) (pcmd
)) = ELS_CMD_ACC
;
4186 pcmd
+= sizeof(uint32_t);
4187 sp
= (struct serv_parm
*)pcmd
;
4189 if (flag
== ELS_CMD_FLOGI
) {
4190 /* Copy the received service parameters back */
4191 memcpy(sp
, &phba
->fc_fabparam
,
4192 sizeof(struct serv_parm
));
4194 /* Clear the F_Port bit */
4197 /* Mark all class service parameters as invalid */
4198 sp
->cls1
.classValid
= 0;
4199 sp
->cls2
.classValid
= 0;
4200 sp
->cls3
.classValid
= 0;
4201 sp
->cls4
.classValid
= 0;
4203 /* Copy our worldwide names */
4204 memcpy(&sp
->portName
, &vport
->fc_sparam
.portName
,
4205 sizeof(struct lpfc_name
));
4206 memcpy(&sp
->nodeName
, &vport
->fc_sparam
.nodeName
,
4207 sizeof(struct lpfc_name
));
4209 memcpy(pcmd
, &vport
->fc_sparam
,
4210 sizeof(struct serv_parm
));
4212 sp
->cmn
.valid_vendor_ver_level
= 0;
4213 memset(sp
->un
.vendorVersion
, 0,
4214 sizeof(sp
->un
.vendorVersion
));
4215 sp
->cmn
.bbRcvSizeMsb
&= 0xF;
4217 /* If our firmware supports this feature, convey that
4218 * info to the target using the vendor specific field.
4220 if (phba
->sli
.sli_flag
& LPFC_SLI_SUPPRESS_RSP
) {
4221 sp
->cmn
.valid_vendor_ver_level
= 1;
4222 sp
->un
.vv
.vid
= cpu_to_be32(LPFC_VV_EMLX_ID
);
4224 cpu_to_be32(LPFC_VV_SUPPRESS_RSP
);
4228 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_RSP
,
4229 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
4230 ndlp
->nlp_DID
, ndlp
->nlp_flag
, 0);
4233 cmdsize
= sizeof(uint32_t) + sizeof(PRLO
);
4234 elsiocb
= lpfc_prep_els_iocb(vport
, 0, cmdsize
, oldiocb
->retry
,
4235 ndlp
, ndlp
->nlp_DID
, ELS_CMD_PRLO
);
4239 icmd
= &elsiocb
->iocb
;
4240 icmd
->ulpContext
= oldcmd
->ulpContext
; /* Xri / rx_id */
4241 icmd
->unsli3
.rcvsli3
.ox_id
= oldcmd
->unsli3
.rcvsli3
.ox_id
;
4242 pcmd
= (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
4244 memcpy(pcmd
, ((struct lpfc_dmabuf
*) oldiocb
->context2
)->virt
,
4245 sizeof(uint32_t) + sizeof(PRLO
));
4246 *((uint32_t *) (pcmd
)) = ELS_CMD_PRLO_ACC
;
4247 els_pkt_ptr
= (ELS_PKT
*) pcmd
;
4248 els_pkt_ptr
->un
.prlo
.acceptRspCode
= PRLO_REQ_EXECUTED
;
4250 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_RSP
,
4251 "Issue ACC PRLO: did:x%x flg:x%x",
4252 ndlp
->nlp_DID
, ndlp
->nlp_flag
, 0);
4257 /* Xmit ELS ACC response tag <ulpIoTag> */
4258 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
4259 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
4260 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
4262 elsiocb
->iotag
, elsiocb
->iocb
.ulpContext
,
4263 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_state
,
4264 ndlp
->nlp_rpi
, vport
->fc_flag
);
4265 if (ndlp
->nlp_flag
& NLP_LOGO_ACC
) {
4266 spin_lock_irq(shost
->host_lock
);
4267 if (!(ndlp
->nlp_flag
& NLP_RPI_REGISTERED
||
4268 ndlp
->nlp_flag
& NLP_REG_LOGIN_SEND
))
4269 ndlp
->nlp_flag
&= ~NLP_LOGO_ACC
;
4270 spin_unlock_irq(shost
->host_lock
);
4271 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_logo_acc
;
4273 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
4276 phba
->fc_stat
.elsXmitACC
++;
4277 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
4278 if (rc
== IOCB_ERROR
) {
4279 lpfc_els_free_iocb(phba
, elsiocb
);
4286 * lpfc_els_rsp_reject - Propare and issue a rjt response iocb command
4287 * @vport: pointer to a virtual N_Port data structure.
4289 * @oldiocb: pointer to the original lpfc command iocb data structure.
4290 * @ndlp: pointer to a node-list data structure.
4291 * @mbox: pointer to the driver internal queue element for mailbox command.
4293 * This routine prepares and issue an Reject (RJT) response IOCB
4294 * command. If a @mbox pointer is passed in, it will be put into the
4295 * context_un.mbox field of the IOCB for the completion callback function
4296 * to issue to the HBA later.
4298 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4299 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4300 * will be stored into the context1 field of the IOCB for the completion
4301 * callback function to the reject response ELS IOCB command.
4304 * 0 - Successfully issued reject response
4305 * 1 - Failed to issue reject response
4308 lpfc_els_rsp_reject(struct lpfc_vport
*vport
, uint32_t rejectError
,
4309 struct lpfc_iocbq
*oldiocb
, struct lpfc_nodelist
*ndlp
,
4312 struct lpfc_hba
*phba
= vport
->phba
;
4315 struct lpfc_iocbq
*elsiocb
;
4320 cmdsize
= 2 * sizeof(uint32_t);
4321 elsiocb
= lpfc_prep_els_iocb(vport
, 0, cmdsize
, oldiocb
->retry
, ndlp
,
4322 ndlp
->nlp_DID
, ELS_CMD_LS_RJT
);
4326 icmd
= &elsiocb
->iocb
;
4327 oldcmd
= &oldiocb
->iocb
;
4328 icmd
->ulpContext
= oldcmd
->ulpContext
; /* Xri / rx_id */
4329 icmd
->unsli3
.rcvsli3
.ox_id
= oldcmd
->unsli3
.rcvsli3
.ox_id
;
4330 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
4332 *((uint32_t *) (pcmd
)) = ELS_CMD_LS_RJT
;
4333 pcmd
+= sizeof(uint32_t);
4334 *((uint32_t *) (pcmd
)) = rejectError
;
4337 elsiocb
->context_un
.mbox
= mbox
;
4339 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
4340 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
4341 "0129 Xmit ELS RJT x%x response tag x%x "
4342 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
4344 rejectError
, elsiocb
->iotag
,
4345 elsiocb
->iocb
.ulpContext
, ndlp
->nlp_DID
,
4346 ndlp
->nlp_flag
, ndlp
->nlp_state
, ndlp
->nlp_rpi
);
4347 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_RSP
,
4348 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
4349 ndlp
->nlp_DID
, ndlp
->nlp_flag
, rejectError
);
4351 phba
->fc_stat
.elsXmitLSRJT
++;
4352 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
4353 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
4355 if (rc
== IOCB_ERROR
) {
4356 lpfc_els_free_iocb(phba
, elsiocb
);
4363 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
4364 * @vport: pointer to a virtual N_Port data structure.
4365 * @oldiocb: pointer to the original lpfc command iocb data structure.
4366 * @ndlp: pointer to a node-list data structure.
4368 * This routine prepares and issues an Accept (ACC) response to Address
4369 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
4370 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4372 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4373 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4374 * will be stored into the context1 field of the IOCB for the completion
4375 * callback function to the ADISC Accept response ELS IOCB command.
4378 * 0 - Successfully issued acc adisc response
4379 * 1 - Failed to issue adisc acc response
4382 lpfc_els_rsp_adisc_acc(struct lpfc_vport
*vport
, struct lpfc_iocbq
*oldiocb
,
4383 struct lpfc_nodelist
*ndlp
)
4385 struct lpfc_hba
*phba
= vport
->phba
;
4387 IOCB_t
*icmd
, *oldcmd
;
4388 struct lpfc_iocbq
*elsiocb
;
4393 cmdsize
= sizeof(uint32_t) + sizeof(ADISC
);
4394 elsiocb
= lpfc_prep_els_iocb(vport
, 0, cmdsize
, oldiocb
->retry
, ndlp
,
4395 ndlp
->nlp_DID
, ELS_CMD_ACC
);
4399 icmd
= &elsiocb
->iocb
;
4400 oldcmd
= &oldiocb
->iocb
;
4401 icmd
->ulpContext
= oldcmd
->ulpContext
; /* Xri / rx_id */
4402 icmd
->unsli3
.rcvsli3
.ox_id
= oldcmd
->unsli3
.rcvsli3
.ox_id
;
4404 /* Xmit ADISC ACC response tag <ulpIoTag> */
4405 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
4406 "0130 Xmit ADISC ACC response iotag x%x xri: "
4407 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
4408 elsiocb
->iotag
, elsiocb
->iocb
.ulpContext
,
4409 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_state
,
4411 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
4413 *((uint32_t *) (pcmd
)) = ELS_CMD_ACC
;
4414 pcmd
+= sizeof(uint32_t);
4416 ap
= (ADISC
*) (pcmd
);
4417 ap
->hardAL_PA
= phba
->fc_pref_ALPA
;
4418 memcpy(&ap
->portName
, &vport
->fc_portname
, sizeof(struct lpfc_name
));
4419 memcpy(&ap
->nodeName
, &vport
->fc_nodename
, sizeof(struct lpfc_name
));
4420 ap
->DID
= be32_to_cpu(vport
->fc_myDID
);
4422 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_RSP
,
4423 "Issue ACC ADISC: did:x%x flg:x%x",
4424 ndlp
->nlp_DID
, ndlp
->nlp_flag
, 0);
4426 phba
->fc_stat
.elsXmitACC
++;
4427 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
4428 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
4429 if (rc
== IOCB_ERROR
) {
4430 lpfc_els_free_iocb(phba
, elsiocb
);
4437 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
4438 * @vport: pointer to a virtual N_Port data structure.
4439 * @oldiocb: pointer to the original lpfc command iocb data structure.
4440 * @ndlp: pointer to a node-list data structure.
4442 * This routine prepares and issues an Accept (ACC) response to Process
4443 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
4444 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4446 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4447 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4448 * will be stored into the context1 field of the IOCB for the completion
4449 * callback function to the PRLI Accept response ELS IOCB command.
4452 * 0 - Successfully issued acc prli response
4453 * 1 - Failed to issue acc prli response
4456 lpfc_els_rsp_prli_acc(struct lpfc_vport
*vport
, struct lpfc_iocbq
*oldiocb
,
4457 struct lpfc_nodelist
*ndlp
)
4459 struct lpfc_hba
*phba
= vport
->phba
;
4461 struct lpfc_nvme_prli
*npr_nvme
;
4465 struct lpfc_iocbq
*elsiocb
;
4468 uint32_t prli_fc4_req
, *req_payload
;
4469 struct lpfc_dmabuf
*req_buf
;
4473 /* Need the incoming PRLI payload to determine if the ACC is for an
4474 * FC4 or NVME PRLI type. The PRLI type is at word 1.
4476 req_buf
= (struct lpfc_dmabuf
*)oldiocb
->context2
;
4477 req_payload
= (((uint32_t *)req_buf
->virt
) + 1);
4479 /* PRLI type payload is at byte 3 for FCP or NVME. */
4480 prli_fc4_req
= be32_to_cpu(*req_payload
);
4481 prli_fc4_req
= (prli_fc4_req
>> 24) & 0xff;
4482 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
4483 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n",
4484 prli_fc4_req
, *((uint32_t *)req_payload
));
4486 if (prli_fc4_req
== PRLI_FCP_TYPE
) {
4487 cmdsize
= sizeof(uint32_t) + sizeof(PRLI
);
4488 elsrspcmd
= (ELS_CMD_ACC
| (ELS_CMD_PRLI
& ~ELS_RSP_MASK
));
4489 } else if (prli_fc4_req
& PRLI_NVME_TYPE
) {
4490 cmdsize
= sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli
);
4491 elsrspcmd
= (ELS_CMD_ACC
| (ELS_CMD_NVMEPRLI
& ~ELS_RSP_MASK
));
4496 elsiocb
= lpfc_prep_els_iocb(vport
, 0, cmdsize
, oldiocb
->retry
, ndlp
,
4497 ndlp
->nlp_DID
, elsrspcmd
);
4501 icmd
= &elsiocb
->iocb
;
4502 oldcmd
= &oldiocb
->iocb
;
4503 icmd
->ulpContext
= oldcmd
->ulpContext
; /* Xri / rx_id */
4504 icmd
->unsli3
.rcvsli3
.ox_id
= oldcmd
->unsli3
.rcvsli3
.ox_id
;
4506 /* Xmit PRLI ACC response tag <ulpIoTag> */
4507 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
4508 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
4509 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
4510 elsiocb
->iotag
, elsiocb
->iocb
.ulpContext
,
4511 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_state
,
4513 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
4514 memset(pcmd
, 0, cmdsize
);
4516 *((uint32_t *)(pcmd
)) = elsrspcmd
;
4517 pcmd
+= sizeof(uint32_t);
4519 /* For PRLI, remainder of payload is PRLI parameter page */
4522 if (prli_fc4_req
== PRLI_FCP_TYPE
) {
4524 * If the remote port is a target and our firmware version
4525 * is 3.20 or later, set the following bits for FC-TAPE
4528 npr
= (PRLI
*) pcmd
;
4529 if ((ndlp
->nlp_type
& NLP_FCP_TARGET
) &&
4530 (vpd
->rev
.feaLevelHigh
>= 0x02)) {
4531 npr
->ConfmComplAllowed
= 1;
4533 npr
->TaskRetryIdReq
= 1;
4535 npr
->acceptRspCode
= PRLI_REQ_EXECUTED
;
4536 npr
->estabImagePair
= 1;
4537 npr
->readXferRdyDis
= 1;
4538 npr
->ConfmComplAllowed
= 1;
4539 npr
->prliType
= PRLI_FCP_TYPE
;
4540 npr
->initiatorFunc
= 1;
4541 } else if (prli_fc4_req
& PRLI_NVME_TYPE
) {
4542 /* Respond with an NVME PRLI Type */
4543 npr_nvme
= (struct lpfc_nvme_prli
*) pcmd
;
4544 bf_set(prli_type_code
, npr_nvme
, PRLI_NVME_TYPE
);
4545 bf_set(prli_estabImagePair
, npr_nvme
, 0); /* Should be 0 */
4546 bf_set(prli_acc_rsp_code
, npr_nvme
, PRLI_REQ_EXECUTED
);
4547 if (phba
->nvmet_support
) {
4548 bf_set(prli_tgt
, npr_nvme
, 1);
4549 bf_set(prli_disc
, npr_nvme
, 1);
4550 if (phba
->cfg_nvme_enable_fb
) {
4551 bf_set(prli_fba
, npr_nvme
, 1);
4553 /* TBD. Target mode needs to post buffers
4554 * that support the configured first burst
4557 bf_set(prli_fb_sz
, npr_nvme
,
4558 phba
->cfg_nvmet_fb_size
);
4561 bf_set(prli_init
, npr_nvme
, 1);
4564 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NVME_DISC
,
4565 "6015 NVME issue PRLI ACC word1 x%08x "
4566 "word4 x%08x word5 x%08x flag x%x, "
4567 "fcp_info x%x nlp_type x%x\n",
4568 npr_nvme
->word1
, npr_nvme
->word4
,
4569 npr_nvme
->word5
, ndlp
->nlp_flag
,
4570 ndlp
->nlp_fcp_info
, ndlp
->nlp_type
);
4571 npr_nvme
->word1
= cpu_to_be32(npr_nvme
->word1
);
4572 npr_nvme
->word4
= cpu_to_be32(npr_nvme
->word4
);
4573 npr_nvme
->word5
= cpu_to_be32(npr_nvme
->word5
);
4575 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
4576 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
4577 prli_fc4_req
, ndlp
->nlp_fc4_type
,
4580 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_RSP
,
4581 "Issue ACC PRLI: did:x%x flg:x%x",
4582 ndlp
->nlp_DID
, ndlp
->nlp_flag
, 0);
4584 phba
->fc_stat
.elsXmitACC
++;
4585 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
4587 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
4588 if (rc
== IOCB_ERROR
) {
4589 lpfc_els_free_iocb(phba
, elsiocb
);
4596 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
4597 * @vport: pointer to a virtual N_Port data structure.
4598 * @format: rnid command format.
4599 * @oldiocb: pointer to the original lpfc command iocb data structure.
4600 * @ndlp: pointer to a node-list data structure.
4602 * This routine issues a Request Node Identification Data (RNID) Accept
4603 * (ACC) response. It constructs the RNID ACC response command according to
4604 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
4605 * issue the response. Note that this command does not need to hold the ndlp
4606 * reference count for the callback. So, the ndlp reference count taken by
4607 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
4608 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
4609 * there is no ndlp reference available.
4611 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4612 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4613 * will be stored into the context1 field of the IOCB for the completion
4614 * callback function. However, for the RNID Accept Response ELS command,
4615 * this is undone later by this routine after the IOCB is allocated.
4618 * 0 - Successfully issued acc rnid response
4619 * 1 - Failed to issue acc rnid response
4622 lpfc_els_rsp_rnid_acc(struct lpfc_vport
*vport
, uint8_t format
,
4623 struct lpfc_iocbq
*oldiocb
, struct lpfc_nodelist
*ndlp
)
4625 struct lpfc_hba
*phba
= vport
->phba
;
4627 IOCB_t
*icmd
, *oldcmd
;
4628 struct lpfc_iocbq
*elsiocb
;
4633 cmdsize
= sizeof(uint32_t) + sizeof(uint32_t)
4634 + (2 * sizeof(struct lpfc_name
));
4636 cmdsize
+= sizeof(RNID_TOP_DISC
);
4638 elsiocb
= lpfc_prep_els_iocb(vport
, 0, cmdsize
, oldiocb
->retry
, ndlp
,
4639 ndlp
->nlp_DID
, ELS_CMD_ACC
);
4643 icmd
= &elsiocb
->iocb
;
4644 oldcmd
= &oldiocb
->iocb
;
4645 icmd
->ulpContext
= oldcmd
->ulpContext
; /* Xri / rx_id */
4646 icmd
->unsli3
.rcvsli3
.ox_id
= oldcmd
->unsli3
.rcvsli3
.ox_id
;
4648 /* Xmit RNID ACC response tag <ulpIoTag> */
4649 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
4650 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
4651 elsiocb
->iotag
, elsiocb
->iocb
.ulpContext
);
4652 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
4653 *((uint32_t *) (pcmd
)) = ELS_CMD_ACC
;
4654 pcmd
+= sizeof(uint32_t);
4656 memset(pcmd
, 0, sizeof(RNID
));
4657 rn
= (RNID
*) (pcmd
);
4658 rn
->Format
= format
;
4659 rn
->CommonLen
= (2 * sizeof(struct lpfc_name
));
4660 memcpy(&rn
->portName
, &vport
->fc_portname
, sizeof(struct lpfc_name
));
4661 memcpy(&rn
->nodeName
, &vport
->fc_nodename
, sizeof(struct lpfc_name
));
4664 rn
->SpecificLen
= 0;
4666 case RNID_TOPOLOGY_DISC
:
4667 rn
->SpecificLen
= sizeof(RNID_TOP_DISC
);
4668 memcpy(&rn
->un
.topologyDisc
.portName
,
4669 &vport
->fc_portname
, sizeof(struct lpfc_name
));
4670 rn
->un
.topologyDisc
.unitType
= RNID_HBA
;
4671 rn
->un
.topologyDisc
.physPort
= 0;
4672 rn
->un
.topologyDisc
.attachedNodes
= 0;
4676 rn
->SpecificLen
= 0;
4680 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_RSP
,
4681 "Issue ACC RNID: did:x%x flg:x%x",
4682 ndlp
->nlp_DID
, ndlp
->nlp_flag
, 0);
4684 phba
->fc_stat
.elsXmitACC
++;
4685 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
4687 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
4688 if (rc
== IOCB_ERROR
) {
4689 lpfc_els_free_iocb(phba
, elsiocb
);
4696 * lpfc_els_clear_rrq - Clear the rq that this rrq describes.
4697 * @vport: pointer to a virtual N_Port data structure.
4698 * @iocb: pointer to the lpfc command iocb data structure.
4699 * @ndlp: pointer to a node-list data structure.
4704 lpfc_els_clear_rrq(struct lpfc_vport
*vport
,
4705 struct lpfc_iocbq
*iocb
, struct lpfc_nodelist
*ndlp
)
4707 struct lpfc_hba
*phba
= vport
->phba
;
4712 struct lpfc_node_rrq
*prrq
;
4715 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) iocb
->context2
)->virt
);
4716 pcmd
+= sizeof(uint32_t);
4717 rrq
= (struct RRQ
*)pcmd
;
4718 rrq
->rrq_exchg
= be32_to_cpu(rrq
->rrq_exchg
);
4719 rxid
= bf_get(rrq_rxid
, rrq
);
4721 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
4722 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
4724 be32_to_cpu(bf_get(rrq_did
, rrq
)),
4725 bf_get(rrq_oxid
, rrq
),
4727 iocb
->iotag
, iocb
->iocb
.ulpContext
);
4729 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_RSP
,
4730 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
4731 ndlp
->nlp_DID
, ndlp
->nlp_flag
, rrq
->rrq_exchg
);
4732 if (vport
->fc_myDID
== be32_to_cpu(bf_get(rrq_did
, rrq
)))
4733 xri
= bf_get(rrq_oxid
, rrq
);
4736 prrq
= lpfc_get_active_rrq(vport
, xri
, ndlp
->nlp_DID
);
4738 lpfc_clr_rrq_active(phba
, xri
, prrq
);
4743 * lpfc_els_rsp_echo_acc - Issue echo acc response
4744 * @vport: pointer to a virtual N_Port data structure.
4745 * @data: pointer to echo data to return in the accept.
4746 * @oldiocb: pointer to the original lpfc command iocb data structure.
4747 * @ndlp: pointer to a node-list data structure.
4750 * 0 - Successfully issued acc echo response
4751 * 1 - Failed to issue acc echo response
4754 lpfc_els_rsp_echo_acc(struct lpfc_vport
*vport
, uint8_t *data
,
4755 struct lpfc_iocbq
*oldiocb
, struct lpfc_nodelist
*ndlp
)
4757 struct lpfc_hba
*phba
= vport
->phba
;
4758 struct lpfc_iocbq
*elsiocb
;
4763 cmdsize
= oldiocb
->iocb
.unsli3
.rcvsli3
.acc_len
;
4765 /* The accumulated length can exceed the BPL_SIZE. For
4766 * now, use this as the limit
4768 if (cmdsize
> LPFC_BPL_SIZE
)
4769 cmdsize
= LPFC_BPL_SIZE
;
4770 elsiocb
= lpfc_prep_els_iocb(vport
, 0, cmdsize
, oldiocb
->retry
, ndlp
,
4771 ndlp
->nlp_DID
, ELS_CMD_ACC
);
4775 elsiocb
->iocb
.ulpContext
= oldiocb
->iocb
.ulpContext
; /* Xri / rx_id */
4776 elsiocb
->iocb
.unsli3
.rcvsli3
.ox_id
= oldiocb
->iocb
.unsli3
.rcvsli3
.ox_id
;
4778 /* Xmit ECHO ACC response tag <ulpIoTag> */
4779 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
4780 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
4781 elsiocb
->iotag
, elsiocb
->iocb
.ulpContext
);
4782 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
4783 *((uint32_t *) (pcmd
)) = ELS_CMD_ACC
;
4784 pcmd
+= sizeof(uint32_t);
4785 memcpy(pcmd
, data
, cmdsize
- sizeof(uint32_t));
4787 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_RSP
,
4788 "Issue ACC ECHO: did:x%x flg:x%x",
4789 ndlp
->nlp_DID
, ndlp
->nlp_flag
, 0);
4791 phba
->fc_stat
.elsXmitACC
++;
4792 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
4794 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
4795 if (rc
== IOCB_ERROR
) {
4796 lpfc_els_free_iocb(phba
, elsiocb
);
4803 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
4804 * @vport: pointer to a host virtual N_Port data structure.
4806 * This routine issues Address Discover (ADISC) ELS commands to those
4807 * N_Ports which are in node port recovery state and ADISC has not been issued
4808 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
4809 * lpfc_issue_els_adisc() routine, the per @vport number of discover count
4810 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a
4811 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
4812 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC
4813 * IOCBs quit for later pick up. On the other hand, after walking through
4814 * all the ndlps with the @vport and there is none ADISC IOCB issued, the
4815 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is
4816 * no more ADISC need to be sent.
4819 * The number of N_Ports with adisc issued.
4822 lpfc_els_disc_adisc(struct lpfc_vport
*vport
)
4824 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
4825 struct lpfc_nodelist
*ndlp
, *next_ndlp
;
4828 /* go thru NPR nodes and issue any remaining ELS ADISCs */
4829 list_for_each_entry_safe(ndlp
, next_ndlp
, &vport
->fc_nodes
, nlp_listp
) {
4830 if (!NLP_CHK_NODE_ACT(ndlp
))
4832 if (ndlp
->nlp_state
== NLP_STE_NPR_NODE
&&
4833 (ndlp
->nlp_flag
& NLP_NPR_2B_DISC
) != 0 &&
4834 (ndlp
->nlp_flag
& NLP_NPR_ADISC
) != 0) {
4835 spin_lock_irq(shost
->host_lock
);
4836 ndlp
->nlp_flag
&= ~NLP_NPR_ADISC
;
4837 spin_unlock_irq(shost
->host_lock
);
4838 ndlp
->nlp_prev_state
= ndlp
->nlp_state
;
4839 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_ADISC_ISSUE
);
4840 lpfc_issue_els_adisc(vport
, ndlp
, 0);
4842 vport
->num_disc_nodes
++;
4843 if (vport
->num_disc_nodes
>=
4844 vport
->cfg_discovery_threads
) {
4845 spin_lock_irq(shost
->host_lock
);
4846 vport
->fc_flag
|= FC_NLP_MORE
;
4847 spin_unlock_irq(shost
->host_lock
);
4852 if (sentadisc
== 0) {
4853 spin_lock_irq(shost
->host_lock
);
4854 vport
->fc_flag
&= ~FC_NLP_MORE
;
4855 spin_unlock_irq(shost
->host_lock
);
4861 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
4862 * @vport: pointer to a host virtual N_Port data structure.
4864 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
4865 * which are in node port recovery state, with a @vport. Each time an ELS
4866 * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
4867 * the per @vport number of discover count (num_disc_nodes) shall be
4868 * incremented. If the num_disc_nodes reaches a pre-configured threshold
4869 * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE
4870 * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for
4871 * later pick up. On the other hand, after walking through all the ndlps with
4872 * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag
4873 * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC
4874 * PLOGI need to be sent.
4877 * The number of N_Ports with plogi issued.
4880 lpfc_els_disc_plogi(struct lpfc_vport
*vport
)
4882 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
4883 struct lpfc_nodelist
*ndlp
, *next_ndlp
;
4886 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
4887 list_for_each_entry_safe(ndlp
, next_ndlp
, &vport
->fc_nodes
, nlp_listp
) {
4888 if (!NLP_CHK_NODE_ACT(ndlp
))
4890 if (ndlp
->nlp_state
== NLP_STE_NPR_NODE
&&
4891 (ndlp
->nlp_flag
& NLP_NPR_2B_DISC
) != 0 &&
4892 (ndlp
->nlp_flag
& NLP_DELAY_TMO
) == 0 &&
4893 (ndlp
->nlp_flag
& NLP_NPR_ADISC
) == 0) {
4894 ndlp
->nlp_prev_state
= ndlp
->nlp_state
;
4895 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_PLOGI_ISSUE
);
4896 lpfc_issue_els_plogi(vport
, ndlp
->nlp_DID
, 0);
4898 vport
->num_disc_nodes
++;
4899 if (vport
->num_disc_nodes
>=
4900 vport
->cfg_discovery_threads
) {
4901 spin_lock_irq(shost
->host_lock
);
4902 vport
->fc_flag
|= FC_NLP_MORE
;
4903 spin_unlock_irq(shost
->host_lock
);
4909 lpfc_set_disctmo(vport
);
4912 spin_lock_irq(shost
->host_lock
);
4913 vport
->fc_flag
&= ~FC_NLP_MORE
;
4914 spin_unlock_irq(shost
->host_lock
);
4920 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc
*desc
,
4924 desc
->tag
= cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG
);
4925 desc
->payload
.els_req
= word0
;
4926 desc
->length
= cpu_to_be32(sizeof(desc
->payload
));
4928 return sizeof(struct fc_rdp_link_service_desc
);
4932 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc
*desc
,
4933 uint8_t *page_a0
, uint8_t *page_a2
)
4935 uint16_t wavelength
;
4936 uint16_t temperature
;
4942 struct sff_trasnceiver_codes_byte4
*trasn_code_byte4
;
4943 struct sff_trasnceiver_codes_byte5
*trasn_code_byte5
;
4945 desc
->tag
= cpu_to_be32(RDP_SFP_DESC_TAG
);
4947 trasn_code_byte4
= (struct sff_trasnceiver_codes_byte4
*)
4948 &page_a0
[SSF_TRANSCEIVER_CODE_B4
];
4949 trasn_code_byte5
= (struct sff_trasnceiver_codes_byte5
*)
4950 &page_a0
[SSF_TRANSCEIVER_CODE_B5
];
4952 if ((trasn_code_byte4
->fc_sw_laser
) ||
4953 (trasn_code_byte5
->fc_sw_laser_sl
) ||
4954 (trasn_code_byte5
->fc_sw_laser_sn
)) { /* check if its short WL */
4955 flag
|= (SFP_FLAG_PT_SWLASER
<< SFP_FLAG_PT_SHIFT
);
4956 } else if (trasn_code_byte4
->fc_lw_laser
) {
4957 wavelength
= (page_a0
[SSF_WAVELENGTH_B1
] << 8) |
4958 page_a0
[SSF_WAVELENGTH_B0
];
4959 if (wavelength
== SFP_WAVELENGTH_LC1310
)
4960 flag
|= SFP_FLAG_PT_LWLASER_LC1310
<< SFP_FLAG_PT_SHIFT
;
4961 if (wavelength
== SFP_WAVELENGTH_LL1550
)
4962 flag
|= SFP_FLAG_PT_LWLASER_LL1550
<< SFP_FLAG_PT_SHIFT
;
4964 /* check if its SFP+ */
4965 flag
|= ((page_a0
[SSF_IDENTIFIER
] == SFF_PG0_IDENT_SFP
) ?
4966 SFP_FLAG_CT_SFP_PLUS
: SFP_FLAG_CT_UNKNOWN
)
4967 << SFP_FLAG_CT_SHIFT
;
4969 /* check if its OPTICAL */
4970 flag
|= ((page_a0
[SSF_CONNECTOR
] == SFF_PG0_CONNECTOR_LC
) ?
4971 SFP_FLAG_IS_OPTICAL_PORT
: 0)
4972 << SFP_FLAG_IS_OPTICAL_SHIFT
;
4974 temperature
= (page_a2
[SFF_TEMPERATURE_B1
] << 8 |
4975 page_a2
[SFF_TEMPERATURE_B0
]);
4976 vcc
= (page_a2
[SFF_VCC_B1
] << 8 |
4977 page_a2
[SFF_VCC_B0
]);
4978 tx_power
= (page_a2
[SFF_TXPOWER_B1
] << 8 |
4979 page_a2
[SFF_TXPOWER_B0
]);
4980 tx_bias
= (page_a2
[SFF_TX_BIAS_CURRENT_B1
] << 8 |
4981 page_a2
[SFF_TX_BIAS_CURRENT_B0
]);
4982 rx_power
= (page_a2
[SFF_RXPOWER_B1
] << 8 |
4983 page_a2
[SFF_RXPOWER_B0
]);
4984 desc
->sfp_info
.temperature
= cpu_to_be16(temperature
);
4985 desc
->sfp_info
.rx_power
= cpu_to_be16(rx_power
);
4986 desc
->sfp_info
.tx_bias
= cpu_to_be16(tx_bias
);
4987 desc
->sfp_info
.tx_power
= cpu_to_be16(tx_power
);
4988 desc
->sfp_info
.vcc
= cpu_to_be16(vcc
);
4990 desc
->sfp_info
.flags
= cpu_to_be16(flag
);
4991 desc
->length
= cpu_to_be32(sizeof(desc
->sfp_info
));
4993 return sizeof(struct fc_rdp_sfp_desc
);
4997 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc
*desc
,
5002 desc
->tag
= cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG
);
5004 type
= VN_PT_PHY_PF_PORT
<< VN_PT_PHY_SHIFT
;
5006 desc
->info
.port_type
= cpu_to_be32(type
);
5008 desc
->info
.link_status
.link_failure_cnt
=
5009 cpu_to_be32(stat
->linkFailureCnt
);
5010 desc
->info
.link_status
.loss_of_synch_cnt
=
5011 cpu_to_be32(stat
->lossSyncCnt
);
5012 desc
->info
.link_status
.loss_of_signal_cnt
=
5013 cpu_to_be32(stat
->lossSignalCnt
);
5014 desc
->info
.link_status
.primitive_seq_proto_err
=
5015 cpu_to_be32(stat
->primSeqErrCnt
);
5016 desc
->info
.link_status
.invalid_trans_word
=
5017 cpu_to_be32(stat
->invalidXmitWord
);
5018 desc
->info
.link_status
.invalid_crc_cnt
= cpu_to_be32(stat
->crcCnt
);
5020 desc
->length
= cpu_to_be32(sizeof(desc
->info
));
5022 return sizeof(struct fc_rdp_link_error_status_desc
);
5026 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc
*desc
, READ_LNK_VAR
*stat
,
5027 struct lpfc_vport
*vport
)
5031 desc
->tag
= cpu_to_be32(RDP_BBC_DESC_TAG
);
5033 bbCredit
= vport
->fc_sparam
.cmn
.bbCreditLsb
|
5034 (vport
->fc_sparam
.cmn
.bbCreditMsb
<< 8);
5035 desc
->bbc_info
.port_bbc
= cpu_to_be32(bbCredit
);
5036 if (vport
->phba
->fc_topology
!= LPFC_TOPOLOGY_LOOP
) {
5037 bbCredit
= vport
->phba
->fc_fabparam
.cmn
.bbCreditLsb
|
5038 (vport
->phba
->fc_fabparam
.cmn
.bbCreditMsb
<< 8);
5039 desc
->bbc_info
.attached_port_bbc
= cpu_to_be32(bbCredit
);
5041 desc
->bbc_info
.attached_port_bbc
= 0;
5044 desc
->bbc_info
.rtt
= 0;
5045 desc
->length
= cpu_to_be32(sizeof(desc
->bbc_info
));
5047 return sizeof(struct fc_rdp_bbc_desc
);
5051 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba
*phba
,
5052 struct fc_rdp_oed_sfp_desc
*desc
, uint8_t *page_a2
)
5056 desc
->tag
= cpu_to_be32(RDP_OED_DESC_TAG
);
5058 desc
->oed_info
.hi_alarm
= page_a2
[SSF_TEMP_HIGH_ALARM
];
5059 desc
->oed_info
.lo_alarm
= page_a2
[SSF_TEMP_LOW_ALARM
];
5060 desc
->oed_info
.hi_warning
= page_a2
[SSF_TEMP_HIGH_WARNING
];
5061 desc
->oed_info
.lo_warning
= page_a2
[SSF_TEMP_LOW_WARNING
];
5063 if (phba
->sfp_alarm
& LPFC_TRANSGRESSION_HIGH_TEMPERATURE
)
5064 flags
|= RDP_OET_HIGH_ALARM
;
5065 if (phba
->sfp_alarm
& LPFC_TRANSGRESSION_LOW_TEMPERATURE
)
5066 flags
|= RDP_OET_LOW_ALARM
;
5067 if (phba
->sfp_warning
& LPFC_TRANSGRESSION_HIGH_TEMPERATURE
)
5068 flags
|= RDP_OET_HIGH_WARNING
;
5069 if (phba
->sfp_warning
& LPFC_TRANSGRESSION_LOW_TEMPERATURE
)
5070 flags
|= RDP_OET_LOW_WARNING
;
5072 flags
|= ((0xf & RDP_OED_TEMPERATURE
) << RDP_OED_TYPE_SHIFT
);
5073 desc
->oed_info
.function_flags
= cpu_to_be32(flags
);
5074 desc
->length
= cpu_to_be32(sizeof(desc
->oed_info
));
5075 return sizeof(struct fc_rdp_oed_sfp_desc
);
5079 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba
*phba
,
5080 struct fc_rdp_oed_sfp_desc
*desc
,
5085 desc
->tag
= cpu_to_be32(RDP_OED_DESC_TAG
);
5087 desc
->oed_info
.hi_alarm
= page_a2
[SSF_VOLTAGE_HIGH_ALARM
];
5088 desc
->oed_info
.lo_alarm
= page_a2
[SSF_VOLTAGE_LOW_ALARM
];
5089 desc
->oed_info
.hi_warning
= page_a2
[SSF_VOLTAGE_HIGH_WARNING
];
5090 desc
->oed_info
.lo_warning
= page_a2
[SSF_VOLTAGE_LOW_WARNING
];
5092 if (phba
->sfp_alarm
& LPFC_TRANSGRESSION_HIGH_VOLTAGE
)
5093 flags
|= RDP_OET_HIGH_ALARM
;
5094 if (phba
->sfp_alarm
& LPFC_TRANSGRESSION_LOW_VOLTAGE
)
5095 flags
|= RDP_OET_LOW_ALARM
;
5096 if (phba
->sfp_warning
& LPFC_TRANSGRESSION_HIGH_VOLTAGE
)
5097 flags
|= RDP_OET_HIGH_WARNING
;
5098 if (phba
->sfp_warning
& LPFC_TRANSGRESSION_LOW_VOLTAGE
)
5099 flags
|= RDP_OET_LOW_WARNING
;
5101 flags
|= ((0xf & RDP_OED_VOLTAGE
) << RDP_OED_TYPE_SHIFT
);
5102 desc
->oed_info
.function_flags
= cpu_to_be32(flags
);
5103 desc
->length
= cpu_to_be32(sizeof(desc
->oed_info
));
5104 return sizeof(struct fc_rdp_oed_sfp_desc
);
5108 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba
*phba
,
5109 struct fc_rdp_oed_sfp_desc
*desc
,
5114 desc
->tag
= cpu_to_be32(RDP_OED_DESC_TAG
);
5116 desc
->oed_info
.hi_alarm
= page_a2
[SSF_BIAS_HIGH_ALARM
];
5117 desc
->oed_info
.lo_alarm
= page_a2
[SSF_BIAS_LOW_ALARM
];
5118 desc
->oed_info
.hi_warning
= page_a2
[SSF_BIAS_HIGH_WARNING
];
5119 desc
->oed_info
.lo_warning
= page_a2
[SSF_BIAS_LOW_WARNING
];
5121 if (phba
->sfp_alarm
& LPFC_TRANSGRESSION_HIGH_TXBIAS
)
5122 flags
|= RDP_OET_HIGH_ALARM
;
5123 if (phba
->sfp_alarm
& LPFC_TRANSGRESSION_LOW_TXBIAS
)
5124 flags
|= RDP_OET_LOW_ALARM
;
5125 if (phba
->sfp_warning
& LPFC_TRANSGRESSION_HIGH_TXBIAS
)
5126 flags
|= RDP_OET_HIGH_WARNING
;
5127 if (phba
->sfp_warning
& LPFC_TRANSGRESSION_LOW_TXBIAS
)
5128 flags
|= RDP_OET_LOW_WARNING
;
5130 flags
|= ((0xf & RDP_OED_TXBIAS
) << RDP_OED_TYPE_SHIFT
);
5131 desc
->oed_info
.function_flags
= cpu_to_be32(flags
);
5132 desc
->length
= cpu_to_be32(sizeof(desc
->oed_info
));
5133 return sizeof(struct fc_rdp_oed_sfp_desc
);
5137 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba
*phba
,
5138 struct fc_rdp_oed_sfp_desc
*desc
,
5143 desc
->tag
= cpu_to_be32(RDP_OED_DESC_TAG
);
5145 desc
->oed_info
.hi_alarm
= page_a2
[SSF_TXPOWER_HIGH_ALARM
];
5146 desc
->oed_info
.lo_alarm
= page_a2
[SSF_TXPOWER_LOW_ALARM
];
5147 desc
->oed_info
.hi_warning
= page_a2
[SSF_TXPOWER_HIGH_WARNING
];
5148 desc
->oed_info
.lo_warning
= page_a2
[SSF_TXPOWER_LOW_WARNING
];
5150 if (phba
->sfp_alarm
& LPFC_TRANSGRESSION_HIGH_TXPOWER
)
5151 flags
|= RDP_OET_HIGH_ALARM
;
5152 if (phba
->sfp_alarm
& LPFC_TRANSGRESSION_LOW_TXPOWER
)
5153 flags
|= RDP_OET_LOW_ALARM
;
5154 if (phba
->sfp_warning
& LPFC_TRANSGRESSION_HIGH_TXPOWER
)
5155 flags
|= RDP_OET_HIGH_WARNING
;
5156 if (phba
->sfp_warning
& LPFC_TRANSGRESSION_LOW_TXPOWER
)
5157 flags
|= RDP_OET_LOW_WARNING
;
5159 flags
|= ((0xf & RDP_OED_TXPOWER
) << RDP_OED_TYPE_SHIFT
);
5160 desc
->oed_info
.function_flags
= cpu_to_be32(flags
);
5161 desc
->length
= cpu_to_be32(sizeof(desc
->oed_info
));
5162 return sizeof(struct fc_rdp_oed_sfp_desc
);
5167 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba
*phba
,
5168 struct fc_rdp_oed_sfp_desc
*desc
,
5173 desc
->tag
= cpu_to_be32(RDP_OED_DESC_TAG
);
5175 desc
->oed_info
.hi_alarm
= page_a2
[SSF_RXPOWER_HIGH_ALARM
];
5176 desc
->oed_info
.lo_alarm
= page_a2
[SSF_RXPOWER_LOW_ALARM
];
5177 desc
->oed_info
.hi_warning
= page_a2
[SSF_RXPOWER_HIGH_WARNING
];
5178 desc
->oed_info
.lo_warning
= page_a2
[SSF_RXPOWER_LOW_WARNING
];
5180 if (phba
->sfp_alarm
& LPFC_TRANSGRESSION_HIGH_RXPOWER
)
5181 flags
|= RDP_OET_HIGH_ALARM
;
5182 if (phba
->sfp_alarm
& LPFC_TRANSGRESSION_LOW_RXPOWER
)
5183 flags
|= RDP_OET_LOW_ALARM
;
5184 if (phba
->sfp_warning
& LPFC_TRANSGRESSION_HIGH_RXPOWER
)
5185 flags
|= RDP_OET_HIGH_WARNING
;
5186 if (phba
->sfp_warning
& LPFC_TRANSGRESSION_LOW_RXPOWER
)
5187 flags
|= RDP_OET_LOW_WARNING
;
5189 flags
|= ((0xf & RDP_OED_RXPOWER
) << RDP_OED_TYPE_SHIFT
);
5190 desc
->oed_info
.function_flags
= cpu_to_be32(flags
);
5191 desc
->length
= cpu_to_be32(sizeof(desc
->oed_info
));
5192 return sizeof(struct fc_rdp_oed_sfp_desc
);
5196 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc
*desc
,
5197 uint8_t *page_a0
, struct lpfc_vport
*vport
)
5199 desc
->tag
= cpu_to_be32(RDP_OPD_DESC_TAG
);
5200 memcpy(desc
->opd_info
.vendor_name
, &page_a0
[SSF_VENDOR_NAME
], 16);
5201 memcpy(desc
->opd_info
.model_number
, &page_a0
[SSF_VENDOR_PN
], 16);
5202 memcpy(desc
->opd_info
.serial_number
, &page_a0
[SSF_VENDOR_SN
], 16);
5203 memcpy(desc
->opd_info
.revision
, &page_a0
[SSF_VENDOR_REV
], 4);
5204 memcpy(desc
->opd_info
.date
, &page_a0
[SSF_DATE_CODE
], 8);
5205 desc
->length
= cpu_to_be32(sizeof(desc
->opd_info
));
5206 return sizeof(struct fc_rdp_opd_sfp_desc
);
5210 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc
*desc
, READ_LNK_VAR
*stat
)
5212 if (bf_get(lpfc_read_link_stat_gec2
, stat
) == 0)
5214 desc
->tag
= cpu_to_be32(RDP_FEC_DESC_TAG
);
5216 desc
->info
.CorrectedBlocks
=
5217 cpu_to_be32(stat
->fecCorrBlkCount
);
5218 desc
->info
.UncorrectableBlocks
=
5219 cpu_to_be32(stat
->fecUncorrBlkCount
);
5221 desc
->length
= cpu_to_be32(sizeof(desc
->info
));
5223 return sizeof(struct fc_fec_rdp_desc
);
5227 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc
*desc
, struct lpfc_hba
*phba
)
5229 uint16_t rdp_cap
= 0;
5232 desc
->tag
= cpu_to_be32(RDP_PORT_SPEED_DESC_TAG
);
5234 switch (phba
->fc_linkspeed
) {
5235 case LPFC_LINK_SPEED_1GHZ
:
5236 rdp_speed
= RDP_PS_1GB
;
5238 case LPFC_LINK_SPEED_2GHZ
:
5239 rdp_speed
= RDP_PS_2GB
;
5241 case LPFC_LINK_SPEED_4GHZ
:
5242 rdp_speed
= RDP_PS_4GB
;
5244 case LPFC_LINK_SPEED_8GHZ
:
5245 rdp_speed
= RDP_PS_8GB
;
5247 case LPFC_LINK_SPEED_10GHZ
:
5248 rdp_speed
= RDP_PS_10GB
;
5250 case LPFC_LINK_SPEED_16GHZ
:
5251 rdp_speed
= RDP_PS_16GB
;
5253 case LPFC_LINK_SPEED_32GHZ
:
5254 rdp_speed
= RDP_PS_32GB
;
5257 rdp_speed
= RDP_PS_UNKNOWN
;
5261 desc
->info
.port_speed
.speed
= cpu_to_be16(rdp_speed
);
5263 if (phba
->lmt
& LMT_32Gb
)
5264 rdp_cap
|= RDP_PS_32GB
;
5265 if (phba
->lmt
& LMT_16Gb
)
5266 rdp_cap
|= RDP_PS_16GB
;
5267 if (phba
->lmt
& LMT_10Gb
)
5268 rdp_cap
|= RDP_PS_10GB
;
5269 if (phba
->lmt
& LMT_8Gb
)
5270 rdp_cap
|= RDP_PS_8GB
;
5271 if (phba
->lmt
& LMT_4Gb
)
5272 rdp_cap
|= RDP_PS_4GB
;
5273 if (phba
->lmt
& LMT_2Gb
)
5274 rdp_cap
|= RDP_PS_2GB
;
5275 if (phba
->lmt
& LMT_1Gb
)
5276 rdp_cap
|= RDP_PS_1GB
;
5279 rdp_cap
= RDP_CAP_UNKNOWN
;
5280 if (phba
->cfg_link_speed
!= LPFC_USER_LINK_SPEED_AUTO
)
5281 rdp_cap
|= RDP_CAP_USER_CONFIGURED
;
5283 desc
->info
.port_speed
.capabilities
= cpu_to_be16(rdp_cap
);
5284 desc
->length
= cpu_to_be32(sizeof(desc
->info
));
5285 return sizeof(struct fc_rdp_port_speed_desc
);
5289 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc
*desc
,
5290 struct lpfc_vport
*vport
)
5293 desc
->tag
= cpu_to_be32(RDP_PORT_NAMES_DESC_TAG
);
5295 memcpy(desc
->port_names
.wwnn
, &vport
->fc_nodename
,
5296 sizeof(desc
->port_names
.wwnn
));
5298 memcpy(desc
->port_names
.wwpn
, &vport
->fc_portname
,
5299 sizeof(desc
->port_names
.wwpn
));
5301 desc
->length
= cpu_to_be32(sizeof(desc
->port_names
));
5302 return sizeof(struct fc_rdp_port_name_desc
);
5306 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc
*desc
,
5307 struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
5310 desc
->tag
= cpu_to_be32(RDP_PORT_NAMES_DESC_TAG
);
5311 if (vport
->fc_flag
& FC_FABRIC
) {
5312 memcpy(desc
->port_names
.wwnn
, &vport
->fabric_nodename
,
5313 sizeof(desc
->port_names
.wwnn
));
5315 memcpy(desc
->port_names
.wwpn
, &vport
->fabric_portname
,
5316 sizeof(desc
->port_names
.wwpn
));
5317 } else { /* Point to Point */
5318 memcpy(desc
->port_names
.wwnn
, &ndlp
->nlp_nodename
,
5319 sizeof(desc
->port_names
.wwnn
));
5321 memcpy(desc
->port_names
.wwnn
, &ndlp
->nlp_portname
,
5322 sizeof(desc
->port_names
.wwpn
));
5325 desc
->length
= cpu_to_be32(sizeof(desc
->port_names
));
5326 return sizeof(struct fc_rdp_port_name_desc
);
5330 lpfc_els_rdp_cmpl(struct lpfc_hba
*phba
, struct lpfc_rdp_context
*rdp_context
,
5333 struct lpfc_nodelist
*ndlp
= rdp_context
->ndlp
;
5334 struct lpfc_vport
*vport
= ndlp
->vport
;
5335 struct lpfc_iocbq
*elsiocb
;
5336 struct ulp_bde64
*bpl
;
5339 struct ls_rjt
*stat
;
5340 struct fc_rdp_res_frame
*rdp_res
;
5341 uint32_t cmdsize
, len
;
5345 if (status
!= SUCCESS
)
5348 /* This will change once we know the true size of the RDP payload */
5349 cmdsize
= sizeof(struct fc_rdp_res_frame
);
5351 elsiocb
= lpfc_prep_els_iocb(vport
, 0, cmdsize
,
5352 lpfc_max_els_tries
, rdp_context
->ndlp
,
5353 rdp_context
->ndlp
->nlp_DID
, ELS_CMD_ACC
);
5356 goto free_rdp_context
;
5358 icmd
= &elsiocb
->iocb
;
5359 icmd
->ulpContext
= rdp_context
->rx_id
;
5360 icmd
->unsli3
.rcvsli3
.ox_id
= rdp_context
->ox_id
;
5362 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
5363 "2171 Xmit RDP response tag x%x xri x%x, "
5364 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
5365 elsiocb
->iotag
, elsiocb
->iocb
.ulpContext
,
5366 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_state
,
5368 rdp_res
= (struct fc_rdp_res_frame
*)
5369 (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
5370 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
5371 memset(pcmd
, 0, sizeof(struct fc_rdp_res_frame
));
5372 *((uint32_t *) (pcmd
)) = ELS_CMD_ACC
;
5374 /* Update Alarm and Warning */
5375 flag_ptr
= (uint16_t *)(rdp_context
->page_a2
+ SSF_ALARM_FLAGS
);
5376 phba
->sfp_alarm
|= *flag_ptr
;
5377 flag_ptr
= (uint16_t *)(rdp_context
->page_a2
+ SSF_WARNING_FLAGS
);
5378 phba
->sfp_warning
|= *flag_ptr
;
5380 /* For RDP payload */
5382 len
+= lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc
*)
5383 (len
+ pcmd
), ELS_CMD_RDP
);
5385 len
+= lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc
*)(len
+ pcmd
),
5386 rdp_context
->page_a0
, rdp_context
->page_a2
);
5387 len
+= lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc
*)(len
+ pcmd
),
5389 len
+= lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc
*)
5390 (len
+ pcmd
), &rdp_context
->link_stat
);
5391 len
+= lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc
*)
5392 (len
+ pcmd
), vport
);
5393 len
+= lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc
*)
5394 (len
+ pcmd
), vport
, ndlp
);
5395 len
+= lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc
*)(len
+ pcmd
),
5396 &rdp_context
->link_stat
);
5397 len
+= lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc
*)(len
+ pcmd
),
5398 &rdp_context
->link_stat
, vport
);
5399 len
+= lpfc_rdp_res_oed_temp_desc(phba
,
5400 (struct fc_rdp_oed_sfp_desc
*)(len
+ pcmd
),
5401 rdp_context
->page_a2
);
5402 len
+= lpfc_rdp_res_oed_voltage_desc(phba
,
5403 (struct fc_rdp_oed_sfp_desc
*)(len
+ pcmd
),
5404 rdp_context
->page_a2
);
5405 len
+= lpfc_rdp_res_oed_txbias_desc(phba
,
5406 (struct fc_rdp_oed_sfp_desc
*)(len
+ pcmd
),
5407 rdp_context
->page_a2
);
5408 len
+= lpfc_rdp_res_oed_txpower_desc(phba
,
5409 (struct fc_rdp_oed_sfp_desc
*)(len
+ pcmd
),
5410 rdp_context
->page_a2
);
5411 len
+= lpfc_rdp_res_oed_rxpower_desc(phba
,
5412 (struct fc_rdp_oed_sfp_desc
*)(len
+ pcmd
),
5413 rdp_context
->page_a2
);
5414 len
+= lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc
*)(len
+ pcmd
),
5415 rdp_context
->page_a0
, vport
);
5417 rdp_res
->length
= cpu_to_be32(len
- 8);
5418 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
5420 /* Now that we know the true size of the payload, update the BPL */
5421 bpl
= (struct ulp_bde64
*)
5422 (((struct lpfc_dmabuf
*)(elsiocb
->context3
))->virt
);
5423 bpl
->tus
.f
.bdeSize
= len
;
5424 bpl
->tus
.f
.bdeFlags
= 0;
5425 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
5427 phba
->fc_stat
.elsXmitACC
++;
5428 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
5429 if (rc
== IOCB_ERROR
)
5430 lpfc_els_free_iocb(phba
, elsiocb
);
5436 cmdsize
= 2 * sizeof(uint32_t);
5437 elsiocb
= lpfc_prep_els_iocb(vport
, 0, cmdsize
, lpfc_max_els_tries
,
5438 ndlp
, ndlp
->nlp_DID
, ELS_CMD_LS_RJT
);
5441 goto free_rdp_context
;
5443 icmd
= &elsiocb
->iocb
;
5444 icmd
->ulpContext
= rdp_context
->rx_id
;
5445 icmd
->unsli3
.rcvsli3
.ox_id
= rdp_context
->ox_id
;
5446 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
5448 *((uint32_t *) (pcmd
)) = ELS_CMD_LS_RJT
;
5449 stat
= (struct ls_rjt
*)(pcmd
+ sizeof(uint32_t));
5450 stat
->un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
5452 phba
->fc_stat
.elsXmitLSRJT
++;
5453 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
5454 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
5456 if (rc
== IOCB_ERROR
)
5457 lpfc_els_free_iocb(phba
, elsiocb
);
5463 lpfc_get_rdp_info(struct lpfc_hba
*phba
, struct lpfc_rdp_context
*rdp_context
)
5465 LPFC_MBOXQ_t
*mbox
= NULL
;
5468 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
5470 lpfc_printf_log(phba
, KERN_WARNING
, LOG_MBOX
| LOG_ELS
,
5471 "7105 failed to allocate mailbox memory");
5475 if (lpfc_sli4_dump_page_a0(phba
, mbox
))
5476 goto prep_mbox_fail
;
5477 mbox
->vport
= rdp_context
->ndlp
->vport
;
5478 mbox
->mbox_cmpl
= lpfc_mbx_cmpl_rdp_page_a0
;
5479 mbox
->context2
= (struct lpfc_rdp_context
*) rdp_context
;
5480 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
5481 if (rc
== MBX_NOT_FINISHED
)
5482 goto issue_mbox_fail
;
5488 mempool_free(mbox
, phba
->mbox_mem_pool
);
5493 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
5494 * @vport: pointer to a host virtual N_Port data structure.
5495 * @cmdiocb: pointer to lpfc command iocb data structure.
5496 * @ndlp: pointer to a node-list data structure.
5498 * This routine processes an unsolicited RDP(Read Diagnostic Parameters)
5499 * IOCB. First, the payload of the unsolicited RDP is checked.
5500 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3
5501 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
5502 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl
5503 * gather all data and send RDP response.
5506 * 0 - Sent the acc response
5507 * 1 - Sent the reject response.
5510 lpfc_els_rcv_rdp(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
5511 struct lpfc_nodelist
*ndlp
)
5513 struct lpfc_hba
*phba
= vport
->phba
;
5514 struct lpfc_dmabuf
*pcmd
;
5515 uint8_t rjt_err
, rjt_expl
= LSEXP_NOTHING_MORE
;
5516 struct fc_rdp_req_frame
*rdp_req
;
5517 struct lpfc_rdp_context
*rdp_context
;
5521 if (phba
->sli_rev
< LPFC_SLI_REV4
||
5522 bf_get(lpfc_sli_intf_if_type
, &phba
->sli4_hba
.sli_intf
) !=
5523 LPFC_SLI_INTF_IF_TYPE_2
) {
5524 rjt_err
= LSRJT_UNABLE_TPC
;
5525 rjt_expl
= LSEXP_REQ_UNSUPPORTED
;
5529 if (phba
->sli_rev
< LPFC_SLI_REV4
|| (phba
->hba_flag
& HBA_FCOE_MODE
)) {
5530 rjt_err
= LSRJT_UNABLE_TPC
;
5531 rjt_expl
= LSEXP_REQ_UNSUPPORTED
;
5535 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
5536 rdp_req
= (struct fc_rdp_req_frame
*) pcmd
->virt
;
5538 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
5539 "2422 ELS RDP Request "
5540 "dec len %d tag x%x port_id %d len %d\n",
5541 be32_to_cpu(rdp_req
->rdp_des_length
),
5542 be32_to_cpu(rdp_req
->nport_id_desc
.tag
),
5543 be32_to_cpu(rdp_req
->nport_id_desc
.nport_id
),
5544 be32_to_cpu(rdp_req
->nport_id_desc
.length
));
5546 if (sizeof(struct fc_rdp_nport_desc
) !=
5547 be32_to_cpu(rdp_req
->rdp_des_length
))
5549 if (RDP_N_PORT_DESC_TAG
!= be32_to_cpu(rdp_req
->nport_id_desc
.tag
))
5551 if (RDP_NPORT_ID_SIZE
!=
5552 be32_to_cpu(rdp_req
->nport_id_desc
.length
))
5554 rdp_context
= kzalloc(sizeof(struct lpfc_rdp_context
), GFP_KERNEL
);
5556 rjt_err
= LSRJT_UNABLE_TPC
;
5560 cmd
= &cmdiocb
->iocb
;
5561 rdp_context
->ndlp
= lpfc_nlp_get(ndlp
);
5562 rdp_context
->ox_id
= cmd
->unsli3
.rcvsli3
.ox_id
;
5563 rdp_context
->rx_id
= cmd
->ulpContext
;
5564 rdp_context
->cmpl
= lpfc_els_rdp_cmpl
;
5565 if (lpfc_get_rdp_info(phba
, rdp_context
)) {
5566 lpfc_printf_vlog(ndlp
->vport
, KERN_WARNING
, LOG_ELS
,
5567 "2423 Unable to send mailbox");
5569 rjt_err
= LSRJT_UNABLE_TPC
;
5577 rjt_err
= LSRJT_LOGICAL_ERR
;
5580 memset(&stat
, 0, sizeof(stat
));
5581 stat
.un
.b
.lsRjtRsnCode
= rjt_err
;
5582 stat
.un
.b
.lsRjtRsnCodeExp
= rjt_expl
;
5583 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
5589 lpfc_els_lcb_rsp(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
5594 struct lpfc_iocbq
*elsiocb
;
5595 struct lpfc_nodelist
*ndlp
;
5596 struct ls_rjt
*stat
;
5597 union lpfc_sli4_cfg_shdr
*shdr
;
5598 struct lpfc_lcb_context
*lcb_context
;
5599 struct fc_lcb_res_frame
*lcb_res
;
5600 uint32_t cmdsize
, shdr_status
, shdr_add_status
;
5604 lcb_context
= (struct lpfc_lcb_context
*)pmb
->context1
;
5605 ndlp
= lcb_context
->ndlp
;
5606 pmb
->context1
= NULL
;
5607 pmb
->context2
= NULL
;
5609 shdr
= (union lpfc_sli4_cfg_shdr
*)
5610 &pmb
->u
.mqe
.un
.beacon_config
.header
.cfg_shdr
;
5611 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
5612 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
5614 lpfc_printf_log(phba
, KERN_INFO
, LOG_MBOX
,
5615 "0194 SET_BEACON_CONFIG mailbox "
5616 "completed with status x%x add_status x%x,"
5617 " mbx status x%x\n",
5618 shdr_status
, shdr_add_status
, mb
->mbxStatus
);
5620 if (mb
->mbxStatus
&& !(shdr_status
&&
5621 shdr_add_status
== ADD_STATUS_OPERATION_ALREADY_ACTIVE
)) {
5622 mempool_free(pmb
, phba
->mbox_mem_pool
);
5626 mempool_free(pmb
, phba
->mbox_mem_pool
);
5627 cmdsize
= sizeof(struct fc_lcb_res_frame
);
5628 elsiocb
= lpfc_prep_els_iocb(phba
->pport
, 0, cmdsize
,
5629 lpfc_max_els_tries
, ndlp
,
5630 ndlp
->nlp_DID
, ELS_CMD_ACC
);
5632 /* Decrement the ndlp reference count from previous mbox command */
5636 goto free_lcb_context
;
5638 lcb_res
= (struct fc_lcb_res_frame
*)
5639 (((struct lpfc_dmabuf
*)elsiocb
->context2
)->virt
);
5641 icmd
= &elsiocb
->iocb
;
5642 icmd
->ulpContext
= lcb_context
->rx_id
;
5643 icmd
->unsli3
.rcvsli3
.ox_id
= lcb_context
->ox_id
;
5645 pcmd
= (uint8_t *)(((struct lpfc_dmabuf
*)elsiocb
->context2
)->virt
);
5646 *((uint32_t *)(pcmd
)) = ELS_CMD_ACC
;
5647 lcb_res
->lcb_sub_command
= lcb_context
->sub_command
;
5648 lcb_res
->lcb_type
= lcb_context
->type
;
5649 lcb_res
->lcb_frequency
= lcb_context
->frequency
;
5650 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
5651 phba
->fc_stat
.elsXmitACC
++;
5652 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
5653 if (rc
== IOCB_ERROR
)
5654 lpfc_els_free_iocb(phba
, elsiocb
);
5660 cmdsize
= sizeof(struct fc_lcb_res_frame
);
5661 elsiocb
= lpfc_prep_els_iocb(phba
->pport
, 0, cmdsize
,
5662 lpfc_max_els_tries
, ndlp
,
5663 ndlp
->nlp_DID
, ELS_CMD_LS_RJT
);
5666 goto free_lcb_context
;
5668 icmd
= &elsiocb
->iocb
;
5669 icmd
->ulpContext
= lcb_context
->rx_id
;
5670 icmd
->unsli3
.rcvsli3
.ox_id
= lcb_context
->ox_id
;
5671 pcmd
= (uint8_t *)(((struct lpfc_dmabuf
*)elsiocb
->context2
)->virt
);
5673 *((uint32_t *)(pcmd
)) = ELS_CMD_LS_RJT
;
5674 stat
= (struct ls_rjt
*)(pcmd
+ sizeof(uint32_t));
5675 stat
->un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
5677 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
5678 phba
->fc_stat
.elsXmitLSRJT
++;
5679 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
5680 if (rc
== IOCB_ERROR
)
5681 lpfc_els_free_iocb(phba
, elsiocb
);
5687 lpfc_sli4_set_beacon(struct lpfc_vport
*vport
,
5688 struct lpfc_lcb_context
*lcb_context
,
5689 uint32_t beacon_state
)
5691 struct lpfc_hba
*phba
= vport
->phba
;
5692 LPFC_MBOXQ_t
*mbox
= NULL
;
5696 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
5700 len
= sizeof(struct lpfc_mbx_set_beacon_config
) -
5701 sizeof(struct lpfc_sli4_cfg_mhdr
);
5702 lpfc_sli4_config(phba
, mbox
, LPFC_MBOX_SUBSYSTEM_COMMON
,
5703 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG
, len
,
5704 LPFC_SLI4_MBX_EMBED
);
5705 mbox
->context1
= (void *)lcb_context
;
5706 mbox
->vport
= phba
->pport
;
5707 mbox
->mbox_cmpl
= lpfc_els_lcb_rsp
;
5708 bf_set(lpfc_mbx_set_beacon_port_num
, &mbox
->u
.mqe
.un
.beacon_config
,
5709 phba
->sli4_hba
.physical_port
);
5710 bf_set(lpfc_mbx_set_beacon_state
, &mbox
->u
.mqe
.un
.beacon_config
,
5712 bf_set(lpfc_mbx_set_beacon_port_type
, &mbox
->u
.mqe
.un
.beacon_config
, 1);
5713 bf_set(lpfc_mbx_set_beacon_duration
, &mbox
->u
.mqe
.un
.beacon_config
, 0);
5714 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
5715 if (rc
== MBX_NOT_FINISHED
) {
5716 mempool_free(mbox
, phba
->mbox_mem_pool
);
5725 * lpfc_els_rcv_lcb - Process an unsolicited LCB
5726 * @vport: pointer to a host virtual N_Port data structure.
5727 * @cmdiocb: pointer to lpfc command iocb data structure.
5728 * @ndlp: pointer to a node-list data structure.
5730 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB.
5731 * First, the payload of the unsolicited LCB is checked.
5732 * Then based on Subcommand beacon will either turn on or off.
5735 * 0 - Sent the acc response
5736 * 1 - Sent the reject response.
5739 lpfc_els_rcv_lcb(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
5740 struct lpfc_nodelist
*ndlp
)
5742 struct lpfc_hba
*phba
= vport
->phba
;
5743 struct lpfc_dmabuf
*pcmd
;
5745 struct fc_lcb_request_frame
*beacon
;
5746 struct lpfc_lcb_context
*lcb_context
;
5747 uint8_t state
, rjt_err
;
5750 pcmd
= (struct lpfc_dmabuf
*)cmdiocb
->context2
;
5751 lp
= (uint8_t *)pcmd
->virt
;
5752 beacon
= (struct fc_lcb_request_frame
*)pcmd
->virt
;
5754 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
5755 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
5756 "type x%x frequency %x duration x%x\n",
5757 lp
[0], lp
[1], lp
[2],
5758 beacon
->lcb_command
,
5759 beacon
->lcb_sub_command
,
5761 beacon
->lcb_frequency
,
5762 be16_to_cpu(beacon
->lcb_duration
));
5764 if (phba
->sli_rev
< LPFC_SLI_REV4
||
5765 (bf_get(lpfc_sli_intf_if_type
, &phba
->sli4_hba
.sli_intf
) !=
5766 LPFC_SLI_INTF_IF_TYPE_2
)) {
5767 rjt_err
= LSRJT_CMD_UNSUPPORTED
;
5771 if (phba
->hba_flag
& HBA_FCOE_MODE
) {
5772 rjt_err
= LSRJT_CMD_UNSUPPORTED
;
5775 if (beacon
->lcb_sub_command
!= LPFC_LCB_ON
&&
5776 beacon
->lcb_sub_command
!= LPFC_LCB_OFF
) {
5777 rjt_err
= LSRJT_CMD_UNSUPPORTED
;
5780 if (beacon
->lcb_sub_command
== LPFC_LCB_ON
&&
5781 be16_to_cpu(beacon
->lcb_duration
) != 0) {
5782 rjt_err
= LSRJT_CMD_UNSUPPORTED
;
5786 lcb_context
= kmalloc(sizeof(*lcb_context
), GFP_KERNEL
);
5788 rjt_err
= LSRJT_UNABLE_TPC
;
5792 state
= (beacon
->lcb_sub_command
== LPFC_LCB_ON
) ? 1 : 0;
5793 lcb_context
->sub_command
= beacon
->lcb_sub_command
;
5794 lcb_context
->type
= beacon
->lcb_type
;
5795 lcb_context
->frequency
= beacon
->lcb_frequency
;
5796 lcb_context
->ox_id
= cmdiocb
->iocb
.unsli3
.rcvsli3
.ox_id
;
5797 lcb_context
->rx_id
= cmdiocb
->iocb
.ulpContext
;
5798 lcb_context
->ndlp
= lpfc_nlp_get(ndlp
);
5799 if (lpfc_sli4_set_beacon(vport
, lcb_context
, state
)) {
5800 lpfc_printf_vlog(ndlp
->vport
, KERN_ERR
,
5801 LOG_ELS
, "0193 failed to send mail box");
5804 rjt_err
= LSRJT_UNABLE_TPC
;
5809 memset(&stat
, 0, sizeof(stat
));
5810 stat
.un
.b
.lsRjtRsnCode
= rjt_err
;
5811 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
5817 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
5818 * @vport: pointer to a host virtual N_Port data structure.
5820 * This routine cleans up any Registration State Change Notification
5821 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
5822 * @vport together with the host_lock is used to prevent multiple thread
5823 * trying to access the RSCN array on a same @vport at the same time.
5826 lpfc_els_flush_rscn(struct lpfc_vport
*vport
)
5828 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
5829 struct lpfc_hba
*phba
= vport
->phba
;
5832 spin_lock_irq(shost
->host_lock
);
5833 if (vport
->fc_rscn_flush
) {
5834 /* Another thread is walking fc_rscn_id_list on this vport */
5835 spin_unlock_irq(shost
->host_lock
);
5838 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
5839 vport
->fc_rscn_flush
= 1;
5840 spin_unlock_irq(shost
->host_lock
);
5842 for (i
= 0; i
< vport
->fc_rscn_id_cnt
; i
++) {
5843 lpfc_in_buf_free(phba
, vport
->fc_rscn_id_list
[i
]);
5844 vport
->fc_rscn_id_list
[i
] = NULL
;
5846 spin_lock_irq(shost
->host_lock
);
5847 vport
->fc_rscn_id_cnt
= 0;
5848 vport
->fc_flag
&= ~(FC_RSCN_MODE
| FC_RSCN_DISCOVERY
);
5849 spin_unlock_irq(shost
->host_lock
);
5850 lpfc_can_disctmo(vport
);
5851 /* Indicate we are done walking this fc_rscn_id_list */
5852 vport
->fc_rscn_flush
= 0;
5856 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
5857 * @vport: pointer to a host virtual N_Port data structure.
5858 * @did: remote destination port identifier.
5860 * This routine checks whether there is any pending Registration State
5861 * Configuration Notification (RSCN) to a @did on @vport.
5864 * None zero - The @did matched with a pending rscn
5865 * 0 - not able to match @did with a pending rscn
5868 lpfc_rscn_payload_check(struct lpfc_vport
*vport
, uint32_t did
)
5873 uint32_t payload_len
, i
;
5874 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
5876 ns_did
.un
.word
= did
;
5878 /* Never match fabric nodes for RSCNs */
5879 if ((did
& Fabric_DID_MASK
) == Fabric_DID_MASK
)
5882 /* If we are doing a FULL RSCN rediscovery, match everything */
5883 if (vport
->fc_flag
& FC_RSCN_DISCOVERY
)
5886 spin_lock_irq(shost
->host_lock
);
5887 if (vport
->fc_rscn_flush
) {
5888 /* Another thread is walking fc_rscn_id_list on this vport */
5889 spin_unlock_irq(shost
->host_lock
);
5892 /* Indicate we are walking fc_rscn_id_list on this vport */
5893 vport
->fc_rscn_flush
= 1;
5894 spin_unlock_irq(shost
->host_lock
);
5895 for (i
= 0; i
< vport
->fc_rscn_id_cnt
; i
++) {
5896 lp
= vport
->fc_rscn_id_list
[i
]->virt
;
5897 payload_len
= be32_to_cpu(*lp
++ & ~ELS_CMD_MASK
);
5898 payload_len
-= sizeof(uint32_t); /* take off word 0 */
5899 while (payload_len
) {
5900 rscn_did
.un
.word
= be32_to_cpu(*lp
++);
5901 payload_len
-= sizeof(uint32_t);
5902 switch (rscn_did
.un
.b
.resv
& RSCN_ADDRESS_FORMAT_MASK
) {
5903 case RSCN_ADDRESS_FORMAT_PORT
:
5904 if ((ns_did
.un
.b
.domain
== rscn_did
.un
.b
.domain
)
5905 && (ns_did
.un
.b
.area
== rscn_did
.un
.b
.area
)
5906 && (ns_did
.un
.b
.id
== rscn_did
.un
.b
.id
))
5907 goto return_did_out
;
5909 case RSCN_ADDRESS_FORMAT_AREA
:
5910 if ((ns_did
.un
.b
.domain
== rscn_did
.un
.b
.domain
)
5911 && (ns_did
.un
.b
.area
== rscn_did
.un
.b
.area
))
5912 goto return_did_out
;
5914 case RSCN_ADDRESS_FORMAT_DOMAIN
:
5915 if (ns_did
.un
.b
.domain
== rscn_did
.un
.b
.domain
)
5916 goto return_did_out
;
5918 case RSCN_ADDRESS_FORMAT_FABRIC
:
5919 goto return_did_out
;
5923 /* Indicate we are done with walking fc_rscn_id_list on this vport */
5924 vport
->fc_rscn_flush
= 0;
5927 /* Indicate we are done with walking fc_rscn_id_list on this vport */
5928 vport
->fc_rscn_flush
= 0;
5933 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
5934 * @vport: pointer to a host virtual N_Port data structure.
5936 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
5937 * state machine for a @vport's nodes that are with pending RSCN (Registration
5938 * State Change Notification).
5941 * 0 - Successful (currently alway return 0)
5944 lpfc_rscn_recovery_check(struct lpfc_vport
*vport
)
5946 struct lpfc_nodelist
*ndlp
= NULL
;
5948 /* Move all affected nodes by pending RSCNs to NPR state. */
5949 list_for_each_entry(ndlp
, &vport
->fc_nodes
, nlp_listp
) {
5950 if (!NLP_CHK_NODE_ACT(ndlp
) ||
5951 (ndlp
->nlp_state
== NLP_STE_UNUSED_NODE
) ||
5952 !lpfc_rscn_payload_check(vport
, ndlp
->nlp_DID
))
5955 /* NVME Target mode does not do RSCN Recovery. */
5956 if (vport
->phba
->nvmet_support
)
5959 lpfc_disc_state_machine(vport
, ndlp
, NULL
,
5960 NLP_EVT_DEVICE_RECOVERY
);
5961 lpfc_cancel_retry_delay_tmo(vport
, ndlp
);
5967 * lpfc_send_rscn_event - Send an RSCN event to management application
5968 * @vport: pointer to a host virtual N_Port data structure.
5969 * @cmdiocb: pointer to lpfc command iocb data structure.
5971 * lpfc_send_rscn_event sends an RSCN netlink event to management
5975 lpfc_send_rscn_event(struct lpfc_vport
*vport
,
5976 struct lpfc_iocbq
*cmdiocb
)
5978 struct lpfc_dmabuf
*pcmd
;
5979 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
5980 uint32_t *payload_ptr
;
5981 uint32_t payload_len
;
5982 struct lpfc_rscn_event_header
*rscn_event_data
;
5984 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
5985 payload_ptr
= (uint32_t *) pcmd
->virt
;
5986 payload_len
= be32_to_cpu(*payload_ptr
& ~ELS_CMD_MASK
);
5988 rscn_event_data
= kmalloc(sizeof(struct lpfc_rscn_event_header
) +
5989 payload_len
, GFP_KERNEL
);
5990 if (!rscn_event_data
) {
5991 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
5992 "0147 Failed to allocate memory for RSCN event\n");
5995 rscn_event_data
->event_type
= FC_REG_RSCN_EVENT
;
5996 rscn_event_data
->payload_length
= payload_len
;
5997 memcpy(rscn_event_data
->rscn_payload
, payload_ptr
,
6000 fc_host_post_vendor_event(shost
,
6001 fc_get_event_number(),
6002 sizeof(struct lpfc_rscn_event_header
) + payload_len
,
6003 (char *)rscn_event_data
,
6006 kfree(rscn_event_data
);
6010 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
6011 * @vport: pointer to a host virtual N_Port data structure.
6012 * @cmdiocb: pointer to lpfc command iocb data structure.
6013 * @ndlp: pointer to a node-list data structure.
6015 * This routine processes an unsolicited RSCN (Registration State Change
6016 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
6017 * to invoke fc_host_post_event() routine to the FC transport layer. If the
6018 * discover state machine is about to begin discovery, it just accepts the
6019 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
6020 * contains N_Port IDs for other vports on this HBA, it just accepts the
6021 * RSCN and ignore processing it. If the state machine is in the recovery
6022 * state, the fc_rscn_id_list of this @vport is walked and the
6023 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for
6024 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn()
6025 * routine is invoked to handle the RSCN event.
6028 * 0 - Just sent the acc response
6029 * 1 - Sent the acc response and waited for name server completion
6032 lpfc_els_rcv_rscn(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
6033 struct lpfc_nodelist
*ndlp
)
6035 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
6036 struct lpfc_hba
*phba
= vport
->phba
;
6037 struct lpfc_dmabuf
*pcmd
;
6038 uint32_t *lp
, *datap
;
6039 uint32_t payload_len
, length
, nportid
, *cmd
;
6041 int rscn_id
= 0, hba_id
= 0;
6044 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
6045 lp
= (uint32_t *) pcmd
->virt
;
6047 payload_len
= be32_to_cpu(*lp
++ & ~ELS_CMD_MASK
);
6048 payload_len
-= sizeof(uint32_t); /* take off word 0 */
6050 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
6051 "0214 RSCN received Data: x%x x%x x%x x%x\n",
6052 vport
->fc_flag
, payload_len
, *lp
,
6053 vport
->fc_rscn_id_cnt
);
6055 /* Send an RSCN event to the management application */
6056 lpfc_send_rscn_event(vport
, cmdiocb
);
6058 for (i
= 0; i
< payload_len
/sizeof(uint32_t); i
++)
6059 fc_host_post_event(shost
, fc_get_event_number(),
6060 FCH_EVT_RSCN
, lp
[i
]);
6062 /* If we are about to begin discovery, just ACC the RSCN.
6063 * Discovery processing will satisfy it.
6065 if (vport
->port_state
<= LPFC_NS_QRY
) {
6066 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6067 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
6068 ndlp
->nlp_DID
, vport
->port_state
, ndlp
->nlp_flag
);
6070 lpfc_els_rsp_acc(vport
, ELS_CMD_ACC
, cmdiocb
, ndlp
, NULL
);
6074 /* If this RSCN just contains NPortIDs for other vports on this HBA,
6075 * just ACC and ignore it.
6077 if ((phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
) &&
6078 !(vport
->cfg_peer_port_login
)) {
6083 nportid
= ((be32_to_cpu(nportid
)) & Mask_DID
);
6084 i
-= sizeof(uint32_t);
6086 if (lpfc_find_vport_by_did(phba
, nportid
))
6089 if (rscn_id
== hba_id
) {
6090 /* ALL NPortIDs in RSCN are on HBA */
6091 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
6093 "Data: x%x x%x x%x x%x\n",
6094 vport
->fc_flag
, payload_len
,
6095 *lp
, vport
->fc_rscn_id_cnt
);
6096 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6097 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
6098 ndlp
->nlp_DID
, vport
->port_state
,
6101 lpfc_els_rsp_acc(vport
, ELS_CMD_ACC
, cmdiocb
,
6107 spin_lock_irq(shost
->host_lock
);
6108 if (vport
->fc_rscn_flush
) {
6109 /* Another thread is walking fc_rscn_id_list on this vport */
6110 vport
->fc_flag
|= FC_RSCN_DISCOVERY
;
6111 spin_unlock_irq(shost
->host_lock
);
6113 lpfc_els_rsp_acc(vport
, ELS_CMD_ACC
, cmdiocb
, ndlp
, NULL
);
6116 /* Indicate we are walking fc_rscn_id_list on this vport */
6117 vport
->fc_rscn_flush
= 1;
6118 spin_unlock_irq(shost
->host_lock
);
6119 /* Get the array count after successfully have the token */
6120 rscn_cnt
= vport
->fc_rscn_id_cnt
;
6121 /* If we are already processing an RSCN, save the received
6122 * RSCN payload buffer, cmdiocb->context2 to process later.
6124 if (vport
->fc_flag
& (FC_RSCN_MODE
| FC_NDISC_ACTIVE
)) {
6125 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6126 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
6127 ndlp
->nlp_DID
, vport
->port_state
, ndlp
->nlp_flag
);
6129 spin_lock_irq(shost
->host_lock
);
6130 vport
->fc_flag
|= FC_RSCN_DEFERRED
;
6131 if ((rscn_cnt
< FC_MAX_HOLD_RSCN
) &&
6132 !(vport
->fc_flag
& FC_RSCN_DISCOVERY
)) {
6133 vport
->fc_flag
|= FC_RSCN_MODE
;
6134 spin_unlock_irq(shost
->host_lock
);
6136 cmd
= vport
->fc_rscn_id_list
[rscn_cnt
-1]->virt
;
6137 length
= be32_to_cpu(*cmd
& ~ELS_CMD_MASK
);
6140 (payload_len
+ length
<= LPFC_BPL_SIZE
)) {
6141 *cmd
&= ELS_CMD_MASK
;
6142 *cmd
|= cpu_to_be32(payload_len
+ length
);
6143 memcpy(((uint8_t *)cmd
) + length
, lp
,
6146 vport
->fc_rscn_id_list
[rscn_cnt
] = pcmd
;
6147 vport
->fc_rscn_id_cnt
++;
6148 /* If we zero, cmdiocb->context2, the calling
6149 * routine will not try to free it.
6151 cmdiocb
->context2
= NULL
;
6154 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
6155 "0235 Deferred RSCN "
6156 "Data: x%x x%x x%x\n",
6157 vport
->fc_rscn_id_cnt
, vport
->fc_flag
,
6160 vport
->fc_flag
|= FC_RSCN_DISCOVERY
;
6161 spin_unlock_irq(shost
->host_lock
);
6162 /* ReDiscovery RSCN */
6163 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
6164 "0234 ReDiscovery RSCN "
6165 "Data: x%x x%x x%x\n",
6166 vport
->fc_rscn_id_cnt
, vport
->fc_flag
,
6169 /* Indicate we are done walking fc_rscn_id_list on this vport */
6170 vport
->fc_rscn_flush
= 0;
6172 lpfc_els_rsp_acc(vport
, ELS_CMD_ACC
, cmdiocb
, ndlp
, NULL
);
6173 /* send RECOVERY event for ALL nodes that match RSCN payload */
6174 lpfc_rscn_recovery_check(vport
);
6175 spin_lock_irq(shost
->host_lock
);
6176 vport
->fc_flag
&= ~FC_RSCN_DEFERRED
;
6177 spin_unlock_irq(shost
->host_lock
);
6180 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
6181 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
6182 ndlp
->nlp_DID
, vport
->port_state
, ndlp
->nlp_flag
);
6184 spin_lock_irq(shost
->host_lock
);
6185 vport
->fc_flag
|= FC_RSCN_MODE
;
6186 spin_unlock_irq(shost
->host_lock
);
6187 vport
->fc_rscn_id_list
[vport
->fc_rscn_id_cnt
++] = pcmd
;
6188 /* Indicate we are done walking fc_rscn_id_list on this vport */
6189 vport
->fc_rscn_flush
= 0;
6191 * If we zero, cmdiocb->context2, the calling routine will
6192 * not try to free it.
6194 cmdiocb
->context2
= NULL
;
6195 lpfc_set_disctmo(vport
);
6197 lpfc_els_rsp_acc(vport
, ELS_CMD_ACC
, cmdiocb
, ndlp
, NULL
);
6198 /* send RECOVERY event for ALL nodes that match RSCN payload */
6199 lpfc_rscn_recovery_check(vport
);
6200 return lpfc_els_handle_rscn(vport
);
6204 * lpfc_els_handle_rscn - Handle rscn for a vport
6205 * @vport: pointer to a host virtual N_Port data structure.
6207 * This routine handles the Registration State Configuration Notification
6208 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
6209 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
6210 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
6211 * NameServer shall be issued. If CT command to the NameServer fails to be
6212 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
6213 * RSCN activities with the @vport.
6216 * 0 - Cleaned up rscn on the @vport
6217 * 1 - Wait for plogi to name server before proceed
6220 lpfc_els_handle_rscn(struct lpfc_vport
*vport
)
6222 struct lpfc_nodelist
*ndlp
;
6224 /* Ignore RSCN if the port is being torn down. */
6225 if (vport
->load_flag
& FC_UNLOADING
) {
6226 lpfc_els_flush_rscn(vport
);
6230 /* Start timer for RSCN processing */
6231 lpfc_set_disctmo(vport
);
6233 /* RSCN processed */
6234 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
6235 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
6236 vport
->fc_flag
, 0, vport
->fc_rscn_id_cnt
,
6239 /* To process RSCN, first compare RSCN data with NameServer */
6240 vport
->fc_ns_retry
= 0;
6241 vport
->num_disc_nodes
= 0;
6243 ndlp
= lpfc_findnode_did(vport
, NameServer_DID
);
6244 if (ndlp
&& NLP_CHK_NODE_ACT(ndlp
)
6245 && ndlp
->nlp_state
== NLP_STE_UNMAPPED_NODE
) {
6246 /* Good ndlp, issue CT Request to NameServer. Need to
6247 * know how many gidfts were issued. If none, then just
6248 * flush the RSCN. Otherwise, the outstanding requests
6251 vport
->gidft_inp
= 0;
6252 if (lpfc_issue_gidft(vport
) > 0)
6255 /* Nameserver login in question. Revalidate. */
6257 ndlp
= lpfc_enable_node(vport
, ndlp
,
6258 NLP_STE_PLOGI_ISSUE
);
6260 lpfc_els_flush_rscn(vport
);
6263 ndlp
->nlp_prev_state
= NLP_STE_UNUSED_NODE
;
6265 ndlp
= lpfc_nlp_init(vport
, NameServer_DID
);
6267 lpfc_els_flush_rscn(vport
);
6270 ndlp
->nlp_prev_state
= ndlp
->nlp_state
;
6271 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_PLOGI_ISSUE
);
6273 ndlp
->nlp_type
|= NLP_FABRIC
;
6274 lpfc_issue_els_plogi(vport
, NameServer_DID
, 0);
6275 /* Wait for NameServer login cmpl before we can
6281 lpfc_els_flush_rscn(vport
);
6286 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
6287 * @vport: pointer to a host virtual N_Port data structure.
6288 * @cmdiocb: pointer to lpfc command iocb data structure.
6289 * @ndlp: pointer to a node-list data structure.
6291 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
6292 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
6293 * point topology. As an unsolicited FLOGI should not be received in a loop
6294 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
6295 * lpfc_check_sparm() routine is invoked to check the parameters in the
6296 * unsolicited FLOGI. If parameters validation failed, the routine
6297 * lpfc_els_rsp_reject() shall be called with reject reason code set to
6298 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
6299 * FLOGI shall be compared with the Port WWN of the @vport to determine who
6300 * will initiate PLOGI. The higher lexicographical value party shall have
6301 * higher priority (as the winning port) and will initiate PLOGI and
6302 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
6303 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
6304 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
6307 * 0 - Successfully processed the unsolicited flogi
6308 * 1 - Failed to process the unsolicited flogi
6311 lpfc_els_rcv_flogi(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
6312 struct lpfc_nodelist
*ndlp
)
6314 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
6315 struct lpfc_hba
*phba
= vport
->phba
;
6316 struct lpfc_dmabuf
*pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
6317 uint32_t *lp
= (uint32_t *) pcmd
->virt
;
6318 IOCB_t
*icmd
= &cmdiocb
->iocb
;
6319 struct serv_parm
*sp
;
6323 uint32_t fc_flag
= 0;
6324 uint32_t port_state
= 0;
6327 sp
= (struct serv_parm
*) lp
;
6329 /* FLOGI received */
6331 lpfc_set_disctmo(vport
);
6333 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
6334 /* We should never receive a FLOGI in loop mode, ignore it */
6335 did
= icmd
->un
.elsreq64
.remoteID
;
6337 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
6339 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
6340 "0113 An FLOGI ELS command x%x was "
6341 "received from DID x%x in Loop Mode\n",
6346 (void) lpfc_check_sparm(vport
, ndlp
, sp
, CLASS3
, 1);
6349 * If our portname is greater than the remote portname,
6350 * then we initiate Nport login.
6353 rc
= memcmp(&vport
->fc_portname
, &sp
->portName
,
6354 sizeof(struct lpfc_name
));
6357 if (phba
->sli_rev
< LPFC_SLI_REV4
) {
6358 mbox
= mempool_alloc(phba
->mbox_mem_pool
,
6362 lpfc_linkdown(phba
);
6363 lpfc_init_link(phba
, mbox
,
6365 phba
->cfg_link_speed
);
6366 mbox
->u
.mb
.un
.varInitLnk
.lipsr_AL_PA
= 0;
6367 mbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
6368 mbox
->vport
= vport
;
6369 rc
= lpfc_sli_issue_mbox(phba
, mbox
,
6371 lpfc_set_loopback_flag(phba
);
6372 if (rc
== MBX_NOT_FINISHED
)
6373 mempool_free(mbox
, phba
->mbox_mem_pool
);
6377 /* abort the flogi coming back to ourselves
6378 * due to external loopback on the port.
6380 lpfc_els_abort_flogi(phba
);
6383 } else if (rc
> 0) { /* greater than */
6384 spin_lock_irq(shost
->host_lock
);
6385 vport
->fc_flag
|= FC_PT2PT_PLOGI
;
6386 spin_unlock_irq(shost
->host_lock
);
6388 /* If we have the high WWPN we can assign our own
6389 * myDID; otherwise, we have to WAIT for a PLOGI
6390 * from the remote NPort to find out what it
6393 vport
->fc_myDID
= PT2PT_LocalID
;
6395 vport
->fc_myDID
= PT2PT_RemoteID
;
6399 * The vport state should go to LPFC_FLOGI only
6400 * AFTER we issue a FLOGI, not receive one.
6402 spin_lock_irq(shost
->host_lock
);
6403 fc_flag
= vport
->fc_flag
;
6404 port_state
= vport
->port_state
;
6405 vport
->fc_flag
|= FC_PT2PT
;
6406 vport
->fc_flag
&= ~(FC_FABRIC
| FC_PUBLIC_LOOP
);
6407 spin_unlock_irq(shost
->host_lock
);
6408 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
6409 "3311 Rcv Flogi PS x%x new PS x%x "
6410 "fc_flag x%x new fc_flag x%x\n",
6411 port_state
, vport
->port_state
,
6412 fc_flag
, vport
->fc_flag
);
6415 * We temporarily set fc_myDID to make it look like we are
6416 * a Fabric. This is done just so we end up with the right
6417 * did / sid on the FLOGI ACC rsp.
6419 did
= vport
->fc_myDID
;
6420 vport
->fc_myDID
= Fabric_DID
;
6422 memcpy(&phba
->fc_fabparam
, sp
, sizeof(struct serv_parm
));
6425 lpfc_els_rsp_acc(vport
, ELS_CMD_FLOGI
, cmdiocb
, ndlp
, NULL
);
6427 /* Now lets put fc_myDID back to what its supposed to be */
6428 vport
->fc_myDID
= did
;
6434 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
6435 * @vport: pointer to a host virtual N_Port data structure.
6436 * @cmdiocb: pointer to lpfc command iocb data structure.
6437 * @ndlp: pointer to a node-list data structure.
6439 * This routine processes Request Node Identification Data (RNID) IOCB
6440 * received as an ELS unsolicited event. Only when the RNID specified format
6441 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
6442 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to
6443 * Accept (ACC) the RNID ELS command. All the other RNID formats are
6444 * rejected by invoking the lpfc_els_rsp_reject() routine.
6447 * 0 - Successfully processed rnid iocb (currently always return 0)
6450 lpfc_els_rcv_rnid(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
6451 struct lpfc_nodelist
*ndlp
)
6453 struct lpfc_dmabuf
*pcmd
;
6459 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
6460 lp
= (uint32_t *) pcmd
->virt
;
6467 switch (rn
->Format
) {
6469 case RNID_TOPOLOGY_DISC
:
6471 lpfc_els_rsp_rnid_acc(vport
, rn
->Format
, cmdiocb
, ndlp
);
6474 /* Reject this request because format not supported */
6475 stat
.un
.b
.lsRjtRsvd0
= 0;
6476 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
6477 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_CANT_GIVE_DATA
;
6478 stat
.un
.b
.vendorUnique
= 0;
6479 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
,
6486 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
6487 * @vport: pointer to a host virtual N_Port data structure.
6488 * @cmdiocb: pointer to lpfc command iocb data structure.
6489 * @ndlp: pointer to a node-list data structure.
6492 * 0 - Successfully processed echo iocb (currently always return 0)
6495 lpfc_els_rcv_echo(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
6496 struct lpfc_nodelist
*ndlp
)
6500 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) cmdiocb
->context2
)->virt
);
6502 /* skip over first word of echo command to find echo data */
6503 pcmd
+= sizeof(uint32_t);
6505 lpfc_els_rsp_echo_acc(vport
, pcmd
, cmdiocb
, ndlp
);
6510 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
6511 * @vport: pointer to a host virtual N_Port data structure.
6512 * @cmdiocb: pointer to lpfc command iocb data structure.
6513 * @ndlp: pointer to a node-list data structure.
6515 * This routine processes a Link Incident Report Registration(LIRR) IOCB
6516 * received as an ELS unsolicited event. Currently, this function just invokes
6517 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
6520 * 0 - Successfully processed lirr iocb (currently always return 0)
6523 lpfc_els_rcv_lirr(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
6524 struct lpfc_nodelist
*ndlp
)
6528 /* For now, unconditionally reject this command */
6529 stat
.un
.b
.lsRjtRsvd0
= 0;
6530 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
6531 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_CANT_GIVE_DATA
;
6532 stat
.un
.b
.vendorUnique
= 0;
6533 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
6538 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
6539 * @vport: pointer to a host virtual N_Port data structure.
6540 * @cmdiocb: pointer to lpfc command iocb data structure.
6541 * @ndlp: pointer to a node-list data structure.
6543 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
6544 * received as an ELS unsolicited event. A request to RRQ shall only
6545 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
6546 * Nx_Port N_Port_ID of the target Exchange is the same as the
6547 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
6548 * not accepted, an LS_RJT with reason code "Unable to perform
6549 * command request" and reason code explanation "Invalid Originator
6550 * S_ID" shall be returned. For now, we just unconditionally accept
6551 * RRQ from the target.
6554 lpfc_els_rcv_rrq(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
6555 struct lpfc_nodelist
*ndlp
)
6557 lpfc_els_rsp_acc(vport
, ELS_CMD_ACC
, cmdiocb
, ndlp
, NULL
);
6558 if (vport
->phba
->sli_rev
== LPFC_SLI_REV4
)
6559 lpfc_els_clear_rrq(vport
, cmdiocb
, ndlp
);
6563 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
6564 * @phba: pointer to lpfc hba data structure.
6565 * @pmb: pointer to the driver internal queue element for mailbox command.
6567 * This routine is the completion callback function for the MBX_READ_LNK_STAT
6568 * mailbox command. This callback function is to actually send the Accept
6569 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
6570 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
6571 * mailbox command, constructs the RPS response with the link statistics
6572 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
6573 * response to the RPS.
6575 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6576 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6577 * will be stored into the context1 field of the IOCB for the completion
6578 * callback function to the RPS Accept Response ELS IOCB command.
6582 lpfc_els_rsp_rls_acc(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
6586 struct RLS_RSP
*rls_rsp
;
6588 struct lpfc_iocbq
*elsiocb
;
6589 struct lpfc_nodelist
*ndlp
;
6596 ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
6597 rxid
= (uint16_t) ((unsigned long)(pmb
->context1
) & 0xffff);
6598 oxid
= (uint16_t) (((unsigned long)(pmb
->context1
) >> 16) & 0xffff);
6599 pmb
->context1
= NULL
;
6600 pmb
->context2
= NULL
;
6602 if (mb
->mbxStatus
) {
6603 mempool_free(pmb
, phba
->mbox_mem_pool
);
6607 cmdsize
= sizeof(struct RLS_RSP
) + sizeof(uint32_t);
6608 elsiocb
= lpfc_prep_els_iocb(phba
->pport
, 0, cmdsize
,
6609 lpfc_max_els_tries
, ndlp
,
6610 ndlp
->nlp_DID
, ELS_CMD_ACC
);
6612 /* Decrement the ndlp reference count from previous mbox command */
6616 mempool_free(pmb
, phba
->mbox_mem_pool
);
6620 icmd
= &elsiocb
->iocb
;
6621 icmd
->ulpContext
= rxid
;
6622 icmd
->unsli3
.rcvsli3
.ox_id
= oxid
;
6624 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
6625 *((uint32_t *) (pcmd
)) = ELS_CMD_ACC
;
6626 pcmd
+= sizeof(uint32_t); /* Skip past command */
6627 rls_rsp
= (struct RLS_RSP
*)pcmd
;
6629 rls_rsp
->linkFailureCnt
= cpu_to_be32(mb
->un
.varRdLnk
.linkFailureCnt
);
6630 rls_rsp
->lossSyncCnt
= cpu_to_be32(mb
->un
.varRdLnk
.lossSyncCnt
);
6631 rls_rsp
->lossSignalCnt
= cpu_to_be32(mb
->un
.varRdLnk
.lossSignalCnt
);
6632 rls_rsp
->primSeqErrCnt
= cpu_to_be32(mb
->un
.varRdLnk
.primSeqErrCnt
);
6633 rls_rsp
->invalidXmitWord
= cpu_to_be32(mb
->un
.varRdLnk
.invalidXmitWord
);
6634 rls_rsp
->crcCnt
= cpu_to_be32(mb
->un
.varRdLnk
.crcCnt
);
6635 mempool_free(pmb
, phba
->mbox_mem_pool
);
6636 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
6637 lpfc_printf_vlog(ndlp
->vport
, KERN_INFO
, LOG_ELS
,
6638 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
6639 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
6640 elsiocb
->iotag
, elsiocb
->iocb
.ulpContext
,
6641 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_state
,
6643 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
6644 phba
->fc_stat
.elsXmitACC
++;
6645 if (lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0) == IOCB_ERROR
)
6646 lpfc_els_free_iocb(phba
, elsiocb
);
6650 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
6651 * @phba: pointer to lpfc hba data structure.
6652 * @pmb: pointer to the driver internal queue element for mailbox command.
6654 * This routine is the completion callback function for the MBX_READ_LNK_STAT
6655 * mailbox command. This callback function is to actually send the Accept
6656 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
6657 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
6658 * mailbox command, constructs the RPS response with the link statistics
6659 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
6660 * response to the RPS.
6662 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6663 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6664 * will be stored into the context1 field of the IOCB for the completion
6665 * callback function to the RPS Accept Response ELS IOCB command.
6669 lpfc_els_rsp_rps_acc(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
6675 struct lpfc_iocbq
*elsiocb
;
6676 struct lpfc_nodelist
*ndlp
;
6684 ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
6685 rxid
= (uint16_t) ((unsigned long)(pmb
->context1
) & 0xffff);
6686 oxid
= (uint16_t) (((unsigned long)(pmb
->context1
) >> 16) & 0xffff);
6687 pmb
->context1
= NULL
;
6688 pmb
->context2
= NULL
;
6690 if (mb
->mbxStatus
) {
6691 mempool_free(pmb
, phba
->mbox_mem_pool
);
6695 cmdsize
= sizeof(RPS_RSP
) + sizeof(uint32_t);
6696 mempool_free(pmb
, phba
->mbox_mem_pool
);
6697 elsiocb
= lpfc_prep_els_iocb(phba
->pport
, 0, cmdsize
,
6698 lpfc_max_els_tries
, ndlp
,
6699 ndlp
->nlp_DID
, ELS_CMD_ACC
);
6701 /* Decrement the ndlp reference count from previous mbox command */
6707 icmd
= &elsiocb
->iocb
;
6708 icmd
->ulpContext
= rxid
;
6709 icmd
->unsli3
.rcvsli3
.ox_id
= oxid
;
6711 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
6712 *((uint32_t *) (pcmd
)) = ELS_CMD_ACC
;
6713 pcmd
+= sizeof(uint32_t); /* Skip past command */
6714 rps_rsp
= (RPS_RSP
*)pcmd
;
6716 if (phba
->fc_topology
!= LPFC_TOPOLOGY_LOOP
)
6720 if (phba
->pport
->fc_flag
& FC_FABRIC
)
6724 rps_rsp
->portStatus
= cpu_to_be16(status
);
6725 rps_rsp
->linkFailureCnt
= cpu_to_be32(mb
->un
.varRdLnk
.linkFailureCnt
);
6726 rps_rsp
->lossSyncCnt
= cpu_to_be32(mb
->un
.varRdLnk
.lossSyncCnt
);
6727 rps_rsp
->lossSignalCnt
= cpu_to_be32(mb
->un
.varRdLnk
.lossSignalCnt
);
6728 rps_rsp
->primSeqErrCnt
= cpu_to_be32(mb
->un
.varRdLnk
.primSeqErrCnt
);
6729 rps_rsp
->invalidXmitWord
= cpu_to_be32(mb
->un
.varRdLnk
.invalidXmitWord
);
6730 rps_rsp
->crcCnt
= cpu_to_be32(mb
->un
.varRdLnk
.crcCnt
);
6731 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
6732 lpfc_printf_vlog(ndlp
->vport
, KERN_INFO
, LOG_ELS
,
6733 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
6734 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
6735 elsiocb
->iotag
, elsiocb
->iocb
.ulpContext
,
6736 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_state
,
6738 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
6739 phba
->fc_stat
.elsXmitACC
++;
6740 if (lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0) == IOCB_ERROR
)
6741 lpfc_els_free_iocb(phba
, elsiocb
);
6746 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
6747 * @vport: pointer to a host virtual N_Port data structure.
6748 * @cmdiocb: pointer to lpfc command iocb data structure.
6749 * @ndlp: pointer to a node-list data structure.
6751 * This routine processes Read Port Status (RPL) IOCB received as an
6752 * ELS unsolicited event. It first checks the remote port state. If the
6753 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
6754 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
6755 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
6756 * for reading the HBA link statistics. It is for the callback function,
6757 * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command
6758 * to actually sending out RPL Accept (ACC) response.
6761 * 0 - Successfully processed rls iocb (currently always return 0)
6764 lpfc_els_rcv_rls(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
6765 struct lpfc_nodelist
*ndlp
)
6767 struct lpfc_hba
*phba
= vport
->phba
;
6771 if ((ndlp
->nlp_state
!= NLP_STE_UNMAPPED_NODE
) &&
6772 (ndlp
->nlp_state
!= NLP_STE_MAPPED_NODE
))
6773 /* reject the unsolicited RPS request and done with it */
6776 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_ATOMIC
);
6778 lpfc_read_lnk_stat(phba
, mbox
);
6779 mbox
->context1
= (void *)((unsigned long)
6780 ((cmdiocb
->iocb
.unsli3
.rcvsli3
.ox_id
<< 16) |
6781 cmdiocb
->iocb
.ulpContext
)); /* rx_id */
6782 mbox
->context2
= lpfc_nlp_get(ndlp
);
6783 mbox
->vport
= vport
;
6784 mbox
->mbox_cmpl
= lpfc_els_rsp_rls_acc
;
6785 if (lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
)
6786 != MBX_NOT_FINISHED
)
6787 /* Mbox completion will send ELS Response */
6789 /* Decrement reference count used for the failed mbox
6793 mempool_free(mbox
, phba
->mbox_mem_pool
);
6796 /* issue rejection response */
6797 stat
.un
.b
.lsRjtRsvd0
= 0;
6798 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
6799 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_CANT_GIVE_DATA
;
6800 stat
.un
.b
.vendorUnique
= 0;
6801 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
6806 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
6807 * @vport: pointer to a host virtual N_Port data structure.
6808 * @cmdiocb: pointer to lpfc command iocb data structure.
6809 * @ndlp: pointer to a node-list data structure.
6811 * This routine processes Read Timeout Value (RTV) IOCB received as an
6812 * ELS unsolicited event. It first checks the remote port state. If the
6813 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
6814 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
6815 * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
6816 * Value (RTV) unsolicited IOCB event.
6818 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6819 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6820 * will be stored into the context1 field of the IOCB for the completion
6821 * callback function to the RPS Accept Response ELS IOCB command.
6824 * 0 - Successfully processed rtv iocb (currently always return 0)
6827 lpfc_els_rcv_rtv(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
6828 struct lpfc_nodelist
*ndlp
)
6830 struct lpfc_hba
*phba
= vport
->phba
;
6832 struct RTV_RSP
*rtv_rsp
;
6834 struct lpfc_iocbq
*elsiocb
;
6838 if ((ndlp
->nlp_state
!= NLP_STE_UNMAPPED_NODE
) &&
6839 (ndlp
->nlp_state
!= NLP_STE_MAPPED_NODE
))
6840 /* reject the unsolicited RPS request and done with it */
6843 cmdsize
= sizeof(struct RTV_RSP
) + sizeof(uint32_t);
6844 elsiocb
= lpfc_prep_els_iocb(phba
->pport
, 0, cmdsize
,
6845 lpfc_max_els_tries
, ndlp
,
6846 ndlp
->nlp_DID
, ELS_CMD_ACC
);
6851 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
6852 *((uint32_t *) (pcmd
)) = ELS_CMD_ACC
;
6853 pcmd
+= sizeof(uint32_t); /* Skip past command */
6855 /* use the command's xri in the response */
6856 elsiocb
->iocb
.ulpContext
= cmdiocb
->iocb
.ulpContext
; /* Xri / rx_id */
6857 elsiocb
->iocb
.unsli3
.rcvsli3
.ox_id
= cmdiocb
->iocb
.unsli3
.rcvsli3
.ox_id
;
6859 rtv_rsp
= (struct RTV_RSP
*)pcmd
;
6861 /* populate RTV payload */
6862 rtv_rsp
->ratov
= cpu_to_be32(phba
->fc_ratov
* 1000); /* report msecs */
6863 rtv_rsp
->edtov
= cpu_to_be32(phba
->fc_edtov
);
6864 bf_set(qtov_edtovres
, rtv_rsp
, phba
->fc_edtovResol
? 1 : 0);
6865 bf_set(qtov_rttov
, rtv_rsp
, 0); /* Field is for FC ONLY */
6866 rtv_rsp
->qtov
= cpu_to_be32(rtv_rsp
->qtov
);
6868 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
6869 lpfc_printf_vlog(ndlp
->vport
, KERN_INFO
, LOG_ELS
,
6870 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
6871 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
6872 "Data: x%x x%x x%x\n",
6873 elsiocb
->iotag
, elsiocb
->iocb
.ulpContext
,
6874 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_state
,
6876 rtv_rsp
->ratov
, rtv_rsp
->edtov
, rtv_rsp
->qtov
);
6877 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
6878 phba
->fc_stat
.elsXmitACC
++;
6879 if (lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0) == IOCB_ERROR
)
6880 lpfc_els_free_iocb(phba
, elsiocb
);
6884 /* issue rejection response */
6885 stat
.un
.b
.lsRjtRsvd0
= 0;
6886 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
6887 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_CANT_GIVE_DATA
;
6888 stat
.un
.b
.vendorUnique
= 0;
6889 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
6893 /* lpfc_els_rcv_rps - Process an unsolicited rps iocb
6894 * @vport: pointer to a host virtual N_Port data structure.
6895 * @cmdiocb: pointer to lpfc command iocb data structure.
6896 * @ndlp: pointer to a node-list data structure.
6898 * This routine processes Read Port Status (RPS) IOCB received as an
6899 * ELS unsolicited event. It first checks the remote port state. If the
6900 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
6901 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
6902 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
6903 * for reading the HBA link statistics. It is for the callback function,
6904 * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
6905 * to actually sending out RPS Accept (ACC) response.
6908 * 0 - Successfully processed rps iocb (currently always return 0)
6911 lpfc_els_rcv_rps(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
6912 struct lpfc_nodelist
*ndlp
)
6914 struct lpfc_hba
*phba
= vport
->phba
;
6918 struct lpfc_dmabuf
*pcmd
;
6922 if ((ndlp
->nlp_state
!= NLP_STE_UNMAPPED_NODE
) &&
6923 (ndlp
->nlp_state
!= NLP_STE_MAPPED_NODE
))
6924 /* reject the unsolicited RPS request and done with it */
6927 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
6928 lp
= (uint32_t *) pcmd
->virt
;
6929 flag
= (be32_to_cpu(*lp
++) & 0xf);
6933 ((flag
== 1) && (be32_to_cpu(rps
->un
.portNum
) == 0)) ||
6934 ((flag
== 2) && (memcmp(&rps
->un
.portName
, &vport
->fc_portname
,
6935 sizeof(struct lpfc_name
)) == 0))) {
6937 printk("Fix me....\n");
6939 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_ATOMIC
);
6941 lpfc_read_lnk_stat(phba
, mbox
);
6942 mbox
->context1
= (void *)((unsigned long)
6943 ((cmdiocb
->iocb
.unsli3
.rcvsli3
.ox_id
<< 16) |
6944 cmdiocb
->iocb
.ulpContext
)); /* rx_id */
6945 mbox
->context2
= lpfc_nlp_get(ndlp
);
6946 mbox
->vport
= vport
;
6947 mbox
->mbox_cmpl
= lpfc_els_rsp_rps_acc
;
6948 if (lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
)
6949 != MBX_NOT_FINISHED
)
6950 /* Mbox completion will send ELS Response */
6952 /* Decrement reference count used for the failed mbox
6956 mempool_free(mbox
, phba
->mbox_mem_pool
);
6961 /* issue rejection response */
6962 stat
.un
.b
.lsRjtRsvd0
= 0;
6963 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
6964 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_CANT_GIVE_DATA
;
6965 stat
.un
.b
.vendorUnique
= 0;
6966 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
6970 /* lpfc_issue_els_rrq - Process an unsolicited rps iocb
6971 * @vport: pointer to a host virtual N_Port data structure.
6972 * @ndlp: pointer to a node-list data structure.
6973 * @did: DID of the target.
6974 * @rrq: Pointer to the rrq struct.
6976 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
6977 * successful, the completion handler will clear the RRQ.
6980 * 0 - Successfully sent rrq els iocb.
6981 * 1 - Failed to send rrq els iocb.
6984 lpfc_issue_els_rrq(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
6985 uint32_t did
, struct lpfc_node_rrq
*rrq
)
6987 struct lpfc_hba
*phba
= vport
->phba
;
6988 struct RRQ
*els_rrq
;
6989 struct lpfc_iocbq
*elsiocb
;
6995 if (ndlp
!= rrq
->ndlp
)
6997 if (!ndlp
|| !NLP_CHK_NODE_ACT(ndlp
))
7000 /* If ndlp is not NULL, we will bump the reference count on it */
7001 cmdsize
= (sizeof(uint32_t) + sizeof(struct RRQ
));
7002 elsiocb
= lpfc_prep_els_iocb(vport
, 1, cmdsize
, 0, ndlp
, did
,
7007 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
7009 /* For RRQ request, remainder of payload is Exchange IDs */
7010 *((uint32_t *) (pcmd
)) = ELS_CMD_RRQ
;
7011 pcmd
+= sizeof(uint32_t);
7012 els_rrq
= (struct RRQ
*) pcmd
;
7014 bf_set(rrq_oxid
, els_rrq
, phba
->sli4_hba
.xri_ids
[rrq
->xritag
]);
7015 bf_set(rrq_rxid
, els_rrq
, rrq
->rxid
);
7016 bf_set(rrq_did
, els_rrq
, vport
->fc_myDID
);
7017 els_rrq
->rrq
= cpu_to_be32(els_rrq
->rrq
);
7018 els_rrq
->rrq_exchg
= cpu_to_be32(els_rrq
->rrq_exchg
);
7021 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
7022 "Issue RRQ: did:x%x",
7023 did
, rrq
->xritag
, rrq
->rxid
);
7024 elsiocb
->context_un
.rrq
= rrq
;
7025 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rrq
;
7026 ret
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0);
7028 if (ret
== IOCB_ERROR
) {
7029 lpfc_els_free_iocb(phba
, elsiocb
);
7036 * lpfc_send_rrq - Sends ELS RRQ if needed.
7037 * @phba: pointer to lpfc hba data structure.
7038 * @rrq: pointer to the active rrq.
7040 * This routine will call the lpfc_issue_els_rrq if the rrq is
7041 * still active for the xri. If this function returns a failure then
7042 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
7044 * Returns 0 Success.
7048 lpfc_send_rrq(struct lpfc_hba
*phba
, struct lpfc_node_rrq
*rrq
)
7050 struct lpfc_nodelist
*ndlp
= lpfc_findnode_did(rrq
->vport
,
7052 if (lpfc_test_rrq_active(phba
, ndlp
, rrq
->xritag
))
7053 return lpfc_issue_els_rrq(rrq
->vport
, ndlp
,
7060 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
7061 * @vport: pointer to a host virtual N_Port data structure.
7062 * @cmdsize: size of the ELS command.
7063 * @oldiocb: pointer to the original lpfc command iocb data structure.
7064 * @ndlp: pointer to a node-list data structure.
 7066 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
7067 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
7069 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7070 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7071 * will be stored into the context1 field of the IOCB for the completion
7072 * callback function to the RPL Accept Response ELS command.
7075 * 0 - Successfully issued ACC RPL ELS command
7076 * 1 - Failed to issue ACC RPL ELS command
7079 lpfc_els_rsp_rpl_acc(struct lpfc_vport
*vport
, uint16_t cmdsize
,
7080 struct lpfc_iocbq
*oldiocb
, struct lpfc_nodelist
*ndlp
)
7082 struct lpfc_hba
*phba
= vport
->phba
;
7083 IOCB_t
*icmd
, *oldcmd
;
7085 struct lpfc_iocbq
*elsiocb
;
7088 elsiocb
= lpfc_prep_els_iocb(vport
, 0, cmdsize
, oldiocb
->retry
, ndlp
,
7089 ndlp
->nlp_DID
, ELS_CMD_ACC
);
7094 icmd
= &elsiocb
->iocb
;
7095 oldcmd
= &oldiocb
->iocb
;
7096 icmd
->ulpContext
= oldcmd
->ulpContext
; /* Xri / rx_id */
7097 icmd
->unsli3
.rcvsli3
.ox_id
= oldcmd
->unsli3
.rcvsli3
.ox_id
;
7099 pcmd
= (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
7100 *((uint32_t *) (pcmd
)) = ELS_CMD_ACC
;
7101 pcmd
+= sizeof(uint16_t);
7102 *((uint16_t *)(pcmd
)) = be16_to_cpu(cmdsize
);
7103 pcmd
+= sizeof(uint16_t);
7105 /* Setup the RPL ACC payload */
7106 rpl_rsp
.listLen
= be32_to_cpu(1);
7108 rpl_rsp
.port_num_blk
.portNum
= 0;
7109 rpl_rsp
.port_num_blk
.portID
= be32_to_cpu(vport
->fc_myDID
);
7110 memcpy(&rpl_rsp
.port_num_blk
.portName
, &vport
->fc_portname
,
7111 sizeof(struct lpfc_name
));
7112 memcpy(pcmd
, &rpl_rsp
, cmdsize
- sizeof(uint32_t));
7113 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
7114 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
7115 "0120 Xmit ELS RPL ACC response tag x%x "
7116 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
7118 elsiocb
->iotag
, elsiocb
->iocb
.ulpContext
,
7119 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_state
,
7121 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_rsp
;
7122 phba
->fc_stat
.elsXmitACC
++;
7123 if (lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0) ==
7125 lpfc_els_free_iocb(phba
, elsiocb
);
7132 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
7133 * @vport: pointer to a host virtual N_Port data structure.
7134 * @cmdiocb: pointer to lpfc command iocb data structure.
7135 * @ndlp: pointer to a node-list data structure.
7137 * This routine processes Read Port List (RPL) IOCB received as an ELS
7138 * unsolicited event. It first checks the remote port state. If the remote
7139 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
7140 * invokes the lpfc_els_rsp_reject() routine to send reject response.
7141 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
7142 * to accept the RPL.
7145 * 0 - Successfully processed rpl iocb (currently always return 0)
7148 lpfc_els_rcv_rpl(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
7149 struct lpfc_nodelist
*ndlp
)
7151 struct lpfc_dmabuf
*pcmd
;
7158 if ((ndlp
->nlp_state
!= NLP_STE_UNMAPPED_NODE
) &&
7159 (ndlp
->nlp_state
!= NLP_STE_MAPPED_NODE
)) {
7160 /* issue rejection response */
7161 stat
.un
.b
.lsRjtRsvd0
= 0;
7162 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
7163 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_CANT_GIVE_DATA
;
7164 stat
.un
.b
.vendorUnique
= 0;
7165 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
,
7167 /* rejected the unsolicited RPL request and done with it */
7171 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
7172 lp
= (uint32_t *) pcmd
->virt
;
7173 rpl
= (RPL
*) (lp
+ 1);
7174 maxsize
= be32_to_cpu(rpl
->maxsize
);
7176 /* We support only one port */
7177 if ((rpl
->index
== 0) &&
7179 ((maxsize
* sizeof(uint32_t)) >= sizeof(RPL_RSP
)))) {
7180 cmdsize
= sizeof(uint32_t) + sizeof(RPL_RSP
);
7182 cmdsize
= sizeof(uint32_t) + maxsize
* sizeof(uint32_t);
7184 lpfc_els_rsp_rpl_acc(vport
, cmdsize
, cmdiocb
, ndlp
);
7190 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
7191 * @vport: pointer to a virtual N_Port data structure.
7192 * @cmdiocb: pointer to lpfc command iocb data structure.
7193 * @ndlp: pointer to a node-list data structure.
7195 * This routine processes Fibre Channel Address Resolution Protocol
7196 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
7197 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
7198 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
7199 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
7200 * remote PortName is compared against the FC PortName stored in the @vport
7201 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
7202 * compared against the FC NodeName stored in the @vport data structure.
7203 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
7204 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
7205 * invoked to send out FARP Response to the remote node. Before sending the
7206 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP
7207 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
7208 * routine is invoked to log into the remote port first.
7211 * 0 - Either the FARP Match Mode not supported or successfully processed
7214 lpfc_els_rcv_farp(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
7215 struct lpfc_nodelist
*ndlp
)
7217 struct lpfc_dmabuf
*pcmd
;
7221 uint32_t cmd
, cnt
, did
;
7223 icmd
= &cmdiocb
->iocb
;
7224 did
= icmd
->un
.elsreq64
.remoteID
;
7225 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
7226 lp
= (uint32_t *) pcmd
->virt
;
7230 /* FARP-REQ received from DID <did> */
7231 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
7232 "0601 FARP-REQ received from DID x%x\n", did
);
7233 /* We will only support match on WWPN or WWNN */
7234 if (fp
->Mflags
& ~(FARP_MATCH_NODE
| FARP_MATCH_PORT
)) {
7239 /* If this FARP command is searching for my portname */
7240 if (fp
->Mflags
& FARP_MATCH_PORT
) {
7241 if (memcmp(&fp
->RportName
, &vport
->fc_portname
,
7242 sizeof(struct lpfc_name
)) == 0)
7246 /* If this FARP command is searching for my nodename */
7247 if (fp
->Mflags
& FARP_MATCH_NODE
) {
7248 if (memcmp(&fp
->RnodeName
, &vport
->fc_nodename
,
7249 sizeof(struct lpfc_name
)) == 0)
7254 if ((ndlp
->nlp_state
== NLP_STE_UNMAPPED_NODE
) ||
7255 (ndlp
->nlp_state
== NLP_STE_MAPPED_NODE
)) {
7256 /* Log back into the node before sending the FARP. */
7257 if (fp
->Rflags
& FARP_REQUEST_PLOGI
) {
7258 ndlp
->nlp_prev_state
= ndlp
->nlp_state
;
7259 lpfc_nlp_set_state(vport
, ndlp
,
7260 NLP_STE_PLOGI_ISSUE
);
7261 lpfc_issue_els_plogi(vport
, ndlp
->nlp_DID
, 0);
7264 /* Send a FARP response to that node */
7265 if (fp
->Rflags
& FARP_REQUEST_FARPR
)
7266 lpfc_issue_els_farpr(vport
, did
, 0);
7273 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
7274 * @vport: pointer to a host virtual N_Port data structure.
7275 * @cmdiocb: pointer to lpfc command iocb data structure.
7276 * @ndlp: pointer to a node-list data structure.
7278 * This routine processes Fibre Channel Address Resolution Protocol
7279 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
7280 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
7281 * the FARP response request.
7284 * 0 - Successfully processed FARPR IOCB (currently always return 0)
7287 lpfc_els_rcv_farpr(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
7288 struct lpfc_nodelist
*ndlp
)
7290 struct lpfc_dmabuf
*pcmd
;
7295 icmd
= &cmdiocb
->iocb
;
7296 did
= icmd
->un
.elsreq64
.remoteID
;
7297 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
7298 lp
= (uint32_t *) pcmd
->virt
;
7301 /* FARP-RSP received from DID <did> */
7302 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
7303 "0600 FARP-RSP received from DID x%x\n", did
);
7304 /* ACCEPT the Farp resp request */
7305 lpfc_els_rsp_acc(vport
, ELS_CMD_ACC
, cmdiocb
, ndlp
, NULL
);
7311 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
7312 * @vport: pointer to a host virtual N_Port data structure.
7313 * @cmdiocb: pointer to lpfc command iocb data structure.
7314 * @fan_ndlp: pointer to a node-list data structure.
7316 * This routine processes a Fabric Address Notification (FAN) IOCB
7317 * command received as an ELS unsolicited event. The FAN ELS command will
7318 * only be processed on a physical port (i.e., the @vport represents the
7319 * physical port). The fabric NodeName and PortName from the FAN IOCB are
7320 * compared against those in the phba data structure. If any of those is
7321 * different, the lpfc_initial_flogi() routine is invoked to initialize
7322 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise,
7323 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
7324 * is invoked to register login to the fabric.
7327 * 0 - Successfully processed fan iocb (currently always return 0).
7330 lpfc_els_rcv_fan(struct lpfc_vport
*vport
, struct lpfc_iocbq
*cmdiocb
,
7331 struct lpfc_nodelist
*fan_ndlp
)
7333 struct lpfc_hba
*phba
= vport
->phba
;
7337 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
, "0265 FAN received\n");
7338 lp
= (uint32_t *)((struct lpfc_dmabuf
*)cmdiocb
->context2
)->virt
;
7340 /* FAN received; Fan does not have a reply sequence */
7341 if ((vport
== phba
->pport
) &&
7342 (vport
->port_state
== LPFC_LOCAL_CFG_LINK
)) {
7343 if ((memcmp(&phba
->fc_fabparam
.nodeName
, &fp
->FnodeName
,
7344 sizeof(struct lpfc_name
))) ||
7345 (memcmp(&phba
->fc_fabparam
.portName
, &fp
->FportName
,
7346 sizeof(struct lpfc_name
)))) {
7347 /* This port has switched fabrics. FLOGI is required */
7348 lpfc_issue_init_vfi(vport
);
7350 /* FAN verified - skip FLOGI */
7351 vport
->fc_myDID
= vport
->fc_prevDID
;
7352 if (phba
->sli_rev
< LPFC_SLI_REV4
)
7353 lpfc_issue_fabric_reglogin(vport
);
7355 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
7356 "3138 Need register VFI: (x%x/%x)\n",
7357 vport
->fc_prevDID
, vport
->fc_myDID
);
7358 lpfc_issue_reg_vfi(vport
);
 7366 * lpfc_els_timeout - Handler function to the els timer
7367 * @ptr: holder for the timer function associated data.
7369 * This routine is invoked by the ELS timer after timeout. It posts the ELS
7370 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
7371 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
7372 * up the worker thread. It is for the worker thread to invoke the routine
7373 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
7376 lpfc_els_timeout(struct timer_list
*t
)
7378 struct lpfc_vport
*vport
= from_timer(vport
, t
, els_tmofunc
);
7379 struct lpfc_hba
*phba
= vport
->phba
;
7380 uint32_t tmo_posted
;
7381 unsigned long iflag
;
7383 spin_lock_irqsave(&vport
->work_port_lock
, iflag
);
7384 tmo_posted
= vport
->work_port_events
& WORKER_ELS_TMO
;
7385 if ((!tmo_posted
) && (!(vport
->load_flag
& FC_UNLOADING
)))
7386 vport
->work_port_events
|= WORKER_ELS_TMO
;
7387 spin_unlock_irqrestore(&vport
->work_port_lock
, iflag
);
7389 if ((!tmo_posted
) && (!(vport
->load_flag
& FC_UNLOADING
)))
7390 lpfc_worker_wake_up(phba
);
7396 * lpfc_els_timeout_handler - Process an els timeout event
7397 * @vport: pointer to a virtual N_Port data structure.
7399 * This routine is the actual handler function that processes an ELS timeout
7400 * event. It walks the ELS ring to get and abort all the IOCBs (except the
7401 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
7402 * invoking the lpfc_sli_issue_abort_iotag() routine.
7405 lpfc_els_timeout_handler(struct lpfc_vport
*vport
)
7407 struct lpfc_hba
*phba
= vport
->phba
;
7408 struct lpfc_sli_ring
*pring
;
7409 struct lpfc_iocbq
*tmp_iocb
, *piocb
;
7411 struct lpfc_dmabuf
*pcmd
;
7412 uint32_t els_command
= 0;
7414 uint32_t remote_ID
= 0xffffffff;
7415 LIST_HEAD(abort_list
);
7418 timeout
= (uint32_t)(phba
->fc_ratov
<< 1);
7420 pring
= lpfc_phba_elsring(phba
);
7421 if (unlikely(!pring
))
7424 if ((phba
->pport
->load_flag
& FC_UNLOADING
))
7426 spin_lock_irq(&phba
->hbalock
);
7427 if (phba
->sli_rev
== LPFC_SLI_REV4
)
7428 spin_lock(&pring
->ring_lock
);
7430 if ((phba
->pport
->load_flag
& FC_UNLOADING
)) {
7431 if (phba
->sli_rev
== LPFC_SLI_REV4
)
7432 spin_unlock(&pring
->ring_lock
);
7433 spin_unlock_irq(&phba
->hbalock
);
7437 list_for_each_entry_safe(piocb
, tmp_iocb
, &pring
->txcmplq
, list
) {
7440 if ((piocb
->iocb_flag
& LPFC_IO_LIBDFC
) != 0 ||
7441 piocb
->iocb
.ulpCommand
== CMD_ABORT_XRI_CN
||
7442 piocb
->iocb
.ulpCommand
== CMD_CLOSE_XRI_CN
)
7445 if (piocb
->vport
!= vport
)
7448 pcmd
= (struct lpfc_dmabuf
*) piocb
->context2
;
7450 els_command
= *(uint32_t *) (pcmd
->virt
);
7452 if (els_command
== ELS_CMD_FARP
||
7453 els_command
== ELS_CMD_FARPR
||
7454 els_command
== ELS_CMD_FDISC
)
7457 if (piocb
->drvrTimeout
> 0) {
7458 if (piocb
->drvrTimeout
>= timeout
)
7459 piocb
->drvrTimeout
-= timeout
;
7461 piocb
->drvrTimeout
= 0;
7465 remote_ID
= 0xffffffff;
7466 if (cmd
->ulpCommand
!= CMD_GEN_REQUEST64_CR
)
7467 remote_ID
= cmd
->un
.elsreq64
.remoteID
;
7469 struct lpfc_nodelist
*ndlp
;
7470 ndlp
= __lpfc_findnode_rpi(vport
, cmd
->ulpContext
);
7471 if (ndlp
&& NLP_CHK_NODE_ACT(ndlp
))
7472 remote_ID
= ndlp
->nlp_DID
;
7474 list_add_tail(&piocb
->dlist
, &abort_list
);
7476 if (phba
->sli_rev
== LPFC_SLI_REV4
)
7477 spin_unlock(&pring
->ring_lock
);
7478 spin_unlock_irq(&phba
->hbalock
);
7480 list_for_each_entry_safe(piocb
, tmp_iocb
, &abort_list
, dlist
) {
7482 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
7483 "0127 ELS timeout Data: x%x x%x x%x "
7484 "x%x\n", els_command
,
7485 remote_ID
, cmd
->ulpCommand
, cmd
->ulpIoTag
);
7486 spin_lock_irq(&phba
->hbalock
);
7487 list_del_init(&piocb
->dlist
);
7488 lpfc_sli_issue_abort_iotag(phba
, pring
, piocb
);
7489 spin_unlock_irq(&phba
->hbalock
);
7492 if (!list_empty(&pring
->txcmplq
))
7493 if (!(phba
->pport
->load_flag
& FC_UNLOADING
))
7494 mod_timer(&vport
->els_tmofunc
,
7495 jiffies
+ msecs_to_jiffies(1000 * timeout
));
7499 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
7500 * @vport: pointer to a host virtual N_Port data structure.
7502 * This routine is used to clean up all the outstanding ELS commands on a
7503 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
7504 * routine. After that, it walks the ELS transmit queue to remove all the
7505 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
7506 * the IOCBs with a non-NULL completion callback function, the callback
7507 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
7508 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
7509 * callback function, the IOCB will simply be released. Finally, it walks
7510 * the ELS transmit completion queue to issue an abort IOCB to any transmit
7511 * completion queue IOCB that is associated with the @vport and is not
7512 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
7513 * part of the discovery state machine) out to HBA by invoking the
7514 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
 7515 * abort IOCB to any transmit completion queued IOCB, it does not guarantee
7516 * the IOCBs are aborted when this function returns.
7519 lpfc_els_flush_cmd(struct lpfc_vport
*vport
)
7521 LIST_HEAD(abort_list
);
7522 struct lpfc_hba
*phba
= vport
->phba
;
7523 struct lpfc_sli_ring
*pring
;
7524 struct lpfc_iocbq
*tmp_iocb
, *piocb
;
7527 lpfc_fabric_abort_vport(vport
);
7529 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
7530 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
7531 * ultimately grabs the ring_lock, the driver must splice the list into
7532 * a working list and release the locks before calling the abort.
7534 spin_lock_irq(&phba
->hbalock
);
7535 pring
= lpfc_phba_elsring(phba
);
7537 /* Bail out if we've no ELS wq, like in PCI error recovery case. */
7538 if (unlikely(!pring
)) {
7539 spin_unlock_irq(&phba
->hbalock
);
7543 if (phba
->sli_rev
== LPFC_SLI_REV4
)
7544 spin_lock(&pring
->ring_lock
);
7546 list_for_each_entry_safe(piocb
, tmp_iocb
, &pring
->txcmplq
, list
) {
7547 if (piocb
->iocb_flag
& LPFC_IO_LIBDFC
)
7550 if (piocb
->vport
!= vport
)
7552 list_add_tail(&piocb
->dlist
, &abort_list
);
7554 if (phba
->sli_rev
== LPFC_SLI_REV4
)
7555 spin_unlock(&pring
->ring_lock
);
7556 spin_unlock_irq(&phba
->hbalock
);
7557 /* Abort each iocb on the aborted list and remove the dlist links. */
7558 list_for_each_entry_safe(piocb
, tmp_iocb
, &abort_list
, dlist
) {
7559 spin_lock_irq(&phba
->hbalock
);
7560 list_del_init(&piocb
->dlist
);
7561 lpfc_sli_issue_abort_iotag(phba
, pring
, piocb
);
7562 spin_unlock_irq(&phba
->hbalock
);
7564 if (!list_empty(&abort_list
))
7565 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
7566 "3387 abort list for txq not empty\n");
7567 INIT_LIST_HEAD(&abort_list
);
7569 spin_lock_irq(&phba
->hbalock
);
7570 if (phba
->sli_rev
== LPFC_SLI_REV4
)
7571 spin_lock(&pring
->ring_lock
);
7573 list_for_each_entry_safe(piocb
, tmp_iocb
, &pring
->txq
, list
) {
7576 if (piocb
->iocb_flag
& LPFC_IO_LIBDFC
) {
7580 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
7581 if (cmd
->ulpCommand
== CMD_QUE_RING_BUF_CN
||
7582 cmd
->ulpCommand
== CMD_QUE_RING_BUF64_CN
||
7583 cmd
->ulpCommand
== CMD_CLOSE_XRI_CN
||
7584 cmd
->ulpCommand
== CMD_ABORT_XRI_CN
)
7587 if (piocb
->vport
!= vport
)
7590 list_del_init(&piocb
->list
);
7591 list_add_tail(&piocb
->list
, &abort_list
);
7593 if (phba
->sli_rev
== LPFC_SLI_REV4
)
7594 spin_unlock(&pring
->ring_lock
);
7595 spin_unlock_irq(&phba
->hbalock
);
7597 /* Cancell all the IOCBs from the completions list */
7598 lpfc_sli_cancel_iocbs(phba
, &abort_list
,
7599 IOSTAT_LOCAL_REJECT
, IOERR_SLI_ABORTED
);
7605 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
7606 * @phba: pointer to lpfc hba data structure.
7608 * This routine is used to clean up all the outstanding ELS commands on a
7609 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
7610 * routine. After that, it walks the ELS transmit queue to remove all the
7611 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
7612 * the IOCBs with the completion callback function associated, the callback
7613 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
7614 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
7615 * callback function associated, the IOCB will simply be released. Finally,
7616 * it walks the ELS transmit completion queue to issue an abort IOCB to any
7617 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
7618 * management plane IOCBs that are not part of the discovery state machine)
7619 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
7622 lpfc_els_flush_all_cmd(struct lpfc_hba
*phba
)
7624 struct lpfc_vport
*vport
;
7625 list_for_each_entry(vport
, &phba
->port_list
, listentry
)
7626 lpfc_els_flush_cmd(vport
);
7632 * lpfc_send_els_failure_event - Posts an ELS command failure event
7633 * @phba: Pointer to hba context object.
7634 * @cmdiocbp: Pointer to command iocb which reported error.
7635 * @rspiocbp: Pointer to response iocb which reported error.
7637 * This function sends an event when there is an ELS command
7641 lpfc_send_els_failure_event(struct lpfc_hba
*phba
,
7642 struct lpfc_iocbq
*cmdiocbp
,
7643 struct lpfc_iocbq
*rspiocbp
)
7645 struct lpfc_vport
*vport
= cmdiocbp
->vport
;
7646 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
7647 struct lpfc_lsrjt_event lsrjt_event
;
7648 struct lpfc_fabric_event_header fabric_event
;
7650 struct lpfc_nodelist
*ndlp
;
7653 ndlp
= cmdiocbp
->context1
;
7654 if (!ndlp
|| !NLP_CHK_NODE_ACT(ndlp
))
7657 if (rspiocbp
->iocb
.ulpStatus
== IOSTAT_LS_RJT
) {
7658 lsrjt_event
.header
.event_type
= FC_REG_ELS_EVENT
;
7659 lsrjt_event
.header
.subcategory
= LPFC_EVENT_LSRJT_RCV
;
7660 memcpy(lsrjt_event
.header
.wwpn
, &ndlp
->nlp_portname
,
7661 sizeof(struct lpfc_name
));
7662 memcpy(lsrjt_event
.header
.wwnn
, &ndlp
->nlp_nodename
,
7663 sizeof(struct lpfc_name
));
7664 pcmd
= (uint32_t *) (((struct lpfc_dmabuf
*)
7665 cmdiocbp
->context2
)->virt
);
7666 lsrjt_event
.command
= (pcmd
!= NULL
) ? *pcmd
: 0;
7667 stat
.un
.lsRjtError
= be32_to_cpu(rspiocbp
->iocb
.un
.ulpWord
[4]);
7668 lsrjt_event
.reason_code
= stat
.un
.b
.lsRjtRsnCode
;
7669 lsrjt_event
.explanation
= stat
.un
.b
.lsRjtRsnCodeExp
;
7670 fc_host_post_vendor_event(shost
,
7671 fc_get_event_number(),
7672 sizeof(lsrjt_event
),
7673 (char *)&lsrjt_event
,
7677 if ((rspiocbp
->iocb
.ulpStatus
== IOSTAT_NPORT_BSY
) ||
7678 (rspiocbp
->iocb
.ulpStatus
== IOSTAT_FABRIC_BSY
)) {
7679 fabric_event
.event_type
= FC_REG_FABRIC_EVENT
;
7680 if (rspiocbp
->iocb
.ulpStatus
== IOSTAT_NPORT_BSY
)
7681 fabric_event
.subcategory
= LPFC_EVENT_PORT_BUSY
;
7683 fabric_event
.subcategory
= LPFC_EVENT_FABRIC_BUSY
;
7684 memcpy(fabric_event
.wwpn
, &ndlp
->nlp_portname
,
7685 sizeof(struct lpfc_name
));
7686 memcpy(fabric_event
.wwnn
, &ndlp
->nlp_nodename
,
7687 sizeof(struct lpfc_name
));
7688 fc_host_post_vendor_event(shost
,
7689 fc_get_event_number(),
7690 sizeof(fabric_event
),
7691 (char *)&fabric_event
,
7699 * lpfc_send_els_event - Posts unsolicited els event
7700 * @vport: Pointer to vport object.
7701 * @ndlp: Pointer FC node object.
7702 * @cmd: ELS command code.
7704 * This function posts an event when there is an incoming
7705 * unsolicited ELS command.
7708 lpfc_send_els_event(struct lpfc_vport
*vport
,
7709 struct lpfc_nodelist
*ndlp
,
7712 struct lpfc_els_event_header
*els_data
= NULL
;
7713 struct lpfc_logo_event
*logo_data
= NULL
;
7714 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
7716 if (*payload
== ELS_CMD_LOGO
) {
7717 logo_data
= kmalloc(sizeof(struct lpfc_logo_event
), GFP_KERNEL
);
7719 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
7720 "0148 Failed to allocate memory "
7721 "for LOGO event\n");
7724 els_data
= &logo_data
->header
;
7726 els_data
= kmalloc(sizeof(struct lpfc_els_event_header
),
7729 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
7730 "0149 Failed to allocate memory "
7735 els_data
->event_type
= FC_REG_ELS_EVENT
;
7738 els_data
->subcategory
= LPFC_EVENT_PLOGI_RCV
;
7741 els_data
->subcategory
= LPFC_EVENT_PRLO_RCV
;
7744 els_data
->subcategory
= LPFC_EVENT_ADISC_RCV
;
7747 els_data
->subcategory
= LPFC_EVENT_LOGO_RCV
;
7748 /* Copy the WWPN in the LOGO payload */
7749 memcpy(logo_data
->logo_wwpn
, &payload
[2],
7750 sizeof(struct lpfc_name
));
7756 memcpy(els_data
->wwpn
, &ndlp
->nlp_portname
, sizeof(struct lpfc_name
));
7757 memcpy(els_data
->wwnn
, &ndlp
->nlp_nodename
, sizeof(struct lpfc_name
));
7758 if (*payload
== ELS_CMD_LOGO
) {
7759 fc_host_post_vendor_event(shost
,
7760 fc_get_event_number(),
7761 sizeof(struct lpfc_logo_event
),
7766 fc_host_post_vendor_event(shost
,
7767 fc_get_event_number(),
7768 sizeof(struct lpfc_els_event_header
),
7779 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
7780 * @phba: pointer to lpfc hba data structure.
7781 * @pring: pointer to a SLI ring.
7782 * @vport: pointer to a host virtual N_Port data structure.
7783 * @elsiocb: pointer to lpfc els command iocb data structure.
7785 * This routine is used for processing the IOCB associated with a unsolicited
7786 * event. It first determines whether there is an existing ndlp that matches
7787 * the DID from the unsolicited IOCB. If not, it will create a new one with
7788 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
7789 * IOCB is then used to invoke the proper routine and to set up proper state
7790 * of the discovery state machine.
7793 lpfc_els_unsol_buffer(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
7794 struct lpfc_vport
*vport
, struct lpfc_iocbq
*elsiocb
)
7796 struct Scsi_Host
*shost
;
7797 struct lpfc_nodelist
*ndlp
;
7800 uint32_t cmd
, did
, newnode
;
7801 uint8_t rjt_exp
, rjt_err
= 0;
7802 IOCB_t
*icmd
= &elsiocb
->iocb
;
7804 if (!vport
|| !(elsiocb
->context2
))
7808 payload
= ((struct lpfc_dmabuf
*)elsiocb
->context2
)->virt
;
7810 if ((phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) == 0)
7811 lpfc_post_buffer(phba
, pring
, 1);
7813 did
= icmd
->un
.rcvels
.remoteID
;
7814 if (icmd
->ulpStatus
) {
7815 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
7816 "RCV Unsol ELS: status:x%x/x%x did:x%x",
7817 icmd
->ulpStatus
, icmd
->un
.ulpWord
[4], did
);
7821 /* Check to see if link went down during discovery */
7822 if (lpfc_els_chk_latt(vport
))
7825 /* Ignore traffic received during vport shutdown. */
7826 if (vport
->load_flag
& FC_UNLOADING
)
7829 /* If NPort discovery is delayed drop incoming ELS */
7830 if ((vport
->fc_flag
& FC_DISC_DELAYED
) &&
7831 (cmd
!= ELS_CMD_PLOGI
))
7834 ndlp
= lpfc_findnode_did(vport
, did
);
7836 /* Cannot find existing Fabric ndlp, so allocate a new one */
7837 ndlp
= lpfc_nlp_init(vport
, did
);
7840 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
7842 if ((did
& Fabric_DID_MASK
) == Fabric_DID_MASK
)
7843 ndlp
->nlp_type
|= NLP_FABRIC
;
7844 } else if (!NLP_CHK_NODE_ACT(ndlp
)) {
7845 ndlp
= lpfc_enable_node(vport
, ndlp
,
7846 NLP_STE_UNUSED_NODE
);
7849 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
7851 if ((did
& Fabric_DID_MASK
) == Fabric_DID_MASK
)
7852 ndlp
->nlp_type
|= NLP_FABRIC
;
7853 } else if (ndlp
->nlp_state
== NLP_STE_UNUSED_NODE
) {
7854 /* This is similar to the new node path */
7855 ndlp
= lpfc_nlp_get(ndlp
);
7858 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
7862 phba
->fc_stat
.elsRcvFrame
++;
7865 * Do not process any unsolicited ELS commands
7866 * if the ndlp is in DEV_LOSS
7868 shost
= lpfc_shost_from_vport(vport
);
7869 spin_lock_irq(shost
->host_lock
);
7870 if (ndlp
->nlp_flag
& NLP_IN_DEV_LOSS
) {
7871 spin_unlock_irq(shost
->host_lock
);
7874 spin_unlock_irq(shost
->host_lock
);
7876 elsiocb
->context1
= lpfc_nlp_get(ndlp
);
7877 elsiocb
->vport
= vport
;
7879 if ((cmd
& ELS_CMD_MASK
) == ELS_CMD_RSCN
) {
7880 cmd
&= ELS_CMD_MASK
;
7882 /* ELS command <elsCmd> received from NPORT <did> */
7883 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
7884 "0112 ELS command x%x received from NPORT x%x "
7885 "Data: x%x x%x x%x x%x\n",
7886 cmd
, did
, vport
->port_state
, vport
->fc_flag
,
7887 vport
->fc_myDID
, vport
->fc_prevDID
);
7889 /* reject till our FLOGI completes */
7890 if ((vport
->port_state
< LPFC_FABRIC_CFG_LINK
) &&
7891 (cmd
!= ELS_CMD_FLOGI
)) {
7892 rjt_err
= LSRJT_LOGICAL_BSY
;
7893 rjt_exp
= LSEXP_NOTHING_MORE
;
7899 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
7900 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
7901 did
, vport
->port_state
, ndlp
->nlp_flag
);
7903 phba
->fc_stat
.elsRcvPLOGI
++;
7904 ndlp
= lpfc_plogi_confirm_nport(phba
, payload
, ndlp
);
7905 if (phba
->sli_rev
== LPFC_SLI_REV4
&&
7906 (phba
->pport
->fc_flag
& FC_PT2PT
)) {
7907 vport
->fc_prevDID
= vport
->fc_myDID
;
7908 /* Our DID needs to be updated before registering
7909 * the vfi. This is done in lpfc_rcv_plogi but
7910 * that is called after the reg_vfi.
7912 vport
->fc_myDID
= elsiocb
->iocb
.un
.rcvels
.parmRo
;
7913 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
7914 "3312 Remote port assigned DID x%x "
7915 "%x\n", vport
->fc_myDID
,
7919 lpfc_send_els_event(vport
, ndlp
, payload
);
7921 /* If Nport discovery is delayed, reject PLOGIs */
7922 if (vport
->fc_flag
& FC_DISC_DELAYED
) {
7923 rjt_err
= LSRJT_UNABLE_TPC
;
7924 rjt_exp
= LSEXP_NOTHING_MORE
;
7928 if (vport
->port_state
< LPFC_DISC_AUTH
) {
7929 if (!(phba
->pport
->fc_flag
& FC_PT2PT
) ||
7930 (phba
->pport
->fc_flag
& FC_PT2PT_PLOGI
)) {
7931 rjt_err
= LSRJT_UNABLE_TPC
;
7932 rjt_exp
= LSEXP_NOTHING_MORE
;
7937 spin_lock_irq(shost
->host_lock
);
7938 ndlp
->nlp_flag
&= ~NLP_TARGET_REMOVE
;
7939 spin_unlock_irq(shost
->host_lock
);
7941 lpfc_disc_state_machine(vport
, ndlp
, elsiocb
,
7946 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
7947 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
7948 did
, vport
->port_state
, ndlp
->nlp_flag
);
7950 phba
->fc_stat
.elsRcvFLOGI
++;
7951 lpfc_els_rcv_flogi(vport
, elsiocb
, ndlp
);
7956 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
7957 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
7958 did
, vport
->port_state
, ndlp
->nlp_flag
);
7960 phba
->fc_stat
.elsRcvLOGO
++;
7961 lpfc_send_els_event(vport
, ndlp
, payload
);
7962 if (vport
->port_state
< LPFC_DISC_AUTH
) {
7963 rjt_err
= LSRJT_UNABLE_TPC
;
7964 rjt_exp
= LSEXP_NOTHING_MORE
;
7967 lpfc_disc_state_machine(vport
, ndlp
, elsiocb
, NLP_EVT_RCV_LOGO
);
7970 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
7971 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
7972 did
, vport
->port_state
, ndlp
->nlp_flag
);
7974 phba
->fc_stat
.elsRcvPRLO
++;
7975 lpfc_send_els_event(vport
, ndlp
, payload
);
7976 if (vport
->port_state
< LPFC_DISC_AUTH
) {
7977 rjt_err
= LSRJT_UNABLE_TPC
;
7978 rjt_exp
= LSEXP_NOTHING_MORE
;
7981 lpfc_disc_state_machine(vport
, ndlp
, elsiocb
, NLP_EVT_RCV_PRLO
);
7984 phba
->fc_stat
.elsRcvLCB
++;
7985 lpfc_els_rcv_lcb(vport
, elsiocb
, ndlp
);
7988 phba
->fc_stat
.elsRcvRDP
++;
7989 lpfc_els_rcv_rdp(vport
, elsiocb
, ndlp
);
7992 phba
->fc_stat
.elsRcvRSCN
++;
7993 lpfc_els_rcv_rscn(vport
, elsiocb
, ndlp
);
7998 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
7999 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
8000 did
, vport
->port_state
, ndlp
->nlp_flag
);
8002 lpfc_send_els_event(vport
, ndlp
, payload
);
8003 phba
->fc_stat
.elsRcvADISC
++;
8004 if (vport
->port_state
< LPFC_DISC_AUTH
) {
8005 rjt_err
= LSRJT_UNABLE_TPC
;
8006 rjt_exp
= LSEXP_NOTHING_MORE
;
8009 lpfc_disc_state_machine(vport
, ndlp
, elsiocb
,
8013 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8014 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
8015 did
, vport
->port_state
, ndlp
->nlp_flag
);
8017 phba
->fc_stat
.elsRcvPDISC
++;
8018 if (vport
->port_state
< LPFC_DISC_AUTH
) {
8019 rjt_err
= LSRJT_UNABLE_TPC
;
8020 rjt_exp
= LSEXP_NOTHING_MORE
;
8023 lpfc_disc_state_machine(vport
, ndlp
, elsiocb
,
8027 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8028 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
8029 did
, vport
->port_state
, ndlp
->nlp_flag
);
8031 phba
->fc_stat
.elsRcvFARPR
++;
8032 lpfc_els_rcv_farpr(vport
, elsiocb
, ndlp
);
8035 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8036 "RCV FARP: did:x%x/ste:x%x flg:x%x",
8037 did
, vport
->port_state
, ndlp
->nlp_flag
);
8039 phba
->fc_stat
.elsRcvFARP
++;
8040 lpfc_els_rcv_farp(vport
, elsiocb
, ndlp
);
8043 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8044 "RCV FAN: did:x%x/ste:x%x flg:x%x",
8045 did
, vport
->port_state
, ndlp
->nlp_flag
);
8047 phba
->fc_stat
.elsRcvFAN
++;
8048 lpfc_els_rcv_fan(vport
, elsiocb
, ndlp
);
8051 case ELS_CMD_NVMEPRLI
:
8052 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8053 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
8054 did
, vport
->port_state
, ndlp
->nlp_flag
);
8056 phba
->fc_stat
.elsRcvPRLI
++;
8057 if ((vport
->port_state
< LPFC_DISC_AUTH
) &&
8058 (vport
->fc_flag
& FC_FABRIC
)) {
8059 rjt_err
= LSRJT_UNABLE_TPC
;
8060 rjt_exp
= LSEXP_NOTHING_MORE
;
8064 /* NVMET accepts NVME PRLI only. Reject FCP PRLI */
8065 if (cmd
== ELS_CMD_PRLI
&& phba
->nvmet_support
) {
8066 rjt_err
= LSRJT_CMD_UNSUPPORTED
;
8067 rjt_exp
= LSEXP_REQ_UNSUPPORTED
;
8070 lpfc_disc_state_machine(vport
, ndlp
, elsiocb
, NLP_EVT_RCV_PRLI
);
8073 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8074 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
8075 did
, vport
->port_state
, ndlp
->nlp_flag
);
8077 phba
->fc_stat
.elsRcvLIRR
++;
8078 lpfc_els_rcv_lirr(vport
, elsiocb
, ndlp
);
8083 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8084 "RCV RLS: did:x%x/ste:x%x flg:x%x",
8085 did
, vport
->port_state
, ndlp
->nlp_flag
);
8087 phba
->fc_stat
.elsRcvRLS
++;
8088 lpfc_els_rcv_rls(vport
, elsiocb
, ndlp
);
8093 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8094 "RCV RPS: did:x%x/ste:x%x flg:x%x",
8095 did
, vport
->port_state
, ndlp
->nlp_flag
);
8097 phba
->fc_stat
.elsRcvRPS
++;
8098 lpfc_els_rcv_rps(vport
, elsiocb
, ndlp
);
8103 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8104 "RCV RPL: did:x%x/ste:x%x flg:x%x",
8105 did
, vport
->port_state
, ndlp
->nlp_flag
);
8107 phba
->fc_stat
.elsRcvRPL
++;
8108 lpfc_els_rcv_rpl(vport
, elsiocb
, ndlp
);
8113 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8114 "RCV RNID: did:x%x/ste:x%x flg:x%x",
8115 did
, vport
->port_state
, ndlp
->nlp_flag
);
8117 phba
->fc_stat
.elsRcvRNID
++;
8118 lpfc_els_rcv_rnid(vport
, elsiocb
, ndlp
);
8123 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8124 "RCV RTV: did:x%x/ste:x%x flg:x%x",
8125 did
, vport
->port_state
, ndlp
->nlp_flag
);
8126 phba
->fc_stat
.elsRcvRTV
++;
8127 lpfc_els_rcv_rtv(vport
, elsiocb
, ndlp
);
8132 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8133 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
8134 did
, vport
->port_state
, ndlp
->nlp_flag
);
8136 phba
->fc_stat
.elsRcvRRQ
++;
8137 lpfc_els_rcv_rrq(vport
, elsiocb
, ndlp
);
8142 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8143 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
8144 did
, vport
->port_state
, ndlp
->nlp_flag
);
8146 phba
->fc_stat
.elsRcvECHO
++;
8147 lpfc_els_rcv_echo(vport
, elsiocb
, ndlp
);
8152 /* receive this due to exchange closed */
8153 rjt_err
= LSRJT_UNABLE_TPC
;
8154 rjt_exp
= LSEXP_INVALID_OX_RX
;
8157 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_UNSOL
,
8158 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
8159 cmd
, did
, vport
->port_state
);
8161 /* Unsupported ELS command, reject */
8162 rjt_err
= LSRJT_CMD_UNSUPPORTED
;
8163 rjt_exp
= LSEXP_NOTHING_MORE
;
8165 /* Unknown ELS command <elsCmd> received from NPORT <did> */
8166 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
8167 "0115 Unknown ELS command x%x "
8168 "received from NPORT x%x\n", cmd
, did
);
8175 /* check if need to LS_RJT received ELS cmd */
8177 memset(&stat
, 0, sizeof(stat
));
8178 stat
.un
.b
.lsRjtRsnCode
= rjt_err
;
8179 stat
.un
.b
.lsRjtRsnCodeExp
= rjt_exp
;
8180 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, elsiocb
, ndlp
,
8184 lpfc_nlp_put(elsiocb
->context1
);
8185 elsiocb
->context1
= NULL
;
8189 if (vport
&& !(vport
->load_flag
& FC_UNLOADING
))
8190 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
8191 "0111 Dropping received ELS cmd "
8192 "Data: x%x x%x x%x\n",
8193 icmd
->ulpStatus
, icmd
->un
.ulpWord
[4], icmd
->ulpTimeout
);
8194 phba
->fc_stat
.elsRcvDrop
++;
8198 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
8199 * @phba: pointer to lpfc hba data structure.
8200 * @pring: pointer to a SLI ring.
8201 * @elsiocb: pointer to lpfc els iocb data structure.
8203 * This routine is used to process an unsolicited event received from a SLI
8204 * (Service Level Interface) ring. The actual processing of the data buffer
8205 * associated with the unsolicited event is done by invoking the routine
8206 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the
8207 * SLI ring on which the unsolicited event was received.
/* NOTE(review): this chunk lost interior lines (braces, labels, some
 * statements) during extraction; comments below describe only what is
 * visible and should be re-checked against the full source.
 */
8210 lpfc_els_unsol_event(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
8211 struct lpfc_iocbq
*elsiocb
)
8213 struct lpfc_vport
*vport
= phba
->pport
;
8214 IOCB_t
*icmd
= &elsiocb
->iocb
;
/* Save the two possible receive buffers before clearing the contexts */
8216 struct lpfc_dmabuf
*bdeBuf1
= elsiocb
->context2
;
8217 struct lpfc_dmabuf
*bdeBuf2
= elsiocb
->context3
;
/* Clear iocb contexts; the saved bdeBuf1/bdeBuf2 copies are used below */
8219 elsiocb
->context1
= NULL
;
8220 elsiocb
->context2
= NULL
;
8221 elsiocb
->context3
= NULL
;
8223 if (icmd
->ulpStatus
== IOSTAT_NEED_BUFFER
) {
8224 lpfc_sli_hbqbuf_add_hbqs(phba
, LPFC_ELS_HBQ
);
8225 } else if (icmd
->ulpStatus
== IOSTAT_LOCAL_REJECT
&&
8226 (icmd
->un
.ulpWord
[4] & IOERR_PARAM_MASK
) ==
8227 IOERR_RCV_BUFFER_WAITING
) {
8228 phba
->fc_stat
.NoRcvBuf
++;
8229 /* Not enough posted buffers; Try posting more buffers */
8230 if (!(phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
))
8231 lpfc_post_buffer(phba
, pring
, 0);
/* NPIV: route the event to the vport owning the receiving VPI;
 * a VPI of 0xffff means the physical port.
 */
8235 if ((phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
) &&
8236 (icmd
->ulpCommand
== CMD_IOCB_RCV_ELS64_CX
||
8237 icmd
->ulpCommand
== CMD_IOCB_RCV_SEQ64_CX
)) {
8238 if (icmd
->unsli3
.rcvsli3
.vpi
== 0xffff)
8239 vport
= phba
->pport
;
8241 vport
= lpfc_find_vport_by_vpid(phba
,
8242 icmd
->unsli3
.rcvsli3
.vpi
);
8245 /* If there are no BDEs associated
8246 * with this IOCB, there is nothing to do.
8248 if (icmd
->ulpBdeCount
== 0)
8251 /* type of ELS cmd is first 32bit word
/* HBQ mode: buffer already delivered with the iocb; otherwise the
 * buffer must be looked up on the ring by its physical address.
 */
8254 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
8255 elsiocb
->context2
= bdeBuf1
;
8257 paddr
= getPaddr(icmd
->un
.cont64
[0].addrHigh
,
8258 icmd
->un
.cont64
[0].addrLow
);
8259 elsiocb
->context2
= lpfc_sli_ringpostbuf_get(phba
, pring
,
8263 lpfc_els_unsol_buffer(phba
, pring
, vport
, elsiocb
);
8265 * The different unsolicited event handlers would tell us
8266 * if they are done with "mp" by setting context2 to NULL.
8268 if (elsiocb
->context2
) {
8269 lpfc_in_buf_free(phba
, (struct lpfc_dmabuf
*)elsiocb
->context2
);
8270 elsiocb
->context2
= NULL
;
8273 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
8274 if ((phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) &&
8275 icmd
->ulpBdeCount
== 2) {
8276 elsiocb
->context2
= bdeBuf2
;
8277 lpfc_els_unsol_buffer(phba
, pring
, vport
, elsiocb
);
8278 /* free mp if we are done with it */
8279 if (elsiocb
->context2
) {
8280 lpfc_in_buf_free(phba
, elsiocb
->context2
);
8281 elsiocb
->context2
= NULL
;
/* lpfc_start_fdmi - Kick off FDMI registration by issuing a PLOGI to the
 * well-known FDMI DID. Allocates (or re-enables) the FDMI ndlp as needed.
 */
8287 lpfc_start_fdmi(struct lpfc_vport
*vport
)
8289 struct lpfc_nodelist
*ndlp
;
8291 /* If this is the first time, allocate an ndlp and initialize
8292 * it. Otherwise, make sure the node is enabled and then do the
8295 ndlp
= lpfc_findnode_did(vport
, FDMI_DID
);
8297 ndlp
= lpfc_nlp_init(vport
, FDMI_DID
);
8299 ndlp
->nlp_type
|= NLP_FABRIC
;
/* Re-activate an existing but inactive node before using it */
8304 if (!NLP_CHK_NODE_ACT(ndlp
))
8305 ndlp
= lpfc_enable_node(vport
, ndlp
, NLP_STE_NPR_NODE
);
8308 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_PLOGI_ISSUE
);
8309 lpfc_issue_els_plogi(vport
, ndlp
->nlp_DID
, 0);
8314 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
8315 * @phba: pointer to lpfc hba data structure.
8316 * @vport: pointer to a virtual N_Port data structure.
8318 * This routine issues a Port Login (PLOGI) to the Name Server with
8319 * State Change Request (SCR) for a @vport. This routine will create an
8320 * ndlp for the Name Server associated to the @vport if such node does
8321 * not already exist. The PLOGI to Name Server is issued by invoking the
8322 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
8323 * (FDMI) is configured to the @vport, a FDMI node will be created and
8324 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
8327 lpfc_do_scr_ns_plogi(struct lpfc_hba
*phba
, struct lpfc_vport
*vport
)
8329 struct lpfc_nodelist
*ndlp
;
8330 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
8333 * If lpfc_delay_discovery parameter is set and the clean address
8334 * bit is cleared and fc fabric parameters changed, delay FC NPort
/* Discovery delayed: arm delayed_disc_tmo for fc_ratov seconds and bail */
8337 spin_lock_irq(shost
->host_lock
);
8338 if (vport
->fc_flag
& FC_DISC_DELAYED
) {
8339 spin_unlock_irq(shost
->host_lock
);
8340 lpfc_printf_log(phba
, KERN_ERR
, LOG_DISCOVERY
,
8341 "3334 Delay fc port discovery for %d seconds\n",
8343 mod_timer(&vport
->delayed_disc_tmo
,
8344 jiffies
+ msecs_to_jiffies(1000 * phba
->fc_ratov
));
8347 spin_unlock_irq(shost
->host_lock
);
/* Find or create the Name Server node */
8349 ndlp
= lpfc_findnode_did(vport
, NameServer_DID
);
8351 ndlp
= lpfc_nlp_init(vport
, NameServer_DID
);
/* Allocation failed: on loop topology fall back to direct discovery,
 * otherwise mark the vport failed.
 */
8353 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
8354 lpfc_disc_start(vport
);
8357 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
8358 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
8359 "0251 NameServer login: no memory\n");
8362 } else if (!NLP_CHK_NODE_ACT(ndlp
)) {
8363 ndlp
= lpfc_enable_node(vport
, ndlp
, NLP_STE_UNUSED_NODE
);
8365 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
8366 lpfc_disc_start(vport
);
8369 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
8370 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
8371 "0348 NameServer login: node freed\n");
8375 ndlp
->nlp_type
|= NLP_FABRIC
;
8377 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_PLOGI_ISSUE
);
8379 if (lpfc_issue_els_plogi(vport
, ndlp
->nlp_DID
, 0)) {
8380 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
8381 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
8382 "0252 Cannot issue NameServer login\n");
/* Start FDMI registration when SmartSAN or FDMI support is configured
 * and the vport allows FDMI.
 */
8386 if ((phba
->cfg_enable_SmartSAN
||
8387 (phba
->cfg_fdmi_on
== LPFC_FDMI_SUPPORT
)) &&
8388 (vport
->load_flag
& FC_ALLOW_FDMI
))
8389 lpfc_start_fdmi(vport
);
8393 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
8394 * @phba: pointer to lpfc hba data structure.
8395 * @pmb: pointer to the driver internal queue element for mailbox command.
8397 * This routine is the completion callback function to register new vport
8398 * mailbox command. If the new vport mailbox command completes successfully,
8399 * the fabric registration login shall be performed on physical port (the
8400 * new vport created is actually a physical port, with VPI 0) or the port
8401 * login to Name Server for State Change Request (SCR) will be performed
8402 * on virtual port (real virtual port, with VPI greater than 0).
8405 lpfc_cmpl_reg_new_vport(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
8407 struct lpfc_vport
*vport
= pmb
->vport
;
8408 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
8409 struct lpfc_nodelist
*ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
8410 MAILBOX_t
*mb
= &pmb
->u
.mb
;
/* REG_VPI attempt has completed; clear the "needs reg" flag first */
8413 spin_lock_irq(shost
->host_lock
);
8414 vport
->fc_flag
&= ~FC_VPORT_NEEDS_REG_VPI
;
8415 spin_unlock_irq(shost
->host_lock
);
8417 if (mb
->mbxStatus
) {
8418 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
8419 "0915 Register VPI failed : Status: x%x"
8420 " upd bit: x%x \n", mb
->mbxStatus
,
8421 mb
->un
.varRegVpi
.upd
;
8422 if (phba
->sli_rev
== LPFC_SLI_REV4
&&
8423 mb
->un
.varRegVpi
.upd
)
8424 goto mbox_err_exit
;
8426 switch (mb
->mbxStatus
) {
8427 case 0x11: /* unsupported feature */
8428 case 0x9603: /* max_vpi exceeded */
8429 case 0x9602: /* Link event since CLEAR_LA */
8430 /* giving up on vport registration */
8431 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
8432 spin_lock_irq(shost
->host_lock
);
8433 vport
->fc_flag
&= ~(FC_FABRIC
| FC_PUBLIC_LOOP
);
8434 spin_unlock_irq(shost
->host_lock
);
8435 lpfc_can_disctmo(vport
);
8437 /* If reg_vpi fail with invalid VPI status, re-init VPI */
8439 spin_lock_irq(shost
->host_lock
);
8440 vport
->fc_flag
|= FC_VPORT_NEEDS_REG_VPI
;
8441 spin_unlock_irq(shost
->host_lock
);
/* Reuse this mailbox element to send INIT_VPI for the same vpi */
8442 lpfc_init_vpi(phba
, pmb
, vport
->vpi
);
8444 pmb
->mbox_cmpl
= lpfc_init_vpi_cmpl
;
8445 rc
= lpfc_sli_issue_mbox(phba
, pmb
,
8447 if (rc
== MBX_NOT_FINISHED
) {
8448 lpfc_printf_vlog(vport
,
8450 "2732 Failed to issue INIT_VPI"
8451 " mailbox command\n");
8458 /* Try to recover from this error */
8459 if (phba
->sli_rev
== LPFC_SLI_REV4
)
8460 lpfc_sli4_unreg_all_rpis(vport
);
8461 lpfc_mbx_unreg_vpi(vport
);
8462 spin_lock_irq(shost
->host_lock
);
8463 vport
->fc_flag
|= FC_VPORT_NEEDS_REG_VPI
;
8464 spin_unlock_irq(shost
->host_lock
);
8465 if (mb
->mbxStatus
== MBX_NOT_FINISHED
)
/* Physical port (unless DID changed via LOGO): restart fabric login;
 * virtual ports retry with FDISC instead.
 */
8467 if ((vport
->port_type
== LPFC_PHYSICAL_PORT
) &&
8468 !(vport
->fc_flag
& FC_LOGO_RCVD_DID_CHNG
)) {
8469 if (phba
->sli_rev
== LPFC_SLI_REV4
)
8470 lpfc_issue_init_vfi(vport
);
8472 lpfc_initial_flogi(vport
);
8474 lpfc_initial_fdisc(vport
);
/* Success path: mark VPI registered and continue discovery */
8479 spin_lock_irq(shost
->host_lock
);
8480 vport
->vpi_state
|= LPFC_VPI_REGISTERED
;
8481 spin_unlock_irq(shost
->host_lock
);
8482 if (vport
== phba
->pport
) {
8483 if (phba
->sli_rev
< LPFC_SLI_REV4
)
8484 lpfc_issue_fabric_reglogin(vport
);
8487 * If the physical port is instantiated using
8488 * FDISC, do not start vport discovery.
8490 if (vport
->port_state
!= LPFC_FDISC
)
8491 lpfc_start_fdiscs(phba
);
8492 lpfc_do_scr_ns_plogi(phba
, vport
);
8495 lpfc_do_scr_ns_plogi(phba
, vport
);
8498 /* Now, we decrement the ndlp reference count held for this
8503 mempool_free(pmb
, phba
->mbox_mem_pool
);
8508 * lpfc_register_new_vport - Register a new vport with a HBA
8509 * @phba: pointer to lpfc hba data structure.
8510 * @vport: pointer to a host virtual N_Port data structure.
8511 * @ndlp: pointer to a node-list data structure.
8513 * This routine registers the @vport as a new virtual port with a HBA.
8514 * It is done through a registering vpi mailbox command.
8517 lpfc_register_new_vport(struct lpfc_hba
*phba
, struct lpfc_vport
*vport
,
8518 struct lpfc_nodelist
*ndlp
)
8520 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
8523 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
8525 lpfc_reg_vpi(vport
, mbox
);
8526 mbox
->vport
= vport
;
/* Hold an ndlp reference for the completion handler via context2 */
8527 mbox
->context2
= lpfc_nlp_get(ndlp
);
8528 mbox
->mbox_cmpl
= lpfc_cmpl_reg_new_vport
;
8529 if (lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
)
8530 == MBX_NOT_FINISHED
) {
8531 /* mailbox command not success, decrement ndlp
8532 * reference count for this command
8535 mempool_free(mbox
, phba
->mbox_mem_pool
);
8537 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
8538 "0253 Register VPI: Can't send mbox\n");
8542 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
8543 "0254 Register VPI: no memory\n");
/* Common error exit: fail the vport and clear the pending-reg flag */
8549 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
8550 spin_lock_irq(shost
->host_lock
);
8551 vport
->fc_flag
&= ~FC_VPORT_NEEDS_REG_VPI
;
8552 spin_unlock_irq(shost
->host_lock
);
8557 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
8558 * @phba: pointer to lpfc hba data structure.
8560 * This routine cancels the retry delay timers to all the vports.
8563 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba
*phba
)
8565 struct lpfc_vport
**vports
;
8566 struct lpfc_nodelist
*ndlp
;
8567 uint32_t link_state
;
8570 /* Treat this failure as linkdown for all vports */
/* Preserve and restore link_state around the synthetic linkdown */
8571 link_state
= phba
->link_state
;
8572 lpfc_linkdown(phba
);
8573 phba
->link_state
= link_state
;
8575 vports
= lpfc_create_vport_work_array(phba
);
/* For every vport: cancel the Fabric node's retry timer and flush
 * any outstanding ELS commands.
 */
8578 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
8579 ndlp
= lpfc_findnode_did(vports
[i
], Fabric_DID
);
8581 lpfc_cancel_retry_delay_tmo(vports
[i
], ndlp
);
8582 lpfc_els_flush_cmd(vports
[i
]);
8584 lpfc_destroy_vport_work_array(phba
, vports
);
8589 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
8590 * @phba: pointer to lpfc hba data structure.
8592 * This routine abort all pending discovery commands and
8593 * start a timer to retry FLOGI for the physical port
8597 lpfc_retry_pport_discovery(struct lpfc_hba
*phba
)
8599 struct lpfc_nodelist
*ndlp
;
8600 struct Scsi_Host
*shost
;
8602 /* Cancel the all vports retry delay retry timers */
8603 lpfc_cancel_all_vport_retry_delay_timer(phba
);
8605 /* If fabric require FLOGI, then re-instantiate physical login */
8606 ndlp
= lpfc_findnode_did(phba
->pport
, Fabric_DID
);
/* Arm a 1-second delay timer on the Fabric node, then record FLOGI as
 * the command to retry when the timer fires.
 */
8610 shost
= lpfc_shost_from_vport(phba
->pport
);
8611 mod_timer(&ndlp
->nlp_delayfunc
, jiffies
+ msecs_to_jiffies(1000));
8612 spin_lock_irq(shost
->host_lock
);
8613 ndlp
->nlp_flag
|= NLP_DELAY_TMO
;
8614 spin_unlock_irq(shost
->host_lock
);
8615 ndlp
->nlp_last_elscmd
= ELS_CMD_FLOGI
;
8616 phba
->pport
->port_state
= LPFC_FLOGI
;
8621 * lpfc_fabric_login_reqd - Check if FLOGI required.
8622 * @phba: pointer to lpfc hba data structure.
8623 * @cmdiocb: pointer to FDISC command iocb.
8624 * @rspiocb: pointer to FDISC response iocb.
8626 * This routine checks if a FLOGI is required for FDISC
8630 lpfc_fabric_login_reqd(struct lpfc_hba
*phba
,
8631 struct lpfc_iocbq
*cmdiocb
,
8632 struct lpfc_iocbq
*rspiocb
)
/* FLOGI is required only when the FDISC was fabric-rejected with the
 * explicit "login required" reason in word 4 of the response.
 */
8635 if ((rspiocb
->iocb
.ulpStatus
!= IOSTAT_FABRIC_RJT
) ||
8636 (rspiocb
->iocb
.un
.ulpWord
[4] != RJT_LOGIN_REQUIRED
))
8643 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
8644 * @phba: pointer to lpfc hba data structure.
8645 * @cmdiocb: pointer to lpfc command iocb data structure.
8646 * @rspiocb: pointer to lpfc response iocb data structure.
8648 * This routine is the completion callback function to a Fabric Discover
8649 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
8650 * single threaded, each FDISC completion callback function will reset
8651 * the discovery timer for all vports such that the timers will not get
8652 * unnecessary timeout. The function checks the FDISC IOCB status. If error
8653 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the
8654 * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID
8655 * assigned to the vport has been changed with the completion of the FDISC
8656 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
8657 * are unregistered from the HBA, and then the lpfc_register_new_vport()
8658 * routine is invoked to register new vport with the HBA. Otherwise, the
8659 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
8660 * Server for State Change Request (SCR).
8663 lpfc_cmpl_els_fdisc(struct lpfc_hba
*phba
, struct lpfc_iocbq
*cmdiocb
,
8664 struct lpfc_iocbq
*rspiocb
)
8666 struct lpfc_vport
*vport
= cmdiocb
->vport
;
8667 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
8668 struct lpfc_nodelist
*ndlp
= (struct lpfc_nodelist
*) cmdiocb
->context1
;
8669 struct lpfc_nodelist
*np
;
8670 struct lpfc_nodelist
*next_np
;
8671 IOCB_t
*irsp
= &rspiocb
->iocb
;
8672 struct lpfc_iocbq
*piocb
;
8673 struct lpfc_dmabuf
*pcmd
= cmdiocb
->context2
, *prsp
;
8674 struct serv_parm
*sp
;
8675 uint8_t fabric_param_changed
;
8677 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
8678 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
8679 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
8681 /* Since all FDISCs are being single threaded, we
8682 * must reset the discovery timer for ALL vports
8683 * waiting to send FDISC when one completes.
8685 list_for_each_entry(piocb
, &phba
->fabric_iocb_list
, list
) {
8686 lpfc_set_disctmo(piocb
->vport
);
8689 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
8690 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
8691 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4], vport
->fc_prevDID
);
/* Error path: fabric may demand a fresh FLOGI, otherwise retry FDISC */
8693 if (irsp
->ulpStatus
) {
8695 if (lpfc_fabric_login_reqd(phba
, cmdiocb
, rspiocb
)) {
8696 lpfc_retry_pport_discovery(phba
);
8700 /* Check for retry */
8701 if (lpfc_els_retry(phba
, cmdiocb
, rspiocb
))
8704 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
8705 "0126 FDISC failed. (x%x/x%x)\n",
8706 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4]);
/* Success path: the vport is now attached to the fabric */
8709 spin_lock_irq(shost
->host_lock
);
8710 vport
->fc_flag
&= ~FC_VPORT_CVL_RCVD
;
8711 vport
->fc_flag
&= ~FC_VPORT_LOGO_RCVD
;
8712 vport
->fc_flag
|= FC_FABRIC
;
8713 if (vport
->phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
)
8714 vport
->fc_flag
|= FC_PUBLIC_LOOP
;
8715 spin_unlock_irq(shost
->host_lock
);
/* Word 4 of a good FDISC response carries the DID assigned to us */
8717 vport
->fc_myDID
= irsp
->un
.ulpWord
[4] & Mask_DID
;
8718 lpfc_vport_set_state(vport
, FC_VPORT_ACTIVE
);
8719 prsp
= list_get_first(&pcmd
->list
, struct lpfc_dmabuf
, list
);
/* Service parameters follow the 4-byte ELS command in the response */
8722 sp
= prsp
->virt
+ sizeof(uint32_t);
8723 fabric_param_changed
= lpfc_check_clean_addr_bit(vport
, sp
);
8724 memcpy(&vport
->fabric_portname
, &sp
->portName
,
8725 sizeof(struct lpfc_name
));
8726 memcpy(&vport
->fabric_nodename
, &sp
->nodeName
,
8727 sizeof(struct lpfc_name
));
8728 if (fabric_param_changed
&&
8729 !(vport
->fc_flag
& FC_VPORT_NEEDS_REG_VPI
)) {
8730 /* If our NportID changed, we need to ensure all
8731 * remaining NPORTs get unreg_login'ed so we can
8734 list_for_each_entry_safe(np
, next_np
,
8735 &vport
->fc_nodes
, nlp_listp
) {
/* NOTE(review): the loop iterates 'np' but the activity test below
 * checks 'ndlp' — this looks like it should be NLP_CHK_NODE_ACT(np);
 * confirm against the upstream lpfc driver before changing.
 */
8736 if (!NLP_CHK_NODE_ACT(ndlp
) ||
8737 (np
->nlp_state
!= NLP_STE_NPR_NODE
) ||
8738 !(np
->nlp_flag
& NLP_NPR_ADISC
))
8740 spin_lock_irq(shost
->host_lock
);
8741 np
->nlp_flag
&= ~NLP_NPR_ADISC
;
8742 spin_unlock_irq(shost
->host_lock
);
8743 lpfc_unreg_rpi(vport
, np
);
8745 lpfc_cleanup_pending_mbox(vport
);
8747 if (phba
->sli_rev
== LPFC_SLI_REV4
)
8748 lpfc_sli4_unreg_all_rpis(vport
);
8750 lpfc_mbx_unreg_vpi(vport
);
8751 spin_lock_irq(shost
->host_lock
);
8752 vport
->fc_flag
|= FC_VPORT_NEEDS_REG_VPI
;
8753 if (phba
->sli_rev
== LPFC_SLI_REV4
)
8754 vport
->fc_flag
|= FC_VPORT_NEEDS_INIT_VPI
;
8756 vport
->fc_flag
|= FC_LOGO_RCVD_DID_CHNG
;
8757 spin_unlock_irq(shost
->host_lock
);
8758 } else if ((phba
->sli_rev
== LPFC_SLI_REV4
) &&
8759 !(vport
->fc_flag
& FC_VPORT_NEEDS_REG_VPI
)) {
8761 * Driver needs to re-reg VPI in order for f/w
8762 * to update the MAC address.
8764 lpfc_register_new_vport(phba
, vport
, ndlp
);
/* Re-init/re-register VPI if flagged, otherwise go straight to the
 * Name Server PLOGI.
 */
8768 if (vport
->fc_flag
& FC_VPORT_NEEDS_INIT_VPI
)
8769 lpfc_issue_init_vpi(vport
);
8770 else if (vport
->fc_flag
& FC_VPORT_NEEDS_REG_VPI
)
8771 lpfc_register_new_vport(phba
, vport
, ndlp
);
8773 lpfc_do_scr_ns_plogi(phba
, vport
);
8776 if (vport
->fc_vport
&&
8777 (vport
->fc_vport
->vport_state
!= FC_VPORT_NO_FABRIC_RSCS
))
8778 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
8779 /* Cancel discovery timer */
8780 lpfc_can_disctmo(vport
);
8783 lpfc_els_free_iocb(phba
, cmdiocb
);
8787 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
8788 * @vport: pointer to a virtual N_Port data structure.
8789 * @ndlp: pointer to a node-list data structure.
8790 * @retry: number of retries to the command IOCB.
8792 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
8793 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
8794 * routine to issue the IOCB, which makes sure only one outstanding fabric
8795 * IOCB will be sent off HBA at any given time.
8797 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
8798 * will be incremented by 1 for holding the ndlp and the reference to ndlp
8799 * will be stored into the context1 field of the IOCB for the completion
8800 * callback function to the FDISC ELS command.
8803 * 0 - Successfully issued fdisc iocb command
8804 * 1 - Failed to issue fdisc iocb command
8807 lpfc_issue_els_fdisc(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
8810 struct lpfc_hba
*phba
= vport
->phba
;
8812 struct lpfc_iocbq
*elsiocb
;
8813 struct serv_parm
*sp
;
8816 int did
= ndlp
->nlp_DID
;
/* FDISC starts with DID 0; the fabric assigns our NPort ID on accept */
8819 vport
->port_state
= LPFC_FDISC
;
8820 vport
->fc_myDID
= 0;
8821 cmdsize
= (sizeof(uint32_t) + sizeof(struct serv_parm
));
8822 elsiocb
= lpfc_prep_els_iocb(vport
, 1, cmdsize
, retry
, ndlp
, did
,
8825 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
8826 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
8827 "0255 Issue FDISC: no IOCB\n");
8831 icmd
= &elsiocb
->iocb
;
8832 icmd
->un
.elsreq64
.myID
= 0;
8833 icmd
->un
.elsreq64
.fl
= 1;
8836 * SLI3 ports require a different context type value than SLI4.
8837 * Catch SLI3 ports here and override the prep.
8839 if (phba
->sli_rev
== LPFC_SLI_REV3
) {
/* Build the FDISC payload: ELS command word followed by the physical
 * port's service parameters, adjusted for fabric login below.
 */
8844 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
8845 *((uint32_t *) (pcmd
)) = ELS_CMD_FDISC
;
8846 pcmd
+= sizeof(uint32_t); /* CSP Word 1 */
8847 memcpy(pcmd
, &vport
->phba
->pport
->fc_sparam
, sizeof(struct serv_parm
));
8848 sp
= (struct serv_parm
*) pcmd
;
8849 /* Setup CSPs accordingly for Fabric */
8850 sp
->cmn
.e_d_tov
= 0;
8851 sp
->cmn
.w2
.r_a_tov
= 0;
8852 sp
->cmn
.virtual_fabric_support
= 0;
8853 sp
->cls1
.classValid
= 0;
8854 sp
->cls2
.seqDelivery
= 1;
8855 sp
->cls3
.seqDelivery
= 1;
/* Overwrite the copied sparams' names with this vport's WWPN/WWNN */
8857 pcmd
+= sizeof(uint32_t); /* CSP Word 2 */
8858 pcmd
+= sizeof(uint32_t); /* CSP Word 3 */
8859 pcmd
+= sizeof(uint32_t); /* CSP Word 4 */
8860 pcmd
+= sizeof(uint32_t); /* Port Name */
8861 memcpy(pcmd
, &vport
->fc_portname
, 8);
8862 pcmd
+= sizeof(uint32_t); /* Node Name */
8863 pcmd
+= sizeof(uint32_t); /* Node Name */
8864 memcpy(pcmd
, &vport
->fc_nodename
, 8);
8865 sp
->cmn
.valid_vendor_ver_level
= 0;
8866 memset(sp
->un
.vendorVersion
, 0, sizeof(sp
->un
.vendorVersion
));
8867 lpfc_set_disctmo(vport
);
8869 phba
->fc_stat
.elsXmitFDISC
++;
8870 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_fdisc
;
8872 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
8873 "Issue FDISC: did:x%x",
/* Issue through the fabric iocb path (one outstanding at a time) */
8876 rc
= lpfc_issue_fabric_iocb(phba
, elsiocb
);
8877 if (rc
== IOCB_ERROR
) {
8878 lpfc_els_free_iocb(phba
, elsiocb
);
8879 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
8880 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
8881 "0256 Issue FDISC: Cannot send IOCB\n");
8884 lpfc_vport_set_state(vport
, FC_VPORT_INITIALIZING
);
8889 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
8890 * @phba: pointer to lpfc hba data structure.
8891 * @cmdiocb: pointer to lpfc command iocb data structure.
8892 * @rspiocb: pointer to lpfc response iocb data structure.
8894 * This routine is the completion callback function to the issuing of a LOGO
8895 * ELS command off a vport. It frees the command IOCB and then decrement the
8896 * reference count held on ndlp for this completion function, indicating that
8897 * the reference to the ndlp is no long needed. Note that the
8898 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
8899 * callback function and an additional explicit ndlp reference decrementation
8900 * will trigger the actual release of the ndlp.
8903 lpfc_cmpl_els_npiv_logo(struct lpfc_hba
*phba
, struct lpfc_iocbq
*cmdiocb
,
8904 struct lpfc_iocbq
*rspiocb
)
8906 struct lpfc_vport
*vport
= cmdiocb
->vport
;
8908 struct lpfc_nodelist
*ndlp
;
8909 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
8911 ndlp
= (struct lpfc_nodelist
*)cmdiocb
->context1
;
8912 irsp
= &rspiocb
->iocb
;
8913 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
8914 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
8915 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4], irsp
->un
.rcvels
.remoteID
);
8917 lpfc_els_free_iocb(phba
, cmdiocb
);
8918 vport
->unreg_vpi_cmpl
= VPORT_ERROR
;
8920 /* Trigger the release of the ndlp after logo */
8923 /* NPIV LOGO completes to NPort <nlp_DID> */
8924 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
8925 "2928 NPIV LOGO completes to NPort x%x "
8926 "Data: x%x x%x x%x x%x\n",
8927 ndlp
->nlp_DID
, irsp
->ulpStatus
, irsp
->un
.ulpWord
[4],
8928 irsp
->ulpTimeout
, vport
->num_disc_nodes
);
/* On a clean LOGO, the vport is no longer fabric-attached and any
 * outstanding discovery can be cancelled.
 */
8930 if (irsp
->ulpStatus
== IOSTAT_SUCCESS
) {
8931 spin_lock_irq(shost
->host_lock
);
8932 vport
->fc_flag
&= ~FC_NDISC_ACTIVE
;
8933 vport
->fc_flag
&= ~FC_FABRIC
;
8934 spin_unlock_irq(shost
->host_lock
);
8935 lpfc_can_disctmo(vport
);
8940 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
8941 * @vport: pointer to a virtual N_Port data structure.
8942 * @ndlp: pointer to a node-list data structure.
8944 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
8946 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
8947 * will be incremented by 1 for holding the ndlp and the reference to ndlp
8948 * will be stored into the context1 field of the IOCB for the completion
8949 * callback function to the LOGO ELS command.
8952 * 0 - Successfully issued logo off the @vport
8953 * 1 - Failed to issue logo off the @vport
8956 lpfc_issue_els_npiv_logo(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
8958 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
8959 struct lpfc_hba
*phba
= vport
->phba
;
8960 struct lpfc_iocbq
*elsiocb
;
/* LOGO payload = command word + our DID word + our port name */
8964 cmdsize
= 2 * sizeof(uint32_t) + sizeof(struct lpfc_name
);
8965 elsiocb
= lpfc_prep_els_iocb(vport
, 1, cmdsize
, 0, ndlp
, ndlp
->nlp_DID
,
8970 pcmd
= (uint8_t *) (((struct lpfc_dmabuf
*) elsiocb
->context2
)->virt
);
8971 *((uint32_t *) (pcmd
)) = ELS_CMD_LOGO
;
8972 pcmd
+= sizeof(uint32_t);
8974 /* Fill in LOGO payload */
8975 *((uint32_t *) (pcmd
)) = be32_to_cpu(vport
->fc_myDID
);
8976 pcmd
+= sizeof(uint32_t);
8977 memcpy(pcmd
, &vport
->fc_portname
, sizeof(struct lpfc_name
));
8979 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
8980 "Issue LOGO npiv did:x%x flg:x%x",
8981 ndlp
->nlp_DID
, ndlp
->nlp_flag
, 0);
/* Mark LOGO in flight before issuing; undo the flag if the issue fails */
8983 elsiocb
->iocb_cmpl
= lpfc_cmpl_els_npiv_logo
;
8984 spin_lock_irq(shost
->host_lock
);
8985 ndlp
->nlp_flag
|= NLP_LOGO_SND
;
8986 spin_unlock_irq(shost
->host_lock
);
8987 if (lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, elsiocb
, 0) ==
8989 spin_lock_irq(shost
->host_lock
);
8990 ndlp
->nlp_flag
&= ~NLP_LOGO_SND
;
8991 spin_unlock_irq(shost
->host_lock
);
8992 lpfc_els_free_iocb(phba
, elsiocb
);
8999 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
9000 * @t: pointer to the timer_list embedded in the lpfc hba structure.
9002 * This routine is invoked by the fabric iocb block timer after
9003 * timeout. It posts the fabric iocb block timeout event by setting the
9004 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes
9005 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for
9006 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the
9007 * posted event WORKER_FABRIC_BLOCK_TMO.
9010 lpfc_fabric_block_timeout(struct timer_list
*t
)
9012 struct lpfc_hba
*phba
= from_timer(phba
, t
, fabric_block_timer
);
9013 unsigned long iflags
;
9014 uint32_t tmo_posted
;
/* Post the event only once; tmo_posted records whether it was pending */
9016 spin_lock_irqsave(&phba
->pport
->work_port_lock
, iflags
);
9017 tmo_posted
= phba
->pport
->work_port_events
& WORKER_FABRIC_BLOCK_TMO
;
9019 phba
->pport
->work_port_events
|= WORKER_FABRIC_BLOCK_TMO
;
9020 spin_unlock_irqrestore(&phba
->pport
->work_port_lock
, iflags
);
9023 lpfc_worker_wake_up(phba
);
9028 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
9029 * @phba: pointer to lpfc hba data structure.
9031 * This routine issues one fabric iocb from the driver internal list to
9032 * the HBA. It first checks whether it's ready to issue one fabric iocb to
9033 * the HBA (whether there is no outstanding fabric iocb). If so, it shall
9034 * remove one pending fabric iocb from the driver internal list and invokes
9035 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
9038 lpfc_resume_fabric_iocbs(struct lpfc_hba
*phba
)
9040 struct lpfc_iocbq
*iocb
;
9041 unsigned long iflags
;
9047 spin_lock_irqsave(&phba
->hbalock
, iflags
);
9048 /* Post any pending iocb to the SLI layer */
9049 if (atomic_read(&phba
->fabric_iocb_count
) == 0) {
9050 list_remove_head(&phba
->fabric_iocb_list
, iocb
, typeof(*iocb
),
9053 /* Increment fabric iocb count to hold the position */
9054 atomic_inc(&phba
->fabric_iocb_count
);
9056 spin_unlock_irqrestore(&phba
->hbalock
, iflags
);
/* Interpose the fabric completion handler; the original iocb_cmpl is
 * stashed in fabric_iocb_cmpl so it can be chained or restored.
 */
9058 iocb
->fabric_iocb_cmpl
= iocb
->iocb_cmpl
;
9059 iocb
->iocb_cmpl
= lpfc_cmpl_fabric_iocb
;
9060 iocb
->iocb_flag
|= LPFC_IO_FABRIC
;
9062 lpfc_debugfs_disc_trc(iocb
->vport
, LPFC_DISC_TRC_ELS_CMD
,
9063 "Fabric sched1: ste:x%x",
9064 iocb
->vport
->port_state
, 0, 0);
9066 ret
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, iocb
, 0);
/* Issue failed: restore the original completion, fake a LOCAL_REJECT/
 * SLI_ABORTED status and complete the iocb back to its owner.
 */
9068 if (ret
== IOCB_ERROR
) {
9069 iocb
->iocb_cmpl
= iocb
->fabric_iocb_cmpl
;
9070 iocb
->fabric_iocb_cmpl
= NULL
;
9071 iocb
->iocb_flag
&= ~LPFC_IO_FABRIC
;
9073 cmd
->ulpStatus
= IOSTAT_LOCAL_REJECT
;
9074 cmd
->un
.ulpWord
[4] = IOERR_SLI_ABORTED
;
9075 iocb
->iocb_cmpl(phba
, iocb
, iocb
);
9077 atomic_dec(&phba
->fabric_iocb_count
);
/**
 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine unblocks the issuing fabric iocb command. The function
 * will clear the fabric iocb block bit and then invoke the routine
 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb
 * from the driver internal fabric iocb list.
 **/
static void
lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
{
	/* FABRIC_COMANDS_BLOCKED is the (historically misspelled) driver
	 * flag name; clearing it re-enables fabric iocb issue.
	 */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);

	lpfc_resume_fabric_iocbs(phba);
	return;
}
/**
 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine blocks the issuing fabric iocb for a specified amount of
 * time (currently 100 ms). This is done by setting the fabric iocb block
 * bit and setting up a timeout timer for 100ms. When the block bit is set,
 * no more fabric iocb will be issued out of the HBA.
 **/
static void
lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
{
	int blocked;

	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	/* Start a timer to unblock fabric iocbs after 100ms */
	/* NOTE(review): the !blocked guard was lost in extraction and
	 * reconstructed here — the timer is armed only by the caller that
	 * transitions the flag from clear to set; confirm upstream.
	 */
	if (!blocked)
		mod_timer(&phba->fabric_block_timer,
			  jiffies + msecs_to_jiffies(100));

	return;
}
/**
 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the callback function that is put to the fabric iocb's
 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
 * function first restores and invokes the original iocb's callback function
 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
 **/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
	struct lpfc_iocbq *rspiocb)
{
	struct ls_rjt stat;

	/* Only iocbs routed through lpfc_issue_fabric_iocb() may land here */
	BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);

	/* Fabric busy/temporarily-unavailable responses trigger a 100ms
	 * block of further fabric iocbs (see lpfc_block_fabric_iocbs()).
	 */
	switch (rspiocb->iocb.ulpStatus) {
		case IOSTAT_NPORT_RJT:
		case IOSTAT_FABRIC_RJT:
			if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
				lpfc_block_fabric_iocbs(phba);
			}
			break;

		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			lpfc_block_fabric_iocbs(phba);
			break;

		/* NOTE(review): this case label was lost in extraction and
		 * reconstructed as IOSTAT_LS_RJT — the LS_RJT payload is
		 * decoded below; confirm against the upstream source.
		 */
		case IOSTAT_LS_RJT:
			stat.un.lsRjtError =
				be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
			if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
				(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
				lpfc_block_fabric_iocbs(phba);
			break;
	}

	BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);

	/* Restore and invoke the original completion handler */
	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
	cmdiocb->fabric_iocb_cmpl = NULL;
	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);

	atomic_dec(&phba->fabric_iocb_count);
	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
		/* Post any pending iocbs to HBA */
		lpfc_resume_fabric_iocbs(phba);
	}
}
/**
 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 * @iocb: pointer to lpfc command iocb data structure.
 *
 * This routine is used as the top-level API for issuing a fabric iocb command
 * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver
 * function makes sure that only one fabric bound iocb will be outstanding at
 * any given time. As such, this function will first check to see whether there
 * is already an outstanding fabric iocb on the wire. If so, it will put the
 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
 * issued later. Otherwise, it will issue the iocb on the wire and update the
 * fabric iocb count to indicate that there is one fabric iocb on the wire.
 *
 * Note, this implementation has a potential of sending out fabric IOCBs out
 * of order. The problem is caused by the construction of the "ready" boolean
 * not including the condition that the internal fabric IOCB list is empty. As
 * such, it is possible a fabric IOCB issued by this routine might "jump"
 * ahead of the fabric IOCBs in the internal list.
 *
 * Return code
 *   IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
 *   IOCB_ERROR - failed to issue fabric iocb
 **/
static int
lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
	unsigned long iflags;
	int ready;
	int ret;

	/* At most one fabric iocb may be outstanding at any time */
	BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);

	spin_lock_irqsave(&phba->hbalock, iflags);
	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);

	/* NOTE(review): the if (ready) branching was lost in extraction and
	 * reconstructed here; confirm against the upstream source.
	 */
	if (ready)
		/* Increment fabric iocb count to hold the position */
		atomic_inc(&phba->fabric_iocb_count);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (ready) {
		/* Interpose the fabric completion handler, preserving the
		 * caller's handler for later restoration.
		 */
		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
		iocb->iocb_flag |= LPFC_IO_FABRIC;

		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
				      "Fabric sched2:   ste:x%x",
				      iocb->vport->port_state, 0, 0);

		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);

		if (ret == IOCB_ERROR) {
			/* Issue failed: undo the interposition and drop the
			 * held position so callers can retry/clean up.
			 */
			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
			iocb->fabric_iocb_cmpl = NULL;
			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
			atomic_dec(&phba->fabric_iocb_count);
		}
	} else {
		/* Wire is busy or blocked: queue the iocb for later resume */
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		ret = IOCB_SUCCESS;
	}
	return ret;
}
/**
 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine aborts all the IOCBs associated with a @vport from the
 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @vport off the list, sets the
 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
 * associated with the IOCB.
 **/
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *tmp_iocb, *piocb;

	/* Move matching iocbs to a private list under the lock, then cancel
	 * them after dropping it — completion callbacks may sleep or re-lock.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {

		if (piocb->vport != vport)
			continue;

		list_move_tail(&piocb->list, &completions);
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
/**
 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine aborts all the IOCBs associated with an @ndlp from the
 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @ndlp off the list, sets the
 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
 * associated with the IOCB.
 **/
void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_hba  *phba = ndlp->phba;
	struct lpfc_iocbq *tmp_iocb, *piocb;
	struct lpfc_sli_ring *pring;

	/* The ELS ring may not exist yet (e.g. during early init/teardown) */
	pring = lpfc_phba_elsring(phba);

	if (unlikely(!pring))
		return;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {
		/* lpfc_check_sli_ndlp() decides whether this iocb targets
		 * the given node.
		 */
		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {

			list_move_tail(&piocb->list, &completions);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
/**
 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the IOCBs currently on the driver internal
 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
 * list, removes IOCBs off the list, sets the status field to
 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
 * each IOCB.
 **/
void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);

	/* Detach the whole pending list atomically, then cancel outside
	 * the lock.
	 */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->fabric_iocb_list, &completions);
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
/**
 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal. It clears the ndlp back-pointer in every aborted
 * ELS sglq entry that still references a node belonging to @vport, so the
 * node can be freed without dangling references.
 **/
void
lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;

	/* Lock order: hbalock (irqsave) then sgl_list_lock — matches
	 * lpfc_sli4_els_xri_aborted() below.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry_safe(sglq_entry, sglq_next,
			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
			sglq_entry->ndlp = NULL;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
/**
 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the els xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 slow-path
 * ELS aborted xri. It looks up the aborted XRI first on the aborted-ELS
 * sgl list (freeing the sglq back to the active list and marking the RRQ
 * active), and failing that on the active sglq table (marking the entry
 * SGL_XRI_ABORTED).
 **/
void
lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	uint16_t lxri = 0;

	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring;

	pring = lpfc_phba_elsring(phba);

	/* Lock order: hbalock (irqsave) then sgl_list_lock */
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry_safe(sglq_entry, sglq_next,
			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->sli4_xritag == xri) {
			/* Return the sglq to the free ELS sgl list and
			 * record the remote exchange in the RRQ before
			 * the XRI may be reused.
			 */
			list_del(&sglq_entry->list);
			ndlp = sglq_entry->ndlp;
			sglq_entry->ndlp = NULL;
			list_add_tail(&sglq_entry->list,
				&phba->sli4_hba.lpfc_els_sgl_list);
			sglq_entry->state = SGL_FREED;
			spin_unlock(&phba->sli4_hba.sgl_list_lock);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			/* NOTE(review): trailing arguments of this call were
			 * lost in extraction and reconstructed as (rxid, 1);
			 * confirm against the upstream source.
			 */
			lpfc_set_rrq_active(phba, ndlp,
				sglq_entry->sli4_lxritag,
				rxid, 1);

			/* Check if TXQ queue needs to be serviced */
			if (pring && !list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	/* Not on the aborted list: translate the XRI and mark the active
	 * sglq entry aborted instead.
	 */
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri == NO_XRI) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	sglq_entry->state = SGL_XRI_ABORTED;
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
 * @vport: pointer to virtual port object.
 * @ndlp: nodelist pointer for the impacted node.
 *
 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
 * the driver is required to send a LOGO to the remote node before it
 * attempts to recover its login to the remote node.
 */
void
lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
			   struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	unsigned long flags = 0;

	shost = lpfc_shost_from_vport(vport);
	/* NOTE(review): this assignment was lost in extraction and
	 * reconstructed here — phba must be set before the logging calls
	 * below; confirm against the upstream source.
	 */
	phba = vport->phba;
	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_log(phba, KERN_INFO,
			LOG_SLI, "3093 No rport recovery needed. "
			"rport in state 0x%x\n", ndlp->nlp_state);
		return;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
		"3094 Start rport recovery on shost id 0x%x "
		"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
		"flags 0x%x\n",
		shost->host_no, ndlp->nlp_DID,
		vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
		ndlp->nlp_flag);
	/*
	 * The rport is not responding. Remove the FCP-2 flag to prevent
	 * an ADISC in the follow-up recovery code.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	spin_unlock_irqrestore(shost->host_lock, flags);
	lpfc_issue_els_logo(vport, ndlp, 0);
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
}