]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame_incremental - drivers/scsi/lpfc/lpfc_nportdisc.c
UBUNTU: Ubuntu-4.15.0-96.97
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / lpfc / lpfc_nportdisc.c
... / ...
CommitLineData
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/slab.h>
27#include <linux/interrupt.h>
28
29#include <scsi/scsi.h>
30#include <scsi/scsi_device.h>
31#include <scsi/scsi_host.h>
32#include <scsi/scsi_transport_fc.h>
33#include <scsi/fc/fc_fs.h>
34
35#include <linux/nvme-fc-driver.h>
36
37#include "lpfc_hw4.h"
38#include "lpfc_hw.h"
39#include "lpfc_sli.h"
40#include "lpfc_sli4.h"
41#include "lpfc_nl.h"
42#include "lpfc_disc.h"
43#include "lpfc.h"
44#include "lpfc_scsi.h"
45#include "lpfc_nvme.h"
46#include "lpfc_logmsg.h"
47#include "lpfc_crtn.h"
48#include "lpfc_vport.h"
49#include "lpfc_debugfs.h"
50
51
52/* Called to verify a rcv'ed ADISC was intended for us. */
53static int
54lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
55 struct lpfc_name *nn, struct lpfc_name *pn)
56{
57 /* First, we MUST have a RPI registered */
58 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
59 return 0;
60
61 /* Compare the ADISC rsp WWNN / WWPN matches our internal node
62 * table entry for that node.
63 */
64 if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
65 return 0;
66
67 if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
68 return 0;
69
70 /* we match, return success */
71 return 1;
72}
73
/*
 * lpfc_check_sparm - Validate a remote port's service parameters.
 * @vport: local virtual port whose own parameters are the reference
 * @ndlp:  node the parameters came from; its WWNN/WWPN are updated on success
 * @sp:    service parameters from the received payload; receive-size fields
 *         may be clamped in place to our own limits
 * @class: class of service the caller requires (CLASS1/CLASS2/CLASS3)
 * @flogi: nonzero when validating FLOGI parameters (skips size clamping)
 *
 * Returns 1 when the parameters are acceptable (ndlp name fields updated
 * from @sp), 0 when the required class is unsupported or a receive data
 * field size is zero.
 */
int
lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct serv_parm *sp, uint32_t class, int flogi)
{
	/* NOTE(review): hsp is volatile, presumably to force fresh reads of
	 * vport->fc_sparam on every access — confirm before changing.
	 */
	volatile struct serv_parm *hsp = &vport->fc_sparam;
	uint16_t hsp_value, ssp_value = 0;

	/*
	 * The receive data field size and buffer-to-buffer receive data field
	 * size entries are 16 bits but are represented as two 8-bit fields in
	 * the driver data structure to account for rsvd bits and other control
	 * bits. Reconstruct and compare the fields as a 16-bit values before
	 * correcting the byte values.
	 */
	if (sp->cls1.classValid) {
		if (!flogi) {
			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
				     hsp->cls1.rcvDataSizeLsb);
			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
				     sp->cls1.rcvDataSizeLsb);
			/* A zero receive size is never legal */
			if (!ssp_value)
				goto bad_service_param;
			/* Clamp the remote's size down to our own maximum */
			if (ssp_value > hsp_value) {
				sp->cls1.rcvDataSizeLsb =
					hsp->cls1.rcvDataSizeLsb;
				sp->cls1.rcvDataSizeMsb =
					hsp->cls1.rcvDataSizeMsb;
			}
		}
	} else if (class == CLASS1)
		/* Caller requires Class 1 but the remote does not offer it */
		goto bad_service_param;
	if (sp->cls2.classValid) {
		if (!flogi) {
			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
				     hsp->cls2.rcvDataSizeLsb);
			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
				     sp->cls2.rcvDataSizeLsb);
			if (!ssp_value)
				goto bad_service_param;
			if (ssp_value > hsp_value) {
				sp->cls2.rcvDataSizeLsb =
					hsp->cls2.rcvDataSizeLsb;
				sp->cls2.rcvDataSizeMsb =
					hsp->cls2.rcvDataSizeMsb;
			}
		}
	} else if (class == CLASS2)
		goto bad_service_param;
	if (sp->cls3.classValid) {
		if (!flogi) {
			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
				     hsp->cls3.rcvDataSizeLsb);
			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
				     sp->cls3.rcvDataSizeLsb);
			if (!ssp_value)
				goto bad_service_param;
			if (ssp_value > hsp_value) {
				sp->cls3.rcvDataSizeLsb =
					hsp->cls3.rcvDataSizeLsb;
				sp->cls3.rcvDataSizeMsb =
					hsp->cls3.rcvDataSizeMsb;
			}
		}
	} else if (class == CLASS3)
		goto bad_service_param;

	/*
	 * Preserve the upper four bits of the MSB from the PLOGI response.
	 * These bits contain the Buffer-to-Buffer State Change Number
	 * from the target and need to be passed to the FW.
	 */
	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
	if (ssp_value > hsp_value) {
		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
	}

	/* Parameters are acceptable; adopt the remote's names for this node */
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
	return 1;
bad_service_param:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0207 Device %x "
			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
			 "invalid service parameters. Ignoring device.\n",
			 ndlp->nlp_DID,
			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
	return 0;
}
168
169static void *
170lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
171 struct lpfc_iocbq *rspiocb)
172{
173 struct lpfc_dmabuf *pcmd, *prsp;
174 uint32_t *lp;
175 void *ptr = NULL;
176 IOCB_t *irsp;
177
178 irsp = &rspiocb->iocb;
179 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
180
181 /* For lpfc_els_abort, context2 could be zero'ed to delay
182 * freeing associated memory till after ABTS completes.
183 */
184 if (pcmd) {
185 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
186 list);
187 if (prsp) {
188 lp = (uint32_t *) prsp->virt;
189 ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
190 }
191 } else {
192 /* Force ulpStatus error since we are returning NULL ptr */
193 if (!(irsp->ulpStatus)) {
194 irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
195 irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
196 }
197 ptr = NULL;
198 }
199 return ptr;
200}
201
202
203
/*
 * Free resources / clean up outstanding I/Os
 * associated with a LPFC_NODELIST entry. This
 * routine effectively results in a "software abort".
 */
void
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;

	pring = lpfc_phba_elsring(phba);

	/* In case of error recovery path, we might have a NULL pring here */
	if (unlikely(!pring))
		return;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
			 "2819 Abort outstanding I/O on NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	/* Clean up all fabric IOs first.*/
	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
	 * of all ELS IOs that need an ABTS. The IOs need to stay on the
	 * txcmplq so that the abort operation completes them successfully.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
			list_add_tail(&iocb->dlist, &abort_list);
	}
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list.
	 * The lock is dropped between iterations so the abort path can
	 * run; dlist membership keeps the walk safe.
	 */
	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&iocb->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Reuse the local list head for the txq pass below. */
	INIT_LIST_HEAD(&abort_list);

	/* Now process the txq */
	spin_lock_irq(&phba->hbalock);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			/* Not yet issued to HW: just move to local list */
			list_del_init(&iocb->list);
			list_add_tail(&iocb->list, &abort_list);
		}
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &abort_list,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);

	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}
281
282static int
283lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
284 struct lpfc_iocbq *cmdiocb)
285{
286 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
287 struct lpfc_hba *phba = vport->phba;
288 struct lpfc_dmabuf *pcmd;
289 uint64_t nlp_portwwn = 0;
290 uint32_t *lp;
291 IOCB_t *icmd;
292 struct serv_parm *sp;
293 uint32_t ed_tov;
294 LPFC_MBOXQ_t *mbox;
295 struct ls_rjt stat;
296 uint32_t vid, flag;
297 int rc;
298
299 memset(&stat, 0, sizeof (struct ls_rjt));
300 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
301 lp = (uint32_t *) pcmd->virt;
302 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
303 if (wwn_to_u64(sp->portName.u.wwn) == 0) {
304 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
305 "0140 PLOGI Reject: invalid nname\n");
306 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
307 stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
308 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
309 NULL);
310 return 0;
311 }
312 if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
313 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
314 "0141 PLOGI Reject: invalid pname\n");
315 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
316 stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
317 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
318 NULL);
319 return 0;
320 }
321
322 nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
323 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
324 /* Reject this request because invalid parameters */
325 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
326 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
327 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
328 NULL);
329 return 0;
330 }
331 icmd = &cmdiocb->iocb;
332
333 /* PLOGI chkparm OK */
334 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
335 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
336 "x%x x%x x%x\n",
337 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
338 ndlp->nlp_rpi, vport->port_state,
339 vport->fc_flag);
340
341 if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
342 ndlp->nlp_fcp_info |= CLASS2;
343 else
344 ndlp->nlp_fcp_info |= CLASS3;
345
346 ndlp->nlp_class_sup = 0;
347 if (sp->cls1.classValid)
348 ndlp->nlp_class_sup |= FC_COS_CLASS1;
349 if (sp->cls2.classValid)
350 ndlp->nlp_class_sup |= FC_COS_CLASS2;
351 if (sp->cls3.classValid)
352 ndlp->nlp_class_sup |= FC_COS_CLASS3;
353 if (sp->cls4.classValid)
354 ndlp->nlp_class_sup |= FC_COS_CLASS4;
355 ndlp->nlp_maxframe =
356 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
357
358 /* if already logged in, do implicit logout */
359 switch (ndlp->nlp_state) {
360 case NLP_STE_NPR_NODE:
361 if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
362 break;
363 case NLP_STE_REG_LOGIN_ISSUE:
364 case NLP_STE_PRLI_ISSUE:
365 case NLP_STE_UNMAPPED_NODE:
366 case NLP_STE_MAPPED_NODE:
367 /* For initiators, lpfc_plogi_confirm_nport skips fabric did.
368 * For target mode, execute implicit logo.
369 * Fabric nodes go into NPR.
370 */
371 if (!(ndlp->nlp_type & NLP_FABRIC) &&
372 !(phba->nvmet_support)) {
373 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
374 ndlp, NULL);
375 return 1;
376 }
377 if (nlp_portwwn != 0 &&
378 nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
379 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
380 "0143 PLOGI recv'd from DID: x%x "
381 "WWPN changed: old %llx new %llx\n",
382 ndlp->nlp_DID,
383 (unsigned long long)nlp_portwwn,
384 (unsigned long long)
385 wwn_to_u64(sp->portName.u.wwn));
386
387 ndlp->nlp_prev_state = ndlp->nlp_state;
388 /* rport needs to be unregistered first */
389 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
390 break;
391 }
392
393 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
394 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
395 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
396 ndlp->nlp_flag &= ~NLP_FIRSTBURST;
397
398 /* Check for Nport to NPort pt2pt protocol */
399 if ((vport->fc_flag & FC_PT2PT) &&
400 !(vport->fc_flag & FC_PT2PT_PLOGI)) {
401 /* rcv'ed PLOGI decides what our NPortId will be */
402 vport->fc_myDID = icmd->un.rcvels.parmRo;
403
404 ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
405 if (sp->cmn.edtovResolution) {
406 /* E_D_TOV ticks are in nanoseconds */
407 ed_tov = (phba->fc_edtov + 999999) / 1000000;
408 }
409
410 /*
411 * For pt-to-pt, use the larger EDTOV
412 * RATOV = 2 * EDTOV
413 */
414 if (ed_tov > phba->fc_edtov)
415 phba->fc_edtov = ed_tov;
416 phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
417
418 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
419
420 /* Issue config_link / reg_vfi to account for updated TOV's */
421
422 if (phba->sli_rev == LPFC_SLI_REV4)
423 lpfc_issue_reg_vfi(vport);
424 else {
425 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
426 if (mbox == NULL)
427 goto out;
428 lpfc_config_link(phba, mbox);
429 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
430 mbox->vport = vport;
431 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
432 if (rc == MBX_NOT_FINISHED) {
433 mempool_free(mbox, phba->mbox_mem_pool);
434 goto out;
435 }
436 }
437
438 lpfc_can_disctmo(vport);
439 }
440
441 ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
442 if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
443 sp->cmn.valid_vendor_ver_level) {
444 vid = be32_to_cpu(sp->un.vv.vid);
445 flag = be32_to_cpu(sp->un.vv.flags);
446 if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP))
447 ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
448 }
449
450 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
451 if (!mbox)
452 goto out;
453
454 /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
455 if (phba->sli_rev == LPFC_SLI_REV4)
456 lpfc_unreg_rpi(vport, ndlp);
457
458 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
459 (uint8_t *) sp, mbox, ndlp->nlp_rpi);
460 if (rc) {
461 mempool_free(mbox, phba->mbox_mem_pool);
462 goto out;
463 }
464
465 /* ACC PLOGI rsp command needs to execute first,
466 * queue this mbox command to be processed later.
467 */
468 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
469 /*
470 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
471 * command issued in lpfc_cmpl_els_acc().
472 */
473 mbox->vport = vport;
474 spin_lock_irq(shost->host_lock);
475 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
476 spin_unlock_irq(shost->host_lock);
477
478 /*
479 * If there is an outstanding PLOGI issued, abort it before
480 * sending ACC rsp for received PLOGI. If pending plogi
481 * is not canceled here, the plogi will be rejected by
482 * remote port and will be retried. On a configuration with
483 * single discovery thread, this will cause a huge delay in
484 * discovery. Also this will cause multiple state machines
485 * running in parallel for this node.
486 * This only applies to a fabric environment.
487 */
488 if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
489 (vport->fc_flag & FC_FABRIC)) {
490 /* software abort outstanding PLOGI */
491 lpfc_els_abort(phba, ndlp);
492 }
493
494 if ((vport->port_type == LPFC_NPIV_PORT &&
495 vport->cfg_restrict_login)) {
496
497 /* In order to preserve RPIs, we want to cleanup
498 * the default RPI the firmware created to rcv
499 * this ELS request. The only way to do this is
500 * to register, then unregister the RPI.
501 */
502 spin_lock_irq(shost->host_lock);
503 ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
504 spin_unlock_irq(shost->host_lock);
505 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
506 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
507 rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
508 ndlp, mbox);
509 if (rc)
510 mempool_free(mbox, phba->mbox_mem_pool);
511 return 1;
512 }
513 rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
514 if (rc)
515 mempool_free(mbox, phba->mbox_mem_pool);
516 return 1;
517out:
518 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
519 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
520 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
521 return 0;
522}
523
524/**
525 * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
526 * @phba: pointer to lpfc hba data structure.
527 * @mboxq: pointer to mailbox object
528 *
529 * This routine is invoked to issue a completion to a rcv'ed
530 * ADISC or PDISC after the paused RPI has been resumed.
531 **/
532static void
533lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
534{
535 struct lpfc_vport *vport;
536 struct lpfc_iocbq *elsiocb;
537 struct lpfc_nodelist *ndlp;
538 uint32_t cmd;
539
540 elsiocb = (struct lpfc_iocbq *)mboxq->context1;
541 ndlp = (struct lpfc_nodelist *) mboxq->context2;
542 vport = mboxq->vport;
543 cmd = elsiocb->drvrTimeout;
544
545 if (cmd == ELS_CMD_ADISC) {
546 lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
547 } else {
548 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
549 ndlp, NULL);
550 }
551 kfree(elsiocb);
552 mempool_free(mboxq, phba->mbox_mem_pool);
553}
554
/*
 * lpfc_rcv_padisc - Handle a received ADISC or PDISC for @ndlp.
 *
 * Returns 1 when the names matched and an ACC was (or will be) sent,
 * 0 when the request was rejected and the node moved to NPR with a
 * delayed PLOGI retry scheduled.
 */
static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd;
	struct serv_parm *sp;
	struct lpfc_name *pnn, *ppn;
	struct ls_rjt stat;
	ADISC *ap;
	IOCB_t *icmd;
	uint32_t *lp;
	uint32_t cmd;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	/* The name fields live at different offsets in ADISC vs PDISC
	 * (PDISC carries full service parameters).
	 */
	cmd = *lp++;
	if (cmd == ELS_CMD_ADISC) {
		ap = (ADISC *) lp;
		pnn = (struct lpfc_name *) & ap->nodeName;
		ppn = (struct lpfc_name *) & ap->portName;
	} else {
		sp = (struct serv_parm *) lp;
		pnn = (struct lpfc_name *) & sp->nodeName;
		ppn = (struct lpfc_name *) & sp->portName;
	}

	icmd = &cmdiocb->iocb;
	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {

		/*
		 * As soon as we send ACC, the remote NPort can
		 * start sending us data. Thus, for SLI4 we must
		 * resume the RPI before the ACC goes out.
		 */
		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
				GFP_KERNEL);
			if (elsiocb) {

				/* Save info from cmd IOCB used in rsp */
				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
					sizeof(struct lpfc_iocbq));

				/* Save the ELS cmd */
				elsiocb->drvrTimeout = cmd;

				/* ACC is sent from lpfc_mbx_cmpl_resume_rpi */
				lpfc_sli4_resume_rpi(ndlp,
					lpfc_mbx_cmpl_resume_rpi, elsiocb);
				goto out;
			}
		}

		/* SLI3 (or SLI4 allocation failure): ACC immediately */
		if (cmd == ELS_CMD_ADISC) {
			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
		} else {
			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
				ndlp, NULL);
		}
out:
		/* If we are authenticated, move to the proper state */
		if (ndlp->nlp_type & NLP_FCP_TARGET)
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
		else
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

		return 1;
	}
	/* Reject this request because invalid parameters */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	/* 1 sec timeout before retrying PLOGI to re-authenticate */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return 0;
}
643
/*
 * lpfc_rcv_logo - Handle a received LOGO (or PRLO) for @ndlp.
 *
 * ACCs the request, handles the special fabric-DID case (vlink loss and
 * possible FDISC re-instantiation), schedules a delayed PLOGI for normal
 * target nodes, and finally parks the node in NPR state.  Always returns 0.
 */
static int
lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_vport **vports;
	int i, active_vlink_present = 0 ;

	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
	 * PLOGIs during LOGO storms from a device.
	 */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	if (els_cmd == ELS_CMD_PRLO)
		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	else
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	if (ndlp->nlp_DID == Fabric_DID) {
		/* LOGO from the fabric controller: the virtual link dropped */
		if (vport->port_state <= LPFC_FDISC)
			goto out;
		lpfc_linkdown_port(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
		spin_unlock_irq(shost->host_lock);
		/* Scan sibling vports for one that still has a live vlink */
		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_LOGO_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
			active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/* No live vlink left: restart physical port disc */
			spin_lock_irq(shost->host_lock);
			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
			spin_unlock_irq(shost->host_lock);
			lpfc_retry_pport_discovery(phba);
		}
	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
		((ndlp->nlp_type & NLP_FCP_TARGET) ||
		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
		/* Only try to re-login if this is NOT a Fabric Node */
		mod_timer(&ndlp->nlp_delayfunc,
			  jiffies + msecs_to_jiffies(1000 * 1));
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);

		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	}
out:
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	spin_unlock_irq(shost->host_lock);
	/* The driver has to wait until the ACC completes before it continues
	 * processing the LOGO. The action will resume in
	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
	 * unreg_login, the driver waits so the ACC does not get aborted.
	 */
	return 0;
}
736
737static uint32_t
738lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
739 struct lpfc_nodelist *ndlp,
740 struct lpfc_iocbq *cmdiocb)
741{
742 struct ls_rjt stat;
743 uint32_t *payload;
744 uint32_t cmd;
745
746 payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
747 cmd = *payload;
748 if (vport->phba->nvmet_support) {
749 /* Must be a NVME PRLI */
750 if (cmd == ELS_CMD_PRLI)
751 goto out;
752 } else {
753 /* Initiator mode. */
754 if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
755 goto out;
756 }
757 return 1;
758out:
759 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
760 "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
761 "state x%x flags x%x\n",
762 cmd, ndlp->nlp_rpi, ndlp->nlp_state,
763 ndlp->nlp_flag);
764 memset(&stat, 0, sizeof(struct ls_rjt));
765 stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
766 stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
767 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
768 ndlp, NULL);
769 return 0;
770}
771
772static void
773lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
774 struct lpfc_iocbq *cmdiocb)
775{
776 struct lpfc_hba *phba = vport->phba;
777 struct lpfc_dmabuf *pcmd;
778 uint32_t *lp;
779 PRLI *npr;
780 struct fc_rport *rport = ndlp->rport;
781 u32 roles;
782
783 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
784 lp = (uint32_t *) pcmd->virt;
785 npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
786
787 if ((npr->prliType == PRLI_FCP_TYPE) ||
788 (npr->prliType == PRLI_NVME_TYPE)) {
789 if (npr->initiatorFunc) {
790 if (npr->prliType == PRLI_FCP_TYPE)
791 ndlp->nlp_type |= NLP_FCP_INITIATOR;
792 if (npr->prliType == PRLI_NVME_TYPE)
793 ndlp->nlp_type |= NLP_NVME_INITIATOR;
794 }
795 if (npr->targetFunc) {
796 if (npr->prliType == PRLI_FCP_TYPE)
797 ndlp->nlp_type |= NLP_FCP_TARGET;
798 if (npr->prliType == PRLI_NVME_TYPE)
799 ndlp->nlp_type |= NLP_NVME_TARGET;
800 if (npr->writeXferRdyDis)
801 ndlp->nlp_flag |= NLP_FIRSTBURST;
802 }
803 if (npr->Retry)
804 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
805
806 /* If this driver is in nvme target mode, set the ndlp's fc4
807 * type to NVME provided the PRLI response claims NVME FC4
808 * type. Target mode does not issue gft_id so doesn't get
809 * the fc4 type set until now.
810 */
811 if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
812 ndlp->nlp_fc4_type |= NLP_FC4_NVME;
813 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
814 }
815 if (npr->prliType == PRLI_FCP_TYPE)
816 ndlp->nlp_fc4_type |= NLP_FC4_FCP;
817 }
818 if (rport) {
819 /* We need to update the rport role values */
820 roles = FC_RPORT_ROLE_UNKNOWN;
821 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
822 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
823 if (ndlp->nlp_type & NLP_FCP_TARGET)
824 roles |= FC_RPORT_ROLE_FCP_TARGET;
825
826 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
827 "rport rolechg: role:x%x did:x%x flg:x%x",
828 roles, ndlp->nlp_DID, ndlp->nlp_flag);
829
830 if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
831 fc_remote_port_rolechg(rport, roles);
832 }
833}
834
835static uint32_t
836lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
837{
838 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
839
840 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
841 spin_lock_irq(shost->host_lock);
842 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
843 spin_unlock_irq(shost->host_lock);
844 return 0;
845 }
846
847 if (!(vport->fc_flag & FC_PT2PT)) {
848 /* Check config parameter use-adisc or FCP-2 */
849 if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
850 ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
851 (ndlp->nlp_type & NLP_FCP_TARGET)))) {
852 spin_lock_irq(shost->host_lock);
853 ndlp->nlp_flag |= NLP_NPR_ADISC;
854 spin_unlock_irq(shost->host_lock);
855 return 1;
856 }
857 }
858
859 spin_lock_irq(shost->host_lock);
860 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
861 spin_unlock_irq(shost->host_lock);
862 lpfc_unreg_rpi(vport, ndlp);
863 return 0;
864}
865
866/**
867 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
868 * @phba : Pointer to lpfc_hba structure.
869 * @vport: Pointer to lpfc_vport structure.
870 * @rpi : rpi to be release.
871 *
872 * This function will send a unreg_login mailbox command to the firmware
873 * to release a rpi.
874 **/
875void
876lpfc_release_rpi(struct lpfc_hba *phba,
877 struct lpfc_vport *vport,
878 uint16_t rpi)
879{
880 LPFC_MBOXQ_t *pmb;
881 int rc;
882
883 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
884 GFP_KERNEL);
885 if (!pmb)
886 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
887 "2796 mailbox memory allocation failed \n");
888 else {
889 lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
890 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
891 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
892 if (rc == MBX_NOT_FINISHED)
893 mempool_free(pmb, phba->mbox_mem_pool);
894 }
895}
896
897static uint32_t
898lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
899 void *arg, uint32_t evt)
900{
901 struct lpfc_hba *phba;
902 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
903 uint16_t rpi;
904
905 phba = vport->phba;
906 /* Release the RPI if reglogin completing */
907 if (!(phba->pport->load_flag & FC_UNLOADING) &&
908 (evt == NLP_EVT_CMPL_REG_LOGIN) &&
909 (!pmb->u.mb.mbxStatus)) {
910 rpi = pmb->u.mb.un.varWords[0];
911 lpfc_release_rpi(phba, vport, rpi);
912 }
913 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
914 "0271 Illegal State Transition: node x%x "
915 "event x%x, state x%x Data: x%x x%x\n",
916 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
917 ndlp->nlp_flag);
918 return ndlp->nlp_state;
919}
920
921static uint32_t
922lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
923 void *arg, uint32_t evt)
924{
925 /* This transition is only legal if we previously
926 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
927 * working on the same NPortID, do nothing for this thread
928 * to stop it.
929 */
930 if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
931 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
932 "0272 Illegal State Transition: node x%x "
933 "event x%x, state x%x Data: x%x x%x\n",
934 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
935 ndlp->nlp_flag);
936 }
937 return ndlp->nlp_state;
938}
939
940/* Start of Discovery State Machine routines */
941
942static uint32_t
943lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
944 void *arg, uint32_t evt)
945{
946 struct lpfc_iocbq *cmdiocb;
947
948 cmdiocb = (struct lpfc_iocbq *) arg;
949
950 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
951 return ndlp->nlp_state;
952 }
953 return NLP_STE_FREED_NODE;
954}
955
/*
 * UNUSED_NODE + rcv misc ELS: an unused node should not be receiving
 * ELS traffic, so LOGO the sender; state unchanged.
 */
static uint32_t
lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	lpfc_issue_els_logo(vport, ndlp, 0);
	return ndlp->nlp_state;
}
963
964static uint32_t
965lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
966 void *arg, uint32_t evt)
967{
968 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
969 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
970
971 spin_lock_irq(shost->host_lock);
972 ndlp->nlp_flag |= NLP_LOGO_ACC;
973 spin_unlock_irq(shost->host_lock);
974 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
975
976 return ndlp->nlp_state;
977}
978
/* UNUSED_NODE + LOGO completion: the node can now be freed. */
static uint32_t
lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return NLP_STE_FREED_NODE;
}
985
/* UNUSED_NODE + device remove: free the node. */
static uint32_t
lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return NLP_STE_FREED_NODE;
}
992
/* UNUSED_NODE + device recovery: nothing to do; state unchanged. */
static uint32_t
lpfc_device_recov_unused_node(struct lpfc_vport *vport,
			struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return ndlp->nlp_state;
}
1000
/* A PLOGI was received while our own PLOGI to the same nport is
 * outstanding (a PLOGI "collision").  Per the collision rules only the
 * side with the lower portname keeps its PLOGI; the other side must
 * accept the incoming one.
 */
static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = arg;
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *lp = (uint32_t *) pcmd->virt;
	/* Service parameters follow the 4-byte ELS command word */
	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
	struct ls_rjt stat;
	int port_cmp;

	memset(&stat, 0, sizeof (struct ls_rjt));

	/* For a PLOGI, we only accept if our portname is less
	 * than the remote portname.
	 */
	phba->fc_stat.elsLogiCol++;
	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
			  sizeof(struct lpfc_name));

	if (port_cmp >= 0) {
		/* Reject this request because the remote node will accept
		   ours */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
	} else {
		/* Our portname is less: handle the remote PLOGI.  If the
		 * node was still awaiting discovery, account for it and
		 * finish discovery when no nodes remain.
		 */
		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
		    (vport->num_disc_nodes)) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			/* Check if there are more PLOGIs to be sent */
			lpfc_more_plogi(vport);
			if (vport->num_disc_nodes == 0) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
				lpfc_end_rscn(vport);
			}
		}
	} /* If our portname was less */

	return ndlp->nlp_state;
}
1051
1052static uint32_t
1053lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1054 void *arg, uint32_t evt)
1055{
1056 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1057 struct ls_rjt stat;
1058
1059 memset(&stat, 0, sizeof (struct ls_rjt));
1060 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
1061 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1062 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1063 return ndlp->nlp_state;
1064}
1065
1066static uint32_t
1067lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1068 void *arg, uint32_t evt)
1069{
1070 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1071
1072 /* software abort outstanding PLOGI */
1073 lpfc_els_abort(vport->phba, ndlp);
1074
1075 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1076 return ndlp->nlp_state;
1077}
1078
1079static uint32_t
1080lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1081 void *arg, uint32_t evt)
1082{
1083 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1084 struct lpfc_hba *phba = vport->phba;
1085 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1086
1087 /* software abort outstanding PLOGI */
1088 lpfc_els_abort(phba, ndlp);
1089
1090 if (evt == NLP_EVT_RCV_LOGO) {
1091 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1092 } else {
1093 lpfc_issue_els_logo(vport, ndlp, 0);
1094 }
1095
1096 /* Put ndlp in npr state set plogi timer for 1 sec */
1097 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
1098 spin_lock_irq(shost->host_lock);
1099 ndlp->nlp_flag |= NLP_DELAY_TMO;
1100 spin_unlock_irq(shost->host_lock);
1101 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1102 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1103 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1104
1105 return ndlp->nlp_state;
1106}
1107
/* Completion of our solicited PLOGI.  On success: validate the returned
 * service parameters, record class-of-service / max frame size, apply
 * pt2pt TOV updates (config_link / reg_vfi), and kick off REG_LOGIN.
 * On any failure path the node is parked in NPR with NLP_DEFER_RM set
 * and NLP_STE_FREED_NODE is returned to the state machine.
 */
static uint32_t
lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg,
			    uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *mp;
	uint32_t *lp;
	uint32_t vid, flag;
	IOCB_t *irsp;
	struct serv_parm *sp;
	uint32_t ed_tov;
	LPFC_MBOXQ_t *mbox;
	int rc;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
		/* Recovery from PLOGI collision logic */
		return ndlp->nlp_state;
	}

	irsp = &rspiocb->iocb;

	if (irsp->ulpStatus)
		goto out;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	/* The response payload is chained after the command buffer */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;

	lp = (uint32_t *) prsp->virt;
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));

	/* Some switches have FDMI servers returning 0 for WWN */
	if ((ndlp->nlp_DID != FDMI_DID) &&
		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0142 PLOGI RSP: Invalid WWN.\n");
		goto out;
	}
	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
		goto out;
	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state,
			 ndlp->nlp_flag, ndlp->nlp_rpi);
	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	/* Record every class of service the remote advertises */
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	if ((vport->fc_flag & FC_PT2PT) &&
	    (vport->fc_flag & FC_PT2PT_PLOGI)) {
		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
		if (sp->cmn.edtovResolution) {
			/* E_D_TOV ticks are in nanoseconds */
			/* NOTE(review): converts phba->fc_edtov, not the
			 * just-read ed_tov value — looks suspicious but
			 * matches the in-tree code; confirm intent.
			 */
			ed_tov = (phba->fc_edtov + 999999) / 1000000;
		}

		ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
		/* Suppress-response is only honored for Emulex peers that
		 * advertised the vendor-version capability.
		 */
		if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
		    sp->cmn.valid_vendor_ver_level) {
			vid = be32_to_cpu(sp->un.vv.vid);
			flag = be32_to_cpu(sp->un.vv.flags);
			if ((vid == LPFC_VV_EMLX_ID) &&
			    (flag & LPFC_VV_SUPPRESS_RSP))
				ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
		}

		/*
		 * Use the larger EDTOV
		 * RATOV = 2 * EDTOV for pt-to-pt
		 */
		if (ed_tov > phba->fc_edtov)
			phba->fc_edtov = ed_tov;
		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;

		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

		/* Issue config_link / reg_vfi to account for updated TOV's */
		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_issue_reg_vfi(vport);
		} else {
			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!mbox) {
				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
						 "0133 PLOGI: no memory "
						 "for config_link "
						 "Data: x%x x%x x%x x%x\n",
						 ndlp->nlp_DID, ndlp->nlp_state,
						 ndlp->nlp_flag, ndlp->nlp_rpi);
				goto out;
			}

			lpfc_config_link(phba, mbox);

			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mbox->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				goto out;
			}
		}
	}

	/* Drop any stale RPI before registering the new login */
	lpfc_unreg_rpi(vport, ndlp);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0018 PLOGI: no memory for reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
		goto out;
	}

	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
		/* Pick the completion handler based on who we logged into */
		switch (ndlp->nlp_DID) {
		case NameServer_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
			break;
		case FDMI_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
			break;
		default:
			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
		}
		/* The mbox holds a node reference until its completion */
		mbox->context2 = lpfc_nlp_get(ndlp);
		mbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
		    != MBX_NOT_FINISHED) {
			lpfc_nlp_set_state(vport, ndlp,
					   NLP_STE_REG_LOGIN_ISSUE);
			return ndlp->nlp_state;
		}
		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
		/* decrement node reference count to the failed mbox
		 * command
		 */
		lpfc_nlp_put(ndlp);
		mp = (struct lpfc_dmabuf *) mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0134 PLOGI: cannot issue reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	} else {
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0135 PLOGI: cannot format reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	}


out:
	if (ndlp->nlp_DID == NameServer_DID) {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0261 Cannot Register NameServer login\n");
	}

	/*
	** In case the node reference counter does not go to zero, ensure that
	** the stale state for the node is not processed.
	*/

	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DEFER_RM;
	spin_unlock_irq(shost->host_lock);
	return NLP_STE_FREED_NODE;
}
1314
/* Our LOGO completed while in PLOGI_ISSUE: no state change needed. */
static uint32_t
lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return ndlp->nlp_state;
}
1321
1322static uint32_t
1323lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
1324 struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
1325{
1326 struct lpfc_hba *phba;
1327 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1328 MAILBOX_t *mb = &pmb->u.mb;
1329 uint16_t rpi;
1330
1331 phba = vport->phba;
1332 /* Release the RPI */
1333 if (!(phba->pport->load_flag & FC_UNLOADING) &&
1334 !mb->mbxStatus) {
1335 rpi = pmb->u.mb.un.varWords[0];
1336 lpfc_release_rpi(phba, vport, rpi);
1337 }
1338 return ndlp->nlp_state;
1339}
1340
1341static uint32_t
1342lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1343 void *arg, uint32_t evt)
1344{
1345 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1346
1347 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1348 spin_lock_irq(shost->host_lock);
1349 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1350 spin_unlock_irq(shost->host_lock);
1351 return ndlp->nlp_state;
1352 } else {
1353 /* software abort outstanding PLOGI */
1354 lpfc_els_abort(vport->phba, ndlp);
1355
1356 lpfc_drop_node(vport, ndlp);
1357 return NLP_STE_FREED_NODE;
1358 }
1359}
1360
1361static uint32_t
1362lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
1363 struct lpfc_nodelist *ndlp,
1364 void *arg,
1365 uint32_t evt)
1366{
1367 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1368 struct lpfc_hba *phba = vport->phba;
1369
1370 /* Don't do anything that will mess up processing of the
1371 * previous RSCN.
1372 */
1373 if (vport->fc_flag & FC_RSCN_DEFERRED)
1374 return ndlp->nlp_state;
1375
1376 /* software abort outstanding PLOGI */
1377 lpfc_els_abort(phba, ndlp);
1378
1379 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1380 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1381 spin_lock_irq(shost->host_lock);
1382 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1383 spin_unlock_irq(shost->host_lock);
1384
1385 return ndlp->nlp_state;
1386}
1387
/* A PLOGI arrived while our ADISC is outstanding.  Abort the ADISC and
 * let the common receive path decide: on success, account the node in
 * ADISC discovery; on failure, fall back to issuing our own PLOGI.
 */
static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			/* Advance ADISC discovery to the next node */
			if (vport->num_disc_nodes)
				lpfc_more_adisc(vport);
		}
		return ndlp->nlp_state;
	}
	/* PLOGI receive failed: retry login ourselves */
	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);

	return ndlp->nlp_state;
}
1417
1418static uint32_t
1419lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1420 void *arg, uint32_t evt)
1421{
1422 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1423
1424 if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
1425 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1426 return ndlp->nlp_state;
1427}
1428
1429static uint32_t
1430lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1431 void *arg, uint32_t evt)
1432{
1433 struct lpfc_hba *phba = vport->phba;
1434 struct lpfc_iocbq *cmdiocb;
1435
1436 cmdiocb = (struct lpfc_iocbq *) arg;
1437
1438 /* software abort outstanding ADISC */
1439 lpfc_els_abort(phba, ndlp);
1440
1441 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1442 return ndlp->nlp_state;
1443}
1444
1445static uint32_t
1446lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1447 struct lpfc_nodelist *ndlp,
1448 void *arg, uint32_t evt)
1449{
1450 struct lpfc_iocbq *cmdiocb;
1451
1452 cmdiocb = (struct lpfc_iocbq *) arg;
1453
1454 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1455 return ndlp->nlp_state;
1456}
1457
1458static uint32_t
1459lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1460 void *arg, uint32_t evt)
1461{
1462 struct lpfc_iocbq *cmdiocb;
1463
1464 cmdiocb = (struct lpfc_iocbq *) arg;
1465
1466 /* Treat like rcv logo */
1467 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1468 return ndlp->nlp_state;
1469}
1470
/* Completion of our solicited ADISC.  On failure or a WWN mismatch the
 * node is parked in NPR with a 1 second delayed PLOGI retry and its
 * cached names are cleared; on success the node moves to MAPPED
 * (FCP target) or UNMAPPED.
 */
static uint32_t
lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;
	ADISC *ap;
	int rc;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
	irsp = &rspiocb->iocb;

	if ((irsp->ulpStatus) ||
	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
		/* 1 sec timeout */
		mod_timer(&ndlp->nlp_delayfunc,
			  jiffies + msecs_to_jiffies(1000));
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		/* Clear cached names so the retried PLOGI revalidates them */
		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));

		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		lpfc_unreg_rpi(vport, ndlp);
		return ndlp->nlp_state;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
		if (rc) {
			/* Stay in state and retry. */
			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
			return ndlp->nlp_state;
		}
	}

	if (ndlp->nlp_type & NLP_FCP_TARGET) {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
	} else {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}

	return ndlp->nlp_state;
}
1527
1528static uint32_t
1529lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1530 void *arg, uint32_t evt)
1531{
1532 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1533
1534 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1535 spin_lock_irq(shost->host_lock);
1536 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1537 spin_unlock_irq(shost->host_lock);
1538 return ndlp->nlp_state;
1539 } else {
1540 /* software abort outstanding ADISC */
1541 lpfc_els_abort(vport->phba, ndlp);
1542
1543 lpfc_drop_node(vport, ndlp);
1544 return NLP_STE_FREED_NODE;
1545 }
1546}
1547
/* Device-recovery event in ADISC_ISSUE: abort the ADISC, park the node
 * in NPR, and clear the deferred-removal / to-be-discovered flags.
 */
static uint32_t
lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
1574
1575static uint32_t
1576lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1577 struct lpfc_nodelist *ndlp,
1578 void *arg,
1579 uint32_t evt)
1580{
1581 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1582
1583 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1584 return ndlp->nlp_state;
1585}
1586
1587static uint32_t
1588lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1589 struct lpfc_nodelist *ndlp,
1590 void *arg,
1591 uint32_t evt)
1592{
1593 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1594 struct ls_rjt stat;
1595
1596 if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) {
1597 return ndlp->nlp_state;
1598 }
1599 if (vport->phba->nvmet_support) {
1600 /* NVME Target mode. Handle and respond to the PRLI and
1601 * transition to UNMAPPED provided the RPI has completed
1602 * registration.
1603 */
1604 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
1605 lpfc_rcv_prli(vport, ndlp, cmdiocb);
1606 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1607 } else {
1608 /* RPI registration has not completed. Reject the PRLI
1609 * to prevent an illegal state transition when the
1610 * rpi registration does complete.
1611 */
1612 memset(&stat, 0, sizeof(struct ls_rjt));
1613 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
1614 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1615 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
1616 ndlp, NULL);
1617 return ndlp->nlp_state;
1618 }
1619 } else {
1620 /* Initiator mode. */
1621 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1622 }
1623 return ndlp->nlp_state;
1624}
1625
/* A LOGO arrived while our REG_LOGIN mailbox is pending.  Scrub the
 * active mailbox and the mailbox queue of any REG_LOGIN64 referencing
 * this node (dropping the node reference each one held), then process
 * the LOGO through the common path.
 */
static uint32_t
lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	LPFC_MBOXQ_t *mb;
	LPFC_MBOXQ_t *nextmb;
	struct lpfc_dmabuf *mp;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		/* Active command cannot be cancelled: detach the node and
		 * let the default completion handler clean it up.
		 */
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
			lpfc_nlp_put(ndlp);
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	/* Queued (not yet issued) commands can be freed outright */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
			lpfc_nlp_put(ndlp);
			list_del(&mb->list);
			phba->sli.mboxq_cnt--;
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}
1672
1673static uint32_t
1674lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1675 struct lpfc_nodelist *ndlp,
1676 void *arg,
1677 uint32_t evt)
1678{
1679 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1680
1681 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1682 return ndlp->nlp_state;
1683}
1684
1685static uint32_t
1686lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1687 struct lpfc_nodelist *ndlp,
1688 void *arg,
1689 uint32_t evt)
1690{
1691 struct lpfc_iocbq *cmdiocb;
1692
1693 cmdiocb = (struct lpfc_iocbq *) arg;
1694 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1695 return ndlp->nlp_state;
1696}
1697
/* Completion of the REG_LOGIN mailbox.  On failure the node is parked
 * in NPR (with a delayed PLOGI retry unless the RPI pool is exhausted).
 * On success the RPI is recorded and, for non-fabric initiator-mode
 * nodes, the correct FCP/NVME PRLI is determined and issued.
 */
static uint32_t
lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
				  struct lpfc_nodelist *ndlp,
				  void *arg,
				  uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t did = mb->un.varWords[1];
	int rc = 0;

	if (mb->mbxStatus) {
		/* RegLogin failed */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				"0246 RegLogin failed Data: x%x x%x x%x x%x "
				"x%x\n",
				did, mb->mbxStatus, vport->port_state,
				mb->un.varRegLogin.vpi,
				mb->un.varRegLogin.rpi);
		/*
		 * If RegLogin failed due to lack of HBA resources do not
		 * retry discovery.
		 */
		if (mb->mbxStatus == MBXERR_RPI_FULL) {
			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
			return ndlp->nlp_state;
		}

		/* Put ndlp in npr state set plogi timer for 1 sec */
		mod_timer(&ndlp->nlp_delayfunc,
			  jiffies + msecs_to_jiffies(1000 * 1));
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		lpfc_issue_els_logo(vport, ndlp, 0);
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		return ndlp->nlp_state;
	}

	/* SLI4 ports have preallocated logical rpis. */
	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];

	ndlp->nlp_flag |= NLP_RPI_REGISTERED;

	/* Only if we are not a fabric nport do we issue PRLI */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "3066 RegLogin Complete on x%x x%x x%x\n",
			 did, ndlp->nlp_type, ndlp->nlp_fc4_type);
	if (!(ndlp->nlp_type & NLP_FABRIC) &&
	    (phba->nvmet_support == 0)) {
		/* The driver supports FCP and NVME concurrently. If the
		 * ndlp's nlp_fc4_type is still zero, the driver doesn't
		 * know what PRLI to send yet. Figure that out now and
		 * call PRLI depending on the outcome.
		 */
		if (vport->fc_flag & FC_PT2PT) {
			/* If we are pt2pt, there is no Fabric to determine
			 * the FC4 type of the remote nport. So if NVME
			 * is configured try it.
			 */
			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
			if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
			    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
				/* We need to update the localport also */
				lpfc_nvme_update_localport(vport);
			}

		} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			ndlp->nlp_fc4_type |= NLP_FC4_FCP;

		} else if (ndlp->nlp_fc4_type == 0) {
			/* Ask the NameServer (GFT_ID) for the FC4 type;
			 * NOTE(review): rc from lpfc_ns_cmd is ignored —
			 * a failure leaves the node in REG_LOGIN_ISSUE;
			 * confirm that is intended.
			 */
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
					 0, ndlp->nlp_DID);
			return ndlp->nlp_state;
		}

		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
		lpfc_issue_els_prli(vport, ndlp, 0);
	} else {
		if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
			phba->targetport->port_id = vport->fc_myDID;

		/* Only Fabric ports should transition. NVME target
		 * must complete PRLI.
		 */
		if (ndlp->nlp_type & NLP_FABRIC) {
			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		}
	}
	return ndlp->nlp_state;
}
1799
1800static uint32_t
1801lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1802 struct lpfc_nodelist *ndlp,
1803 void *arg,
1804 uint32_t evt)
1805{
1806 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1807
1808 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1809 spin_lock_irq(shost->host_lock);
1810 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1811 spin_unlock_irq(shost->host_lock);
1812 return ndlp->nlp_state;
1813 } else {
1814 lpfc_drop_node(vport, ndlp);
1815 return NLP_STE_FREED_NODE;
1816 }
1817}
1818
/* Device-recovery event while REG_LOGIN is pending: park the node in
 * NPR and arrange for the stale mailbox completion to be ignored.
 */
static uint32_t
lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
				 struct lpfc_nodelist *ndlp,
				 void *arg,
				 uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);

	/* If we are a target we won't immediately transition into PRLI,
	 * so if REG_LOGIN already completed we don't need to ignore it.
	 * (IGNR is set when the RPI is not yet registered, or when not
	 * in nvmet mode.)
	 */
	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) ||
	    !vport->phba->nvmet_support)
		ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;

	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
1849
1850static uint32_t
1851lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1852 void *arg, uint32_t evt)
1853{
1854 struct lpfc_iocbq *cmdiocb;
1855
1856 cmdiocb = (struct lpfc_iocbq *) arg;
1857
1858 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1859 return ndlp->nlp_state;
1860}
1861
1862static uint32_t
1863lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1864 void *arg, uint32_t evt)
1865{
1866 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1867
1868 if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
1869 return ndlp->nlp_state;
1870 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1871 return ndlp->nlp_state;
1872}
1873
1874static uint32_t
1875lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1876 void *arg, uint32_t evt)
1877{
1878 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1879
1880 /* Software abort outstanding PRLI before sending acc */
1881 lpfc_els_abort(vport->phba, ndlp);
1882
1883 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1884 return ndlp->nlp_state;
1885}
1886
1887static uint32_t
1888lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1889 void *arg, uint32_t evt)
1890{
1891 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1892
1893 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1894 return ndlp->nlp_state;
1895}
1896
/* This routine is invoked when we receive a PRLO request from a nport
 * we are logged into. We should send back a PRLO rsp setting the
 * appropriate bits.
 * NEXT STATE = PRLI_ISSUE
 */
1902static uint32_t
1903lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1904 void *arg, uint32_t evt)
1905{
1906 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1907
1908 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1909 return ndlp->nlp_state;
1910}
1911
/* Completion of a solicited PRLI (FCP or NVME — the iocb flag tells us
 * which).  Updates the node's role/personality bits from the accepted
 * PRLI, enforces NPIV restricted-login policy, and only transitions to
 * MAPPED/UNMAPPED once every outstanding PRLI has completed
 * (fc4_prli_sent == 0).
 */
static uint32_t
lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_hba *phba = vport->phba;
	IOCB_t *irsp;
	PRLI *npr;
	struct lpfc_nvme_prli *nvpr;
	void *temp_ptr;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	/* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp
	 * format is different so NULL the two PRLI types so that the
	 * driver correctly gets the correct context.
	 */
	npr = NULL;
	nvpr = NULL;
	temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
	if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ)
		npr = (PRLI *) temp_ptr;
	else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ)
		nvpr = (struct lpfc_nvme_prli *) temp_ptr;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		/* NPIV restricted login: jump to the removal path below
		 * (the "out" label lives inside the if body further down).
		 */
		if ((vport->port_type == LPFC_NPIV_PORT) &&
		    vport->cfg_restrict_login) {
			goto out;
		}

		/* When the rport rejected the FCP PRLI as unsupported.
		 * This should only happen in Pt2Pt so an NVME PRLI
		 * should be outstanding still.
		 */
		if (npr && ndlp->nlp_flag & NLP_FCP_PRLI_RJT) {
			ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
			goto out_err;
		}

		/* The LS Req had some error. Don't let this be a
		 * target.
		 */
		if ((ndlp->fc4_prli_sent == 1) &&
		    (ndlp->nlp_state == NLP_STE_PRLI_ISSUE) &&
		    (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_FCP_INITIATOR)))
			/* The FCP PRLI completed successfully but
			 * the NVME PRLI failed. Since they are sent in
			 * succession, allow the FCP to complete.
			 */
			goto out_err;

		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		ndlp->nlp_type |= NLP_FCP_INITIATOR;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		return ndlp->nlp_state;
	}

	if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
	    (npr->prliType == PRLI_FCP_TYPE)) {
		/* Accepted FCP PRLI: record initiator/target roles */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
				 "6028 FCP NPR PRLI Cmpl Init %d Target %d\n",
				 npr->initiatorFunc,
				 npr->targetFunc);
		if (npr->initiatorFunc)
			ndlp->nlp_type |= NLP_FCP_INITIATOR;
		if (npr->targetFunc) {
			ndlp->nlp_type |= NLP_FCP_TARGET;
			if (npr->writeXferRdyDis)
				ndlp->nlp_flag |= NLP_FIRSTBURST;
		}
		if (npr->Retry)
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;

	} else if (nvpr &&
		   (bf_get_be32(prli_acc_rsp_code, nvpr) ==
		    PRLI_REQ_EXECUTED) &&
		   (bf_get_be32(prli_type_code, nvpr) ==
		    PRLI_NVME_TYPE)) {

		/* Complete setting up the remote ndlp personality. */
		if (bf_get_be32(prli_init, nvpr))
			ndlp->nlp_type |= NLP_NVME_INITIATOR;

		/* Target driver cannot solicit NVME FB. */
		if (bf_get_be32(prli_tgt, nvpr)) {
			/* Complete the nvme target roles. The transport
			 * needs to know if the rport is capable of
			 * discovery in addition to its role.
			 */
			ndlp->nlp_type |= NLP_NVME_TARGET;
			if (bf_get_be32(prli_disc, nvpr))
				ndlp->nlp_type |= NLP_NVME_DISCOVERY;

			/*
			 * If prli_fba is set, the Target supports FirstBurst.
			 * If prli_fb_sz is 0, the FirstBurst size is unlimited,
			 * otherwise it defines the actual size supported by
			 * the NVME Target.
			 */
			if ((bf_get_be32(prli_fba, nvpr) == 1) &&
			    (phba->cfg_nvme_enable_fb) &&
			    (!phba->nvmet_support)) {
				/* Both sides support FB. The target's first
				 * burst size is a 512 byte encoded value.
				 */
				ndlp->nlp_flag |= NLP_FIRSTBURST;
				ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz,
								 nvpr);

				/* Expressed in units of 512 bytes */
				if (ndlp->nvme_fb_size)
					ndlp->nvme_fb_size <<=
						LPFC_NVME_FB_SHIFT;
				else
					ndlp->nvme_fb_size = LPFC_NVME_MAX_FB;
			}
		}

		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
				 "6029 NVME PRLI Cmpl w1 x%08x "
				 "w4 x%08x w5 x%08x flag x%x, "
				 "fcp_info x%x nlp_type x%x\n",
				 be32_to_cpu(nvpr->word1),
				 be32_to_cpu(nvpr->word4),
				 be32_to_cpu(nvpr->word5),
				 ndlp->nlp_flag, ndlp->nlp_fcp_info,
				 ndlp->nlp_type);
	}
	/* Restricted login on an NPIV port: non-target nodes are logged
	 * out and parked in NPR.
	 */
	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
	    (vport->port_type == LPFC_NPIV_PORT) &&
	    vport->cfg_restrict_login) {
out:
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_issue_els_logo(vport, ndlp, 0);

		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		return ndlp->nlp_state;
	}

out_err:
	/* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs
	 * are complete.
	 */
	if (ndlp->fc4_prli_sent == 0) {
		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
		else if (ndlp->nlp_type &
			 (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	} else
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_ELS,
				 "3067 PRLI's still outstanding "
				 "on x%06x - count %d, Pend Node Mode "
				 "transition...\n",
				 ndlp->nlp_DID, ndlp->fc4_prli_sent);

	return ndlp->nlp_state;
}
2079
/*! lpfc_device_rm_prli_issue
 *
 * \pre
 * \post
 * \param phba
 * \param ndlp
 * \param arg
 * \param evt
 * \return uint32_t
 *
 * \b Description:
 * This routine is invoked when we receive a request to remove an nport that
 * is in the process of PRLI. We should software abort the outstanding PRLI,
 * unreg login, and send a logout. We will change the node state to
 * UNUSED_NODE and put it on the plogi list so it can be freed when the LOGO
 * completes.
 *
 */
2097
static uint32_t
lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		/* Node is slated for (re)discovery: defer the removal by
		 * flagging it NODEV_REMOVE instead of dropping it now.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PRLI */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}
2117
2118
/*! lpfc_device_recov_prli_issue
 *
 * \pre
 * \post
 * \param phba
 * \param ndlp
 * \param arg
 * \param evt
 * \return uint32_t
 *
 * \b Description:
 * This routine is invoked when the state of a device is unknown, for
 * example during a link down. We should remove the nodelist entry from the
 * unmapped list, issue a UNREG_LOGIN, do a software abort of the
 * outstanding PRLI command, then free the node entry.
 */
static uint32_t
lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PRLI */
	lpfc_els_abort(phba, ndlp);

	/* Park the node in NPR until it is rediscovered, remembering that
	 * it came from PRLI_ISSUE.
	 */
	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
2161
2162static uint32_t
2163lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2164 void *arg, uint32_t evt)
2165{
2166 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2167 struct ls_rjt stat;
2168
2169 memset(&stat, 0, sizeof(struct ls_rjt));
2170 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2171 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2172 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2173 return ndlp->nlp_state;
2174}
2175
2176static uint32_t
2177lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2178 void *arg, uint32_t evt)
2179{
2180 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2181 struct ls_rjt stat;
2182
2183 memset(&stat, 0, sizeof(struct ls_rjt));
2184 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2185 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2186 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2187 return ndlp->nlp_state;
2188}
2189
2190static uint32_t
2191lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2192 void *arg, uint32_t evt)
2193{
2194 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2195 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2196
2197 spin_lock_irq(shost->host_lock);
2198 ndlp->nlp_flag |= NLP_LOGO_ACC;
2199 spin_unlock_irq(shost->host_lock);
2200 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2201 return ndlp->nlp_state;
2202}
2203
2204static uint32_t
2205lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2206 void *arg, uint32_t evt)
2207{
2208 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2209 struct ls_rjt stat;
2210
2211 memset(&stat, 0, sizeof(struct ls_rjt));
2212 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2213 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2214 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2215 return ndlp->nlp_state;
2216}
2217
2218static uint32_t
2219lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2220 void *arg, uint32_t evt)
2221{
2222 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
2223 struct ls_rjt stat;
2224
2225 memset(&stat, 0, sizeof(struct ls_rjt));
2226 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2227 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2228 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2229 return ndlp->nlp_state;
2230}
2231
static uint32_t
lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* LOGO completed: park the node in NPR, remembering it came from
	 * LOGO_ISSUE, and clear any pending remove/discovery flags.
	 */
	ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
2246
static uint32_t
lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	/*
	 * DevLoss has timed out and is calling for Device Remove.
	 * In this case, abort the LOGO and cleanup the ndlp
	 */

	lpfc_unreg_rpi(vport, ndlp);
	/* software abort outstanding LOGO */
	lpfc_els_abort(vport->phba, ndlp);
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}
2262
2263static uint32_t
2264lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
2265 struct lpfc_nodelist *ndlp,
2266 void *arg, uint32_t evt)
2267{
2268 /*
2269 * Device Recovery events have no meaning for a node with a LOGO
2270 * outstanding. The LOGO has to complete first and handle the
2271 * node from that point.
2272 */
2273 return ndlp->nlp_state;
2274}
2275
2276static uint32_t
2277lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2278 void *arg, uint32_t evt)
2279{
2280 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2281
2282 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
2283 return ndlp->nlp_state;
2284}
2285
2286static uint32_t
2287lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2288 void *arg, uint32_t evt)
2289{
2290 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2291
2292 if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
2293 return ndlp->nlp_state;
2294
2295 lpfc_rcv_prli(vport, ndlp, cmdiocb);
2296 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2297 return ndlp->nlp_state;
2298}
2299
2300static uint32_t
2301lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2302 void *arg, uint32_t evt)
2303{
2304 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2305
2306 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2307 return ndlp->nlp_state;
2308}
2309
2310static uint32_t
2311lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2312 void *arg, uint32_t evt)
2313{
2314 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2315
2316 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2317 return ndlp->nlp_state;
2318}
2319
2320static uint32_t
2321lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2322 void *arg, uint32_t evt)
2323{
2324 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2325
2326 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
2327 return ndlp->nlp_state;
2328}
2329
static uint32_t
lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Device recovery (e.g. link event): move the node to NPR,
	 * remembering it was UNMAPPED, clear pending remove/discovery
	 * flags, and let discovery decide whether to ADISC it later.
	 */
	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);

	return ndlp->nlp_state;
}
2347
2348static uint32_t
2349lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2350 void *arg, uint32_t evt)
2351{
2352 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2353
2354 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
2355 return ndlp->nlp_state;
2356}
2357
2358static uint32_t
2359lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2360 void *arg, uint32_t evt)
2361{
2362 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2363
2364 if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
2365 return ndlp->nlp_state;
2366 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2367 return ndlp->nlp_state;
2368}
2369
2370static uint32_t
2371lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2372 void *arg, uint32_t evt)
2373{
2374 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2375
2376 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2377 return ndlp->nlp_state;
2378}
2379
2380static uint32_t
2381lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
2382 struct lpfc_nodelist *ndlp,
2383 void *arg, uint32_t evt)
2384{
2385 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2386
2387 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2388 return ndlp->nlp_state;
2389}
2390
static uint32_t
lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* flush the target: abort all outstanding FCP ring I/O addressed
	 * to this node's SCSI id before tearing the process login down.
	 */
	lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}
2406
static uint32_t
lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Device recovery (e.g. link event): move the node to NPR,
	 * remembering it was MAPPED, clear pending remove/discovery flags,
	 * and let discovery decide whether to ADISC it later.
	 */
	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
2423
static uint32_t
lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Ignore PLOGI if we have an outstanding LOGO */
	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
		return ndlp->nlp_state;
	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		/* The common handler accepted the PLOGI; cancel any delayed
		 * retry and clear ADISC/discovery markers for this node.
		 */
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
		spin_unlock_irq(shost->host_lock);
	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		/* send PLOGI immediately, move to PLOGI issue state */
		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
2449
static uint32_t
lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt stat;

	/* Node is in NPR (not logged in): reject the PRLI outright, then
	 * restart discovery toward the remote port below.
	 */
	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			/* RPI still valid: revalidate via ADISC. */
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			spin_unlock_irq(shost->host_lock);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			/* Otherwise start over with a fresh PLOGI. */
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
2479
2480static uint32_t
2481lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2482 void *arg, uint32_t evt)
2483{
2484 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2485
2486 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2487 return ndlp->nlp_state;
2488}
2489
static uint32_t
lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	/*
	 * Do not start discovery if discovery is about to start
	 * or discovery in progress for this node. Starting discovery
	 * here will affect the counting of discovery threads.
	 */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			/* NOTE(review): nlp_flag is cleared here without
			 * host_lock, unlike the PRLI path above — confirm
			 * whether locking is required here.
			 */
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			/* No valid ADISC path: restart with a fresh PLOGI. */
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
2517
static uint32_t
lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Accept the PRLO (mark that a LOGO-style ACC is being sent). */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);

	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
		/* No delayed ELS pending: schedule a PLOGI retry in 1s via
		 * the node's delay timer and drop the ADISC hint.
		 */
		mod_timer(&ndlp->nlp_delayfunc,
			  jiffies + msecs_to_jiffies(1000 * 1));
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	} else {
		/* Delay timer already armed: just drop the ADISC hint. */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp->nlp_state;
}
2546
static uint32_t
lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		/* PLOGI failed while in NPR: flag the node for deferred
		 * removal and report it as freed to the state machine.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DEFER_RM;
		spin_unlock_irq(shost->host_lock);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}
2567
2568static uint32_t
2569lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2570 void *arg, uint32_t evt)
2571{
2572 struct lpfc_iocbq *cmdiocb, *rspiocb;
2573 IOCB_t *irsp;
2574
2575 cmdiocb = (struct lpfc_iocbq *) arg;
2576 rspiocb = cmdiocb->context_un.rsp_iocb;
2577
2578 irsp = &rspiocb->iocb;
2579 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2580 lpfc_drop_node(vport, ndlp);
2581 return NLP_STE_FREED_NODE;
2582 }
2583 return ndlp->nlp_state;
2584}
2585
static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* For the fabric port just clear the fc flags. */
	if (ndlp->nlp_DID == Fabric_DID) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
	}
	/* LOGO completed while in NPR: release the RPI registration. */
	lpfc_unreg_rpi(vport, ndlp);
	return ndlp->nlp_state;
}
2601
2602static uint32_t
2603lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2604 void *arg, uint32_t evt)
2605{
2606 struct lpfc_iocbq *cmdiocb, *rspiocb;
2607 IOCB_t *irsp;
2608
2609 cmdiocb = (struct lpfc_iocbq *) arg;
2610 rspiocb = cmdiocb->context_un.rsp_iocb;
2611
2612 irsp = &rspiocb->iocb;
2613 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2614 lpfc_drop_node(vport, ndlp);
2615 return NLP_STE_FREED_NODE;
2616 }
2617 return ndlp->nlp_state;
2618}
2619
static uint32_t
lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb = &pmb->u.mb;

	if (!mb->mbxStatus) {
		/* SLI4 ports have preallocated logical rpis. */
		if (vport->phba->sli_rev < LPFC_SLI_REV4)
			ndlp->nlp_rpi = mb->un.varWords[0];
		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
		if (ndlp->nlp_flag & NLP_LOGO_ACC) {
			/* A LOGO ACC went out meanwhile: undo the
			 * registration we just completed.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	} else {
		/* REG_LOGIN failed: drop the node if removal is pending. */
		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
			lpfc_drop_node(vport, ndlp);
			return NLP_STE_FREED_NODE;
		}
	}
	return ndlp->nlp_state;
}
2644
static uint32_t
lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		/* Node is slated for discovery: defer the removal by
		 * flagging it NODEV_REMOVE instead of dropping it now.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	}
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}
2660
static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* Already in NPR: just cancel any delayed retry and clear the
	 * pending remove/discovery flags.
	 */
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	return ndlp->nlp_state;
}
2679
2680
2681/* This next section defines the NPort Discovery State Machine */
2682
/* There are 4 different doubly linked lists nodelist entries can reside on.
 * The plogi list and adisc list are used when Link Up discovery or RSCN
 * processing is needed. Each list holds the nodes that we will send PLOGI
 * or ADISC on. These lists will keep track of what nodes will be affected
 * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
 * The unmapped_list will contain all nodes that we have successfully logged
 * into at the Fibre Channel level. The mapped_list will contain all nodes
 * that are mapped FCP targets.
 */
2692/*
2693 * The bind list is a list of undiscovered (potentially non-existent) nodes
2694 * that we have saved binding information on. This information is used when
2695 * nodes transition from the unmapped to the mapped list.
2696 */
/* For UNUSED_NODE state, the node has just been allocated.
 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
 * and put on the unmapped list. For ADISC processing, the node is taken off
 * the ADISC list and placed on either the mapped or unmapped list (depending
 * on its previous state). Once on the unmapped list, a PRLI is issued and the
 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
 * changed to UNMAPPED_NODE. If the completion indicates a mapped
 * node, the node is taken off the unmapped list. The binding list is checked
 * for a valid binding, or a binding is automatically assigned. If binding
 * assignment is unsuccessful, the node is left on the unmapped list. If
 * binding assignment is successful, the associated binding list entry (if
 * any) is removed, and the node is placed on the mapped list.
 */
/*
 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
 * expire, all affected nodes will receive a DEVICE_RM event.
 */
/*
 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
 * check, additional nodes may be added or removed (via DEVICE_RM) to / from
 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
 * we will first process the ADISC list. 32 entries are processed initially and
 * ADISC is initiated for each one. Completions / Events for each node are
 * funnelled through the state machine. As each node finishes ADISC processing,
 * it starts ADISC for any nodes waiting for ADISC processing. If no nodes are
 * waiting, and the ADISC list count is identically 0, then we are done. For
 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
 * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
 * list. 32 entries are processed initially and PLOGI is initiated for each one.
 * Completions / Events for each node are funnelled through the state machine.
 * As each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
 * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
 * identically 0, then we are done. We have now completed discovery / RSCN
 * handling. Upon completion, ALL nodes should be on either the mapped or
 * unmapped lists.
 */
2736
/* State/event dispatch table: one action routine per (state, event) pair,
 * indexed as state * NLP_EVT_MAX_EVENT + event by lpfc_disc_state_machine().
 */
static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
     (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
	/* Action routine                  Event       Current State  */
	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_logo_issue,	/* RCV_PLOGI   LOGO_ISSUE     */
	lpfc_rcv_prli_logo_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_logo_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_logo_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_logo_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_logo_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_logo_issue,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_logo_issue,	/* DEVICE_RM       */
	lpfc_device_recov_logo_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE       */
	lpfc_rcv_prli_npr_node,		/* RCV_PRLI        */
	lpfc_rcv_logo_npr_node,		/* RCV_LOGO        */
	lpfc_rcv_padisc_npr_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_npr_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_npr_node,		/* RCV_PRLO        */
	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
	lpfc_cmpl_logo_npr_node,	/* CMPL_LOGO       */
	lpfc_cmpl_adisc_npr_node,	/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_npr_node,	/* CMPL_REG_LOGIN  */
	lpfc_device_rm_npr_node,	/* DEVICE_RM       */
	lpfc_device_recov_npr_node,	/* DEVICE_RECOVERY */
};
2866
/* Drive the NPort discovery state machine: look up the action routine for
 * the node's current state and the incoming event, invoke it, and return
 * the resulting node state. A reference is taken on ndlp for the duration
 * of the call so the exit trace can safely dereference it; if the reference
 * cannot be obtained, the node may have been freed by the action routine.
 */
int
lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	uint32_t cur_state, rc;
	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
			 uint32_t);
	uint32_t got_ndlp = 0;

	if (lpfc_nlp_get(ndlp))
		got_ndlp = 1;

	cur_state = ndlp->nlp_state;

	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0211 DSM in event x%x on NPort x%x in "
			 "state %d Data: x%x x%x\n",
			 evt, ndlp->nlp_DID, cur_state,
			 ndlp->nlp_flag, ndlp->nlp_fc4_type);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
		 "DSM in:          evt:%d ste:%d did:x%x",
		evt, cur_state, ndlp->nlp_DID);

	/* Dispatch to the action routine for this (state, event) pair. */
	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
	rc = (func) (vport, ndlp, arg, evt);

	/* DSM out state <rc> on NPort <nlp_DID> */
	if (got_ndlp) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
			 rc, ndlp->nlp_DID, ndlp->nlp_flag);

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
			"DSM out:         ste:%d did:x%x flg:x%x",
			rc, ndlp->nlp_DID, ndlp->nlp_flag);
		/* Decrement the ndlp reference count held for this function */
		lpfc_nlp_put(ndlp);
	} else {
		/* No reference held: ndlp may be gone; trace without it. */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			"0213 DSM out state %d on NPort free\n", rc);

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
			"DSM out:         ste:%d did:x%x flg:x%x",
			rc, 0, 0);
	}

	return rc;
}