/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
			struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If any host link
 * attention event occurred during this @vport's discovery process, the
 * @vport shall be marked as FC_ABORT_DISCOVERY, a host link attention clear
 * shall be issued if the link state is not already in host link cleared
 * state, and the return code shall indicate whether the host link attention
 * event had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport is in state LPFC_VPORT_READY, the request for checking host link
 * attention events will be ignored and the return code shall indicate that
 * no host link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

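/*
 * Usage sketch: the ELS completion handlers in this file (for example,
 * lpfc_cmpl_els_flogi() below) call lpfc_els_chk_latt() first, so that a
 * link attention event seen during discovery aborts further processing of
 * the completed command:
 *
 *	if (lpfc_els_chk_latt(vport)) {
 *		lpfc_nlp_put(ndlp);	drop the node reference
 *		goto out;		and just free the command IOCB
 *	}
 */
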
/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether a response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine allocates a lpfc-IOCB data structure from the driver
 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed in,
 * for the discovery state machine to issue an Extended Link Service (ELS)
 * command. It is the generic lpfc-IOCB allocation and preparation routine
 * used by all the discovery state machine routines; the ELS command-specific
 * fields are set up later by the individual discovery routines after this
 * routine has allocated and prepared the generic IOCB data structure. It
 * fills in the Buffer Descriptor Entries (BDEs) and allocates buffers for
 * both the command payload and the response payload (if expected). The
 * reference count on the ndlp is incremented by 1 and the reference to the
 * ndlp is put into context1 of the IOCB data structure for this IOCB to hold
 * the ndlp reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
					& LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
					& LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
					& LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
	icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
	icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.elsreq64.remoteID = did;	/* DID */
	if (expectRsp) {
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		icmd->un.elsreq64.myID = vport->fc_myDID;

		/* For ELS_REQUEST64_CR, use the VPI by default */
		icmd->ulpContext = phba->vpi_ids[vport->vpi];
		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	/* prevent preparing iocb with NULL ndlp reference */
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1)
		goto els_iocb_free_pbuf_exit;
	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state: x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

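/*
 * Caller pattern, as a sketch based on the ELS issue routines later in this
 * file (for example lpfc_issue_els_flogi()): allocate and prepare the IOCB,
 * build the command payload behind context2, set the completion handler,
 * then hand the IOCB to the SLI layer:
 *
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 *	if (!elsiocb)
 *		return 1;
 *	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
 *	... fill in the ELS payload at pcmd ...
 *	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
 *	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 *	if (rc == IOCB_ERROR)
 *		lpfc_els_free_iocb(phba, elsiocb);
 */
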
/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->context2 = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 6;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
	mp = (struct lpfc_dmabuf *) mbox->context1;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0249 Cannot issue Register Fabric login: Err %d\n", err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for FCoE only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
static int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		rc = -ENODEV;
		goto fail;
	}

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf) {
		rc = -ENOMEM;
		goto fail;
	}
	dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
	if (!dmabuf->virt) {
		rc = -ENOMEM;
		goto fail_free_dmabuf;
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail_free_coherent;
	}
	vport->port_state = LPFC_FABRIC_CFG_LINK;
	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->context1 = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail_free_mbox;
	}
	return 0;

fail_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
fail_free_coherent:
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
fail_free_dmabuf:
	kfree(dmabuf);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
 * Fabric nodename has changed in the completion service parameters;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after FLOGI/FDISC completion if the
 * Clean Address bit in the FLOGI/FDISC response is cleared and the FCID,
 * Fabric portname, or Fabric nodename has changed in the completion service
 * parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
		struct serv_parm *sp)
{
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
		memcmp(&vport->fabric_portname, &sp->portName,
			sizeof(struct lpfc_name)) ||
		memcmp(&vport->fabric_nodename, &sp->nodeName,
			sizeof(struct lpfc_name)))
		fabric_param_changed = 1;

	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || lpfc_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


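/*
 * Illustrative scenario: after a fabric reconfiguration the switch may hand
 * this port a different N_Port ID while leaving the Clean Address bit
 * cleared in the FLOGI LS_ACC. In that case fabric_param_changed is 1 and,
 * because vport->fc_prevDID is non-zero (this is not the initial
 * discovery), FC_DISC_DELAYED is set so that N_Port discovery after the
 * FLOGI/FDISC completion is deferred rather than started immediately.
 */
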
/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID for
 * the @vport against the previously assigned N_Port ID. If it is different
 * from the previously assigned Destination ID (DID), the lpfc_unreg_rpi()
 * routine is invoked on all the remaining nodes with the @vport to
 * unregister the Remote Port Indicators (RPIs). Finally,
 * lpfc_issue_fabric_reglogin() is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
				sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	memcpy(&vport->fabric_portname, &sp->portName,
			sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
			sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
		} else {
			/* Because we asked f/w for NPIV it still expects us
			   to call reg_vnpid at least for the physical host */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
		}
	}

	if (fabric_param_changed &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					&vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(np))
				continue;
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
				   !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(shost->host_lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			/*
			 * If VPI is unreged, the driver needs to do INIT_VPI
			 * before re-registering
			 */
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else
			lpfc_issue_reg_vfi(vport);
	}
	return 0;
}
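/*
 * Note on the registration split above: on SLI-3 the fabric login is
 * completed by registering an RPI through lpfc_issue_fabric_reglogin()
 * (or lpfc_register_new_vport() first when an NPIV vport still needs its
 * VPI registered); on SLI-4 the VFI/VPI/FCFI registration path is used
 * instead, either starting FDISCs and the SCR/NameServer PLOGI right away
 * when the VPI is already registered, or going through INIT_VPI or
 * lpfc_issue_reg_vfi() first.
 */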
/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));
	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set ours to LocalID; the other
		 * side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp)
				goto fail;
			lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	} else
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* Start discovery - this should just do CLEAR_LA */
	lpfc_disc_start(vport);
	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * retry has been made (either immediately or delayed with lpfc_els_retry()
 * returning 1), the command IOCB will be released and function returned.
 * If the retry attempt has been given up (possibly reach the maximum
 * number of retries), one additional decrement of ndlp reference shall be
 * invoked before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of ndlp reference count). If no error reported in
 * the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"FLOGI cmpl:      status:x%x/x%x state:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		      (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout);

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0) {
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
			if ((phba->sli_rev == LPFC_SLI_REV4) &&
			    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
			     (vport->fc_prevDID != vport->fc_myDID))) {
				if (vport->fc_flag & FC_VFI_REGISTERED)
					lpfc_sli4_unreg_all_rpis(vport);
				lpfc_issue_reg_vfi(vport);
				lpfc_nlp_put(ndlp);
				goto out;
			}
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully "
			 "Data: x%x x%x x%x x%x\n",
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR,
				LOG_FIP | LOG_ELS,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);
			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						LOG_ELS,
						"2769 FLOGI to FCF (x%x) "
						"completed successfully\n",
						phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
	}

flogifail:
	lpfc_nlp_put(ndlp);

	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
			(irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli_ring *pring;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo;
	int rc;

	pring = &phba->sli.ring[LPFC_ELS_RING];

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cls1.classValid = 0;
	sp->cls2.seqDelivery = 1;
	sp->cls3.seqDelivery = 1;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
	} else {
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue FLOGI:     opt:x%x",
		phba->sli3_options, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}

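/*
 * FLOGI flow, summarized from the routines above: lpfc_initial_flogi()
 * (and the ELS retry path) calls lpfc_issue_els_flogi(), which prepares the
 * IOCB via lpfc_prep_els_iocb(), copies the service parameters from
 * vport->fc_sparam, and issues it as a fabric IOCB. Completion lands in
 * lpfc_cmpl_els_flogi(), which either retries via lpfc_els_retry() or
 * dispatches to lpfc_cmpl_els_flogi_fabric() / lpfc_cmpl_els_flogi_nport()
 * depending on whether the responder reported itself as an F_Port.
 */
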
/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = &phba->sli.ring[LPFC_ELS_RING];

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
		    icmd->un.elsreq64.bdl.ulpIoTag32) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
			    (ndlp->nlp_DID == Fabric_DID))
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
 * is then invoked with the @vport and the ndlp to perform the FDISC for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	int sentplogi;

	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		sentplogi = lpfc_els_disc_plogi(vport);

	return;
}

e59058c4 1315/**
3621a710 1316 * lpfc_plogi_confirm_nport - Confirm pologi wwpn matches stored ndlp
e59058c4
JS
1317 * @phba: pointer to lpfc hba data structure.
1318 * @prsp: pointer to response IOCB payload.
1319 * @ndlp: pointer to a node-list data structure.
1320 *
1321 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1322 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt.
1323 * The following cases are considered N_Port confirmed:
1324 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1325 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1326 * it does not have WWPN assigned either. If the WWPN is confirmed, the
1327 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1328 * 1) if there is a node on vport list other than the @ndlp with the same
1329 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1330 * on that node to release the RPI associated with the node; 2) if there is
1331 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1332 * into, a new node shall be allocated (or activated). In either case, the
1333 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1334 * be released and the new_ndlp shall be put on to the vport node list and
1335 * its pointer returned as the confirmed node.
1336 *
1337 * Note that before the @ndlp got "released", the keepDID from not-matching
1338 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1339 * of the @ndlp. This is because the release of @ndlp is actually to put it
1340 * into an inactive state on the vport node list and the vport node list
1341 * management algorithm does not allow two node with a same DID.
1342 *
1343 * Return code
1344 * pointer to the PLOGI N_Port @ndlp
1345 **/
488d1469 1346static struct lpfc_nodelist *
92d7f7b0 1347lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
488d1469
JS
1348 struct lpfc_nodelist *ndlp)
1349{
2e0fef85 1350 struct lpfc_vport *vport = ndlp->vport;
488d1469 1351 struct lpfc_nodelist *new_ndlp;
0ff10d46
JS
1352 struct lpfc_rport_data *rdata;
1353 struct fc_rport *rport;
488d1469 1354 struct serv_parm *sp;
92d7f7b0 1355 uint8_t name[sizeof(struct lpfc_name)];
58da1ffb 1356 uint32_t rc, keepDID = 0;
38b92ef8
JS
1357 int put_node;
1358 int put_rport;
19ca7609 1359 struct lpfc_node_rrqs rrq;
488d1469 1360
2fb9bd8b
JS
1361 /* Fabric nodes can have the same WWPN so we don't bother searching
1362 * by WWPN. Just return the ndlp that was given to us.
1363 */
1364 if (ndlp->nlp_type & NLP_FABRIC)
1365 return ndlp;
1366
92d7f7b0 1367 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
685f0bf7 1368 memset(name, 0, sizeof(struct lpfc_name));
488d1469 1369
685f0bf7 1370 /* Now we find out if the NPort we are logging into, matches the WWPN
488d1469
JS
1371 * we have for that ndlp. If not, we have some work to do.
1372 */
2e0fef85 1373 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
488d1469 1374
e47c9093 1375 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
488d1469 1376 return ndlp;
19ca7609 1377 memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
488d1469
JS
1378
1379 if (!new_ndlp) {
2e0fef85
JS
1380 rc = memcmp(&ndlp->nlp_portname, name,
1381 sizeof(struct lpfc_name));
92795650
JS
1382 if (!rc)
1383 return ndlp;
488d1469
JS
1384 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
1385 if (!new_ndlp)
1386 return ndlp;
2e0fef85 1387 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
e47c9093 1388 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
58da1ffb
JS
1389 rc = memcmp(&ndlp->nlp_portname, name,
1390 sizeof(struct lpfc_name));
1391 if (!rc)
1392 return ndlp;
e47c9093
JS
1393 new_ndlp = lpfc_enable_node(vport, new_ndlp,
1394 NLP_STE_UNUSED_NODE);
1395 if (!new_ndlp)
1396 return ndlp;
58da1ffb 1397 keepDID = new_ndlp->nlp_DID;
19ca7609
JS
1398 if (phba->sli_rev == LPFC_SLI_REV4)
1399 memcpy(&rrq.xri_bitmap,
1400 &new_ndlp->active_rrqs.xri_bitmap,
1401 sizeof(new_ndlp->active_rrqs.xri_bitmap));
1402 } else {
58da1ffb 1403 keepDID = new_ndlp->nlp_DID;
19ca7609
JS
1404 if (phba->sli_rev == LPFC_SLI_REV4)
1405 memcpy(&rrq.xri_bitmap,
1406 &new_ndlp->active_rrqs.xri_bitmap,
1407 sizeof(new_ndlp->active_rrqs.xri_bitmap));
1408 }
488d1469 1409
2e0fef85 1410 lpfc_unreg_rpi(vport, new_ndlp);
488d1469 1411 new_ndlp->nlp_DID = ndlp->nlp_DID;
92795650 1412 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
19ca7609
JS
1413 if (phba->sli_rev == LPFC_SLI_REV4)
1414 memcpy(new_ndlp->active_rrqs.xri_bitmap,
1415 &ndlp->active_rrqs.xri_bitmap,
1416 sizeof(ndlp->active_rrqs.xri_bitmap));
0ff10d46
JS
1417
1418 if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
1419 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1420 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1421
e47c9093 1422 /* Set state will put new_ndlp on to node list if not already done */
2e0fef85 1423 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
488d1469 1424
2e0fef85 1425 /* Move this back to NPR state */
87af33fe
JS
1426 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1427 /* The new_ndlp is replacing ndlp totally, so we need
1428 * to put ndlp on UNUSED list and try to free it.
1429 */
0ff10d46
JS
1430
1431 /* Fix up the rport accordingly */
1432 rport = ndlp->rport;
1433 if (rport) {
1434 rdata = rport->dd_data;
1435 if (rdata->pnode == ndlp) {
1436 lpfc_nlp_put(ndlp);
1437 ndlp->rport = NULL;
1438 rdata->pnode = lpfc_nlp_get(new_ndlp);
1439 new_ndlp->rport = rport;
1440 }
1441 new_ndlp->nlp_type = ndlp->nlp_type;
1442 }
58da1ffb
JS
1443 /* We shall actually free the ndlp with both nlp_DID and
1444 * nlp_portname fields equals 0 to avoid any ndlp on the
1445 * nodelist never to be used.
1446 */
1447 if (ndlp->nlp_DID == 0) {
1448 spin_lock_irq(&phba->ndlp_lock);
1449 NLP_SET_FREE_REQ(ndlp);
1450 spin_unlock_irq(&phba->ndlp_lock);
1451 }
0ff10d46 1452
58da1ffb
JS
1453 /* Two ndlps cannot have the same did on the nodelist */
1454 ndlp->nlp_DID = keepDID;
19ca7609
JS
1455 if (phba->sli_rev == LPFC_SLI_REV4)
1456 memcpy(&ndlp->active_rrqs.xri_bitmap,
1457 &rrq.xri_bitmap,
1458 sizeof(ndlp->active_rrqs.xri_bitmap));
2e0fef85 1459 lpfc_drop_node(vport, ndlp);
87af33fe 1460 }
92795650 1461 else {
2e0fef85 1462 lpfc_unreg_rpi(vport, ndlp);
58da1ffb
JS
1463 /* Two ndlps cannot have the same did */
1464 ndlp->nlp_DID = keepDID;
19ca7609
JS
1465 if (phba->sli_rev == LPFC_SLI_REV4)
1466 memcpy(&ndlp->active_rrqs.xri_bitmap,
1467 &rrq.xri_bitmap,
1468 sizeof(ndlp->active_rrqs.xri_bitmap));
2e0fef85 1469 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
38b92ef8
JS
1470 /* Since we are swapping the ndlp passed in with the new one
1471 * and the did has already been swapped, copy over the
1472 * state and names.
1473 */
1474 memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
1475 sizeof(struct lpfc_name));
1476 memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
1477 sizeof(struct lpfc_name));
1478 new_ndlp->nlp_state = ndlp->nlp_state;
1479 /* Fix up the rport accordingly */
1480 rport = ndlp->rport;
1481 if (rport) {
1482 rdata = rport->dd_data;
1483 put_node = rdata->pnode != NULL;
1484 put_rport = ndlp->rport != NULL;
1485 rdata->pnode = NULL;
1486 ndlp->rport = NULL;
1487 if (put_node)
1488 lpfc_nlp_put(ndlp);
1489 if (put_rport)
1490 put_device(&rport->dev);
1491 }
92795650 1492 }
488d1469
JS
1493 return new_ndlp;
1494}
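/*
 * Editor's note: a minimal, standalone sketch (not part of the driver) of
 * the WWPN check that drives the node swap above. The 8-byte world-wide
 * port name from the PLOGI service parameters is compared with memcmp();
 * only on a mismatch does the confirm routine look up or allocate a second
 * node. The types and names below are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct demo_wwn {			/* stand-in for struct lpfc_name */
	uint8_t wwn[8];
};

/* Return true when the WWPN in the PLOGI response matches the node we hold. */
static bool demo_wwpn_matches(const struct demo_wwn *resp_portname,
			      const struct demo_wwn *held_portname)
{
	return memcmp(resp_portname, held_portname, sizeof(struct demo_wwn)) == 0;
}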
1495
e59058c4 1496/**
3621a710 1497 * lpfc_end_rscn - Check and handle more rscn for a vport
e59058c4
JS
1498 * @vport: pointer to a host virtual N_Port data structure.
1499 *
1500 * This routine checks whether more Registration State Change
1501 * Notifications (RSCNs) came in while the discovery state machine was in
1502 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1503 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1504 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1505 * handling the RSCNs.
1506 **/
87af33fe
JS
1507void
1508lpfc_end_rscn(struct lpfc_vport *vport)
1509{
1510 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1511
1512 if (vport->fc_flag & FC_RSCN_MODE) {
1513 /*
1514 * Check to see if more RSCNs came in while we were
1515 * processing this one.
1516 */
1517 if (vport->fc_rscn_id_cnt ||
1518 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1519 lpfc_els_handle_rscn(vport);
1520 else {
1521 spin_lock_irq(shost->host_lock);
1522 vport->fc_flag &= ~FC_RSCN_MODE;
1523 spin_unlock_irq(shost->host_lock);
1524 }
1525 }
1526}
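/*
 * Editor's note: a simplified, standalone sketch (not driver code) of the
 * decision lpfc_end_rscn() makes: if further RSCNs are queued, or a full
 * RSCN discovery was requested, keep handling them; otherwise clear the
 * RSCN-mode flag. The flag and field names below are hypothetical
 * stand-ins for the vport fields used above.
 */
#include <stdint.h>

#define DEMO_FC_RSCN_MODE	0x0001u
#define DEMO_FC_RSCN_DISCOVERY	0x0002u

struct demo_vport {
	uint32_t fc_flag;
	int fc_rscn_id_cnt;		/* RSCNs received but not yet handled */
};

static void demo_handle_rscn(struct demo_vport *vp) { (void)vp; /* placeholder */ }

static void demo_end_rscn(struct demo_vport *vp)
{
	if (!(vp->fc_flag & DEMO_FC_RSCN_MODE))
		return;
	if (vp->fc_rscn_id_cnt || (vp->fc_flag & DEMO_FC_RSCN_DISCOVERY))
		demo_handle_rscn(vp);			/* more RSCN work to do */
	else
		vp->fc_flag &= ~DEMO_FC_RSCN_MODE;	/* discovery has settled */
}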
1527
19ca7609
JS
1528/**
 1529 * lpfc_cmpl_els_rrq - Completion handler for els RRQs.
1530 * @phba: pointer to lpfc hba data structure.
1531 * @cmdiocb: pointer to lpfc command iocb data structure.
1532 * @rspiocb: pointer to lpfc response iocb data structure.
1533 *
1534 * This routine will call the clear rrq function to free the rrq and
1535 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1536 * exist then the clear_rrq is still called because the rrq needs to
1537 * be freed.
1538 **/
1539
1540static void
1541lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1542 struct lpfc_iocbq *rspiocb)
1543{
1544 struct lpfc_vport *vport = cmdiocb->vport;
1545 IOCB_t *irsp;
1546 struct lpfc_nodelist *ndlp;
1547 struct lpfc_node_rrq *rrq;
1548
1549 /* we pass cmdiocb to state machine which needs rspiocb as well */
1550 rrq = cmdiocb->context_un.rrq;
1551 cmdiocb->context_un.rsp_iocb = rspiocb;
1552
1553 irsp = &rspiocb->iocb;
1554 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1555 "RRQ cmpl: status:x%x/x%x did:x%x",
1556 irsp->ulpStatus, irsp->un.ulpWord[4],
1557 irsp->un.elsreq64.remoteID);
1558
1559 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1560 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
1561 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1562 "2882 RRQ completes to NPort x%x "
1563 "with no ndlp. Data: x%x x%x x%x\n",
1564 irsp->un.elsreq64.remoteID,
1565 irsp->ulpStatus, irsp->un.ulpWord[4],
1566 irsp->ulpIoTag);
1567 goto out;
1568 }
1569
1570 /* rrq completes to NPort <nlp_DID> */
1571 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1572 "2880 RRQ completes to NPort x%x "
1573 "Data: x%x x%x x%x x%x x%x\n",
1574 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1575 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1576
1577 if (irsp->ulpStatus) {
1578 /* Check for retry */
 1579 /* RRQ failed. Don't print the vport-to-vport rjts */
1580 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1581 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1582 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1583 (phba)->pport->cfg_log_verbose & LOG_ELS)
1584 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1585 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
1586 ndlp->nlp_DID, irsp->ulpStatus,
1587 irsp->un.ulpWord[4]);
1588 }
1589out:
1590 if (rrq)
1591 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1592 lpfc_els_free_iocb(phba, cmdiocb);
1593 return;
1594}
e59058c4 1595/**
3621a710 1596 * lpfc_cmpl_els_plogi - Completion callback function for plogi
e59058c4
JS
1597 * @phba: pointer to lpfc hba data structure.
1598 * @cmdiocb: pointer to lpfc command iocb data structure.
1599 * @rspiocb: pointer to lpfc response iocb data structure.
1600 *
1601 * This routine is the completion callback function for issuing the Port
1602 * Login (PLOGI) command. For PLOGI completion, there must be an active
1603 * ndlp on the vport node list that matches the remote node ID from the
25985edc 1604 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply
e59058c4
JS
1605 * ignored and command IOCB released. The PLOGI response IOCB status is
 1606 * checked for error conditions. If an error status is reported, PLOGI
1607 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1608 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1609 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
1610 * (DSM) is set for this PLOGI completion. Finally, it checks whether
1611 * there are additional N_Port nodes with the vport that need to perform
 1612 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1613 * PLOGIs.
1614 **/
dea3101e 1615static void
2e0fef85
JS
1616lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1617 struct lpfc_iocbq *rspiocb)
dea3101e 1618{
2e0fef85
JS
1619 struct lpfc_vport *vport = cmdiocb->vport;
1620 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 1621 IOCB_t *irsp;
dea3101e 1622 struct lpfc_nodelist *ndlp;
92795650 1623 struct lpfc_dmabuf *prsp;
dea3101e
JB
1624 int disc, rc, did, type;
1625
dea3101e
JB
1626 /* we pass cmdiocb to state machine which needs rspiocb as well */
1627 cmdiocb->context_un.rsp_iocb = rspiocb;
1628
1629 irsp = &rspiocb->iocb;
858c9f6c
JS
1630 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1631 "PLOGI cmpl: status:x%x/x%x did:x%x",
1632 irsp->ulpStatus, irsp->un.ulpWord[4],
1633 irsp->un.elsreq64.remoteID);
1634
2e0fef85 1635 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
e47c9093 1636 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
e8b62011
JS
1637 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1638 "0136 PLOGI completes to NPort x%x "
1639 "with no ndlp. Data: x%x x%x x%x\n",
1640 irsp->un.elsreq64.remoteID,
1641 irsp->ulpStatus, irsp->un.ulpWord[4],
1642 irsp->ulpIoTag);
488d1469 1643 goto out;
ed957684 1644 }
dea3101e
JB
1645
1646 /* Since ndlp can be freed in the disc state machine, note if this node
1647 * is being used during discovery.
1648 */
2e0fef85 1649 spin_lock_irq(shost->host_lock);
dea3101e 1650 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
488d1469 1651 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2e0fef85 1652 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1653 rc = 0;
1654
1655 /* PLOGI completes to NPort <nlp_DID> */
e8b62011
JS
1656 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1657 "0102 PLOGI completes to NPort x%x "
1658 "Data: x%x x%x x%x x%x x%x\n",
1659 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1660 irsp->ulpTimeout, disc, vport->num_disc_nodes);
dea3101e 1661 /* Check to see if link went down during discovery */
2e0fef85
JS
1662 if (lpfc_els_chk_latt(vport)) {
1663 spin_lock_irq(shost->host_lock);
dea3101e 1664 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 1665 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1666 goto out;
1667 }
1668
1669 /* ndlp could be freed in DSM, save these values now */
1670 type = ndlp->nlp_type;
1671 did = ndlp->nlp_DID;
1672
1673 if (irsp->ulpStatus) {
1674 /* Check for retry */
1675 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1676 /* ELS command is being retried */
1677 if (disc) {
2e0fef85 1678 spin_lock_irq(shost->host_lock);
dea3101e 1679 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 1680 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1681 }
1682 goto out;
1683 }
2a9bf3d0
JS
 1684 /* PLOGI failed. Don't print the vport-to-vport rjts */
1685 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1686 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1687 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1688 (phba)->pport->cfg_log_verbose & LOG_ELS)
1689 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
e40a02c1
JS
1690 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1691 ndlp->nlp_DID, irsp->ulpStatus,
1692 irsp->un.ulpWord[4]);
dea3101e 1693 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 1694 if (lpfc_error_lost_link(irsp))
c9f8735b 1695 rc = NLP_STE_FREED_NODE;
e47c9093 1696 else
2e0fef85 1697 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1698 NLP_EVT_CMPL_PLOGI);
dea3101e
JB
1699 } else {
1700 /* Good status, call state machine */
92795650 1701 prsp = list_entry(((struct lpfc_dmabuf *)
92d7f7b0
JS
1702 cmdiocb->context2)->list.next,
1703 struct lpfc_dmabuf, list);
1704 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
2e0fef85 1705 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1706 NLP_EVT_CMPL_PLOGI);
dea3101e
JB
1707 }
1708
2e0fef85 1709 if (disc && vport->num_disc_nodes) {
dea3101e 1710 /* Check to see if there are more PLOGIs to be sent */
2e0fef85 1711 lpfc_more_plogi(vport);
dea3101e 1712
2e0fef85
JS
1713 if (vport->num_disc_nodes == 0) {
1714 spin_lock_irq(shost->host_lock);
1715 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1716 spin_unlock_irq(shost->host_lock);
dea3101e 1717
2e0fef85 1718 lpfc_can_disctmo(vport);
87af33fe 1719 lpfc_end_rscn(vport);
dea3101e
JB
1720 }
1721 }
1722
1723out:
1724 lpfc_els_free_iocb(phba, cmdiocb);
1725 return;
1726}
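/*
 * Editor's note: a standalone sketch (not driver code) of the overall shape
 * of the PLOGI completion handling above: on an error status, first ask the
 * retry logic whether the command is being re-issued, and only feed a
 * completion event into the discovery state machine when it is not; a good
 * status goes straight to the state machine. All names are hypothetical.
 */
#include <stdbool.h>

enum demo_evt { DEMO_EVT_CMPL_PLOGI };

static bool demo_retrying(int status)			{ return status == 1; }	/* toy policy */
static void demo_state_machine(enum demo_evt evt)	{ (void)evt; }

static void demo_cmpl_plogi(int ulp_status)
{
	if (ulp_status) {
		if (demo_retrying(ulp_status))
			return;					/* command is being retried */
		demo_state_machine(DEMO_EVT_CMPL_PLOGI);	/* report the failure */
		return;
	}
	demo_state_machine(DEMO_EVT_CMPL_PLOGI);		/* good completion */
}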
1727
e59058c4 1728/**
3621a710 1729 * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport
e59058c4
JS
1730 * @vport: pointer to a host virtual N_Port data structure.
1731 * @did: destination port identifier.
1732 * @retry: number of retries to the command IOCB.
1733 *
1734 * This routine issues a Port Login (PLOGI) command to a remote N_Port
1735 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
1736 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
 1737 * This routine constructs the proper fields of the PLOGI IOCB and invokes
1738 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
1739 *
1740 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1741 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1742 * will be stored into the context1 field of the IOCB for the completion
1743 * callback function to the PLOGI ELS command.
1744 *
1745 * Return code
1746 * 0 - Successfully issued a plogi for @vport
1747 * 1 - failed to issue a plogi for @vport
1748 **/
dea3101e 1749int
2e0fef85 1750lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
dea3101e 1751{
2e0fef85 1752 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
1753 struct serv_parm *sp;
1754 IOCB_t *icmd;
98c9ea5c 1755 struct lpfc_nodelist *ndlp;
dea3101e 1756 struct lpfc_iocbq *elsiocb;
dea3101e
JB
1757 struct lpfc_sli *psli;
1758 uint8_t *pcmd;
1759 uint16_t cmdsize;
92d7f7b0 1760 int ret;
dea3101e
JB
1761
1762 psli = &phba->sli;
dea3101e 1763
98c9ea5c 1764 ndlp = lpfc_findnode_did(vport, did);
e47c9093
JS
1765 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1766 ndlp = NULL;
98c9ea5c 1767
e47c9093 1768 /* If ndlp is not NULL, we will bump the reference count on it */
92d7f7b0 1769 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
98c9ea5c 1770 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
2e0fef85 1771 ELS_CMD_PLOGI);
c9f8735b
JW
1772 if (!elsiocb)
1773 return 1;
dea3101e
JB
1774
1775 icmd = &elsiocb->iocb;
1776 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1777
1778 /* For PLOGI request, remainder of payload is service parameters */
1779 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
92d7f7b0
JS
1780 pcmd += sizeof(uint32_t);
1781 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
dea3101e
JB
1782 sp = (struct serv_parm *) pcmd;
1783
5ac6b303
JS
1784 /*
1785 * If we are a N-port connected to a Fabric, fix-up paramm's so logins
1786 * to device on remote loops work.
1787 */
1788 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
1789 sp->cmn.altBbCredit = 1;
1790
dea3101e
JB
1791 if (sp->cmn.fcphLow < FC_PH_4_3)
1792 sp->cmn.fcphLow = FC_PH_4_3;
1793
1794 if (sp->cmn.fcphHigh < FC_PH3)
1795 sp->cmn.fcphHigh = FC_PH3;
1796
858c9f6c
JS
1797 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1798 "Issue PLOGI: did:x%x",
1799 did, 0, 0);
1800
dea3101e
JB
1801 phba->fc_stat.elsXmitPLOGI++;
1802 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
3772a991 1803 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
92d7f7b0
JS
1804
1805 if (ret == IOCB_ERROR) {
dea3101e 1806 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 1807 return 1;
dea3101e 1808 }
c9f8735b 1809 return 0;
dea3101e
JB
1810}
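/*
 * Editor's note: a standalone sketch (not driver code) of how the PLOGI
 * payload above is laid out: a 4-byte ELS command word followed by the
 * login service parameters copied from the local port. The structure and
 * command-word value below are hypothetical, trimmed-down stand-ins for
 * struct serv_parm and ELS_CMD_PLOGI.
 */
#include <stdint.h>
#include <string.h>

#define DEMO_ELS_CMD_PLOGI	0x03000000u	/* example command word only */

struct demo_serv_parm {				/* stand-in for struct serv_parm */
	uint16_t fcph_low;
	uint16_t fcph_high;
	uint8_t  port_name[8];
	uint8_t  node_name[8];
};

/* Build the PLOGI payload into buf; returns the number of bytes used. */
static size_t demo_build_plogi(uint8_t *buf, const struct demo_serv_parm *local_sp)
{
	uint32_t cmd = DEMO_ELS_CMD_PLOGI;

	memcpy(buf, &cmd, sizeof(cmd));				/* command word first */
	memcpy(buf + sizeof(cmd), local_sp, sizeof(*local_sp));	/* then service params */
	return sizeof(cmd) + sizeof(*local_sp);
}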
1811
e59058c4 1812/**
3621a710 1813 * lpfc_cmpl_els_prli - Completion callback function for prli
e59058c4
JS
1814 * @phba: pointer to lpfc hba data structure.
1815 * @cmdiocb: pointer to lpfc command iocb data structure.
1816 * @rspiocb: pointer to lpfc response iocb data structure.
1817 *
1818 * This routine is the completion callback function for a Process Login
1819 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
 1820 * status. If an error status is reported, PRLI retry shall be attempted
1821 * by invoking the lpfc_els_retry() routine. Otherwise, the state
1822 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
1823 * ndlp to mark the PRLI completion.
1824 **/
dea3101e 1825static void
2e0fef85
JS
1826lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1827 struct lpfc_iocbq *rspiocb)
dea3101e 1828{
2e0fef85
JS
1829 struct lpfc_vport *vport = cmdiocb->vport;
1830 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e
JB
1831 IOCB_t *irsp;
1832 struct lpfc_sli *psli;
1833 struct lpfc_nodelist *ndlp;
1834
1835 psli = &phba->sli;
1836 /* we pass cmdiocb to state machine which needs rspiocb as well */
1837 cmdiocb->context_un.rsp_iocb = rspiocb;
1838
1839 irsp = &(rspiocb->iocb);
1840 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2e0fef85 1841 spin_lock_irq(shost->host_lock);
dea3101e 1842 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2e0fef85 1843 spin_unlock_irq(shost->host_lock);
dea3101e 1844
858c9f6c
JS
1845 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1846 "PRLI cmpl: status:x%x/x%x did:x%x",
1847 irsp->ulpStatus, irsp->un.ulpWord[4],
1848 ndlp->nlp_DID);
dea3101e 1849 /* PRLI completes to NPort <nlp_DID> */
e8b62011
JS
1850 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1851 "0103 PRLI completes to NPort x%x "
1852 "Data: x%x x%x x%x x%x\n",
1853 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1854 irsp->ulpTimeout, vport->num_disc_nodes);
dea3101e 1855
2e0fef85 1856 vport->fc_prli_sent--;
dea3101e 1857 /* Check to see if link went down during discovery */
2e0fef85 1858 if (lpfc_els_chk_latt(vport))
dea3101e
JB
1859 goto out;
1860
1861 if (irsp->ulpStatus) {
1862 /* Check for retry */
1863 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1864 /* ELS command is being retried */
1865 goto out;
1866 }
1867 /* PRLI failed */
e40a02c1
JS
1868 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1869 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
1870 ndlp->nlp_DID, irsp->ulpStatus,
1871 irsp->un.ulpWord[4]);
dea3101e 1872 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 1873 if (lpfc_error_lost_link(irsp))
dea3101e 1874 goto out;
e47c9093 1875 else
2e0fef85 1876 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1877 NLP_EVT_CMPL_PRLI);
e47c9093 1878 } else
dea3101e 1879 /* Good status, call state machine */
2e0fef85 1880 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1881 NLP_EVT_CMPL_PRLI);
dea3101e
JB
1882out:
1883 lpfc_els_free_iocb(phba, cmdiocb);
1884 return;
1885}
1886
e59058c4 1887/**
3621a710 1888 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
e59058c4
JS
1889 * @vport: pointer to a host virtual N_Port data structure.
1890 * @ndlp: pointer to a node-list data structure.
1891 * @retry: number of retries to the command IOCB.
1892 *
1893 * This routine issues a Process Login (PRLI) ELS command for the
1894 * @vport. The PRLI service parameters are set up in the payload of the
1895 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
1896 * is put to the IOCB completion callback func field before invoking the
1897 * routine lpfc_sli_issue_iocb() to send out PRLI command.
1898 *
1899 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1900 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1901 * will be stored into the context1 field of the IOCB for the completion
1902 * callback function to the PRLI ELS command.
1903 *
1904 * Return code
1905 * 0 - successfully issued prli iocb command for @vport
1906 * 1 - failed to issue prli iocb command for @vport
1907 **/
dea3101e 1908int
2e0fef85 1909lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e
JB
1910 uint8_t retry)
1911{
2e0fef85
JS
1912 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1913 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
1914 PRLI *npr;
1915 IOCB_t *icmd;
1916 struct lpfc_iocbq *elsiocb;
dea3101e
JB
1917 uint8_t *pcmd;
1918 uint16_t cmdsize;
1919
92d7f7b0 1920 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2e0fef85
JS
1921 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1922 ndlp->nlp_DID, ELS_CMD_PRLI);
488d1469 1923 if (!elsiocb)
c9f8735b 1924 return 1;
dea3101e
JB
1925
1926 icmd = &elsiocb->iocb;
1927 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1928
1929 /* For PRLI request, remainder of payload is service parameters */
92d7f7b0 1930 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
dea3101e 1931 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
92d7f7b0 1932 pcmd += sizeof(uint32_t);
dea3101e
JB
1933
1934 /* For PRLI, remainder of payload is PRLI parameter page */
1935 npr = (PRLI *) pcmd;
1936 /*
1937 * If our firmware version is 3.20 or later,
1938 * set the following bits for FC-TAPE support.
1939 */
1940 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
1941 npr->ConfmComplAllowed = 1;
1942 npr->Retry = 1;
1943 npr->TaskRetryIdReq = 1;
1944 }
1945 npr->estabImagePair = 1;
1946 npr->readXferRdyDis = 1;
1947
1948 /* For FCP support */
1949 npr->prliType = PRLI_FCP_TYPE;
1950 npr->initiatorFunc = 1;
1951
858c9f6c
JS
1952 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1953 "Issue PRLI: did:x%x",
1954 ndlp->nlp_DID, 0, 0);
1955
dea3101e
JB
1956 phba->fc_stat.elsXmitPRLI++;
1957 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
2e0fef85 1958 spin_lock_irq(shost->host_lock);
dea3101e 1959 ndlp->nlp_flag |= NLP_PRLI_SND;
2e0fef85 1960 spin_unlock_irq(shost->host_lock);
3772a991
JS
1961 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1962 IOCB_ERROR) {
2e0fef85 1963 spin_lock_irq(shost->host_lock);
dea3101e 1964 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2e0fef85 1965 spin_unlock_irq(shost->host_lock);
dea3101e 1966 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 1967 return 1;
dea3101e 1968 }
2e0fef85 1969 vport->fc_prli_sent++;
c9f8735b 1970 return 0;
dea3101e
JB
1971}
1972
90160e01 1973/**
3621a710 1974 * lpfc_rscn_disc - Perform rscn discovery for a vport
90160e01
JS
1975 * @vport: pointer to a host virtual N_Port data structure.
1976 *
1977 * This routine performs Registration State Change Notification (RSCN)
1978 * discovery for a @vport. If the @vport's node port recovery count is not
 1979 * zero, it will invoke the lpfc_els_disc_plogi() routine to perform PLOGI
 1980 * for all the nodes that need recovery. If no PLOGIs were needed by the
 1981 * lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
 1982 * invoked to check for and handle any additional RSCNs that came in while
 1983 * the current ones were being processed.
1984 **/
1985static void
1986lpfc_rscn_disc(struct lpfc_vport *vport)
1987{
1988 lpfc_can_disctmo(vport);
1989
1990 /* RSCN discovery */
1991 /* go thru NPR nodes and issue ELS PLOGIs */
1992 if (vport->fc_npr_cnt)
1993 if (lpfc_els_disc_plogi(vport))
1994 return;
1995
1996 lpfc_end_rscn(vport);
1997}
1998
1999/**
3621a710 2000 * lpfc_adisc_done - Complete the adisc phase of discovery
90160e01
JS
2001 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2002 *
2003 * This function is called when the final ADISC is completed during discovery.
2004 * This function handles clearing link attention or issuing reg_vpi depending
2005 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2006 * discovery.
2007 * This function is called with no locks held.
2008 **/
2009static void
2010lpfc_adisc_done(struct lpfc_vport *vport)
2011{
2012 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2013 struct lpfc_hba *phba = vport->phba;
2014
2015 /*
2016 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2017 * and continue discovery.
2018 */
2019 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
6fb120a7
JS
2020 !(vport->fc_flag & FC_RSCN_MODE) &&
2021 (phba->sli_rev < LPFC_SLI_REV4)) {
90160e01
JS
2022 lpfc_issue_reg_vpi(phba, vport);
2023 return;
2024 }
2025 /*
2026 * For SLI2, we need to set port_state to READY
2027 * and continue discovery.
2028 */
2029 if (vport->port_state < LPFC_VPORT_READY) {
2030 /* If we get here, there is nothing to ADISC */
2031 if (vport->port_type == LPFC_PHYSICAL_PORT)
2032 lpfc_issue_clear_la(phba, vport);
2033 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2034 vport->num_disc_nodes = 0;
2035 /* go thru NPR list, issue ELS PLOGIs */
2036 if (vport->fc_npr_cnt)
2037 lpfc_els_disc_plogi(vport);
2038 if (!vport->num_disc_nodes) {
2039 spin_lock_irq(shost->host_lock);
2040 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2041 spin_unlock_irq(shost->host_lock);
2042 lpfc_can_disctmo(vport);
2043 lpfc_end_rscn(vport);
2044 }
2045 }
2046 vport->port_state = LPFC_VPORT_READY;
2047 } else
2048 lpfc_rscn_disc(vport);
2049}
2050
e59058c4 2051/**
3621a710 2052 * lpfc_more_adisc - Issue more adisc as needed
e59058c4
JS
2053 * @vport: pointer to a host virtual N_Port data structure.
2054 *
2055 * This routine determines whether there are more ndlps on a @vport
 2056 * node list that need to have Address Discover (ADISC) issued. If so, it will
2057 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2058 * remaining nodes which need to have ADISC sent.
2059 **/
0ff10d46 2060void
2e0fef85 2061lpfc_more_adisc(struct lpfc_vport *vport)
dea3101e
JB
2062{
2063 int sentadisc;
2064
2e0fef85
JS
2065 if (vport->num_disc_nodes)
2066 vport->num_disc_nodes--;
dea3101e 2067 /* Continue discovery with <num_disc_nodes> ADISCs to go */
e8b62011
JS
2068 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2069 "0210 Continue discovery with %d ADISCs to go "
2070 "Data: x%x x%x x%x\n",
2071 vport->num_disc_nodes, vport->fc_adisc_cnt,
2072 vport->fc_flag, vport->port_state);
dea3101e 2073 /* Check to see if there are more ADISCs to be sent */
2e0fef85
JS
2074 if (vport->fc_flag & FC_NLP_MORE) {
2075 lpfc_set_disctmo(vport);
2076 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2077 sentadisc = lpfc_els_disc_adisc(vport);
dea3101e 2078 }
90160e01
JS
2079 if (!vport->num_disc_nodes)
2080 lpfc_adisc_done(vport);
dea3101e
JB
2081 return;
2082}
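/*
 * Editor's note: a standalone sketch (not driver code) of the counting
 * pattern used above: each completed ADISC decrements the outstanding
 * count, more ADISCs are issued while nodes are still waiting, and the
 * ADISC phase is declared done once the count reaches zero. Names are
 * hypothetical.
 */
#include <stdbool.h>

struct demo_disc {
	int  num_disc_nodes;	/* ADISCs still outstanding */
	bool more_pending;	/* nodes still waiting for an ADISC to be issued */
};

static void demo_issue_remaining_adiscs(struct demo_disc *d)	{ (void)d; }
static void demo_adisc_done(struct demo_disc *d)		{ (void)d; }

static void demo_more_adisc(struct demo_disc *d)
{
	if (d->num_disc_nodes)
		d->num_disc_nodes--;		/* one ADISC just completed */
	if (d->more_pending)
		demo_issue_remaining_adiscs(d);	/* keep the pipeline full */
	if (!d->num_disc_nodes)
		demo_adisc_done(d);		/* ADISC phase complete */
}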
2083
e59058c4 2084/**
3621a710 2085 * lpfc_cmpl_els_adisc - Completion callback function for adisc
e59058c4
JS
2086 * @phba: pointer to lpfc hba data structure.
2087 * @cmdiocb: pointer to lpfc command iocb data structure.
2088 * @rspiocb: pointer to lpfc response iocb data structure.
2089 *
2090 * This routine is the completion function for issuing the Address Discover
2091 * (ADISC) command. It first checks to see whether link went down during
2092 * the discovery process. If so, the node will be marked as node port
2093 * recovery for issuing discover IOCB by the link attention handler and
2094 * exit. Otherwise, the response status is checked. If error was reported
2095 * in the response status, the ADISC command shall be retried by invoking
2096 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
2097 * the response status, the state machine is invoked to set transition
2098 * with respect to NLP_EVT_CMPL_ADISC event.
2099 **/
dea3101e 2100static void
2e0fef85
JS
2101lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2102 struct lpfc_iocbq *rspiocb)
dea3101e 2103{
2e0fef85
JS
2104 struct lpfc_vport *vport = cmdiocb->vport;
2105 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 2106 IOCB_t *irsp;
dea3101e 2107 struct lpfc_nodelist *ndlp;
2e0fef85 2108 int disc;
dea3101e
JB
2109
2110 /* we pass cmdiocb to state machine which needs rspiocb as well */
2111 cmdiocb->context_un.rsp_iocb = rspiocb;
2112
2113 irsp = &(rspiocb->iocb);
2114 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
dea3101e 2115
858c9f6c
JS
2116 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2117 "ADISC cmpl: status:x%x/x%x did:x%x",
2118 irsp->ulpStatus, irsp->un.ulpWord[4],
2119 ndlp->nlp_DID);
2120
dea3101e
JB
2121 /* Since ndlp can be freed in the disc state machine, note if this node
2122 * is being used during discovery.
2123 */
2e0fef85 2124 spin_lock_irq(shost->host_lock);
dea3101e 2125 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
c9f8735b 2126 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2e0fef85 2127 spin_unlock_irq(shost->host_lock);
dea3101e 2128 /* ADISC completes to NPort <nlp_DID> */
e8b62011
JS
2129 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2130 "0104 ADISC completes to NPort x%x "
2131 "Data: x%x x%x x%x x%x x%x\n",
2132 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2133 irsp->ulpTimeout, disc, vport->num_disc_nodes);
dea3101e 2134 /* Check to see if link went down during discovery */
2e0fef85
JS
2135 if (lpfc_els_chk_latt(vport)) {
2136 spin_lock_irq(shost->host_lock);
dea3101e 2137 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 2138 spin_unlock_irq(shost->host_lock);
dea3101e
JB
2139 goto out;
2140 }
2141
2142 if (irsp->ulpStatus) {
2143 /* Check for retry */
2144 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2145 /* ELS command is being retried */
2146 if (disc) {
2e0fef85 2147 spin_lock_irq(shost->host_lock);
dea3101e 2148 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85
JS
2149 spin_unlock_irq(shost->host_lock);
2150 lpfc_set_disctmo(vport);
dea3101e
JB
2151 }
2152 goto out;
2153 }
2154 /* ADISC failed */
e40a02c1
JS
2155 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2156 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2157 ndlp->nlp_DID, irsp->ulpStatus,
2158 irsp->un.ulpWord[4]);
dea3101e 2159 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 2160 if (!lpfc_error_lost_link(irsp))
2e0fef85 2161 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
858c9f6c 2162 NLP_EVT_CMPL_ADISC);
e47c9093 2163 } else
dea3101e 2164 /* Good status, call state machine */
2e0fef85 2165 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
dea3101e 2166 NLP_EVT_CMPL_ADISC);
dea3101e 2167
90160e01
JS
2168 /* Check to see if there are more ADISCs to be sent */
2169 if (disc && vport->num_disc_nodes)
2e0fef85 2170 lpfc_more_adisc(vport);
dea3101e
JB
2171out:
2172 lpfc_els_free_iocb(phba, cmdiocb);
2173 return;
2174}
2175
e59058c4 2176/**
3621a710 2177 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
e59058c4
JS
2178 * @vport: pointer to a virtual N_Port data structure.
2179 * @ndlp: pointer to a node-list data structure.
2180 * @retry: number of retries to the command IOCB.
2181 *
2182 * This routine issues an Address Discover (ADISC) for an @ndlp on a
 2183 * @vport. It prepares the payload of the ADISC ELS command, updates the
 2184 * nlp_flag of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2185 * to issue the ADISC ELS command.
2186 *
2187 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2188 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2189 * will be stored into the context1 field of the IOCB for the completion
2190 * callback function to the ADISC ELS command.
2191 *
2192 * Return code
2193 * 0 - successfully issued adisc
2194 * 1 - failed to issue adisc
2195 **/
dea3101e 2196int
2e0fef85 2197lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e
JB
2198 uint8_t retry)
2199{
2e0fef85
JS
2200 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2201 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
2202 ADISC *ap;
2203 IOCB_t *icmd;
2204 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2205 uint8_t *pcmd;
2206 uint16_t cmdsize;
2207
92d7f7b0 2208 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2e0fef85
JS
2209 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2210 ndlp->nlp_DID, ELS_CMD_ADISC);
488d1469 2211 if (!elsiocb)
c9f8735b 2212 return 1;
dea3101e
JB
2213
2214 icmd = &elsiocb->iocb;
2215 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2216
2217 /* For ADISC request, remainder of payload is service parameters */
2218 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
92d7f7b0 2219 pcmd += sizeof(uint32_t);
dea3101e
JB
2220
2221 /* Fill in ADISC payload */
2222 ap = (ADISC *) pcmd;
2223 ap->hardAL_PA = phba->fc_pref_ALPA;
92d7f7b0
JS
2224 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2225 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 2226 ap->DID = be32_to_cpu(vport->fc_myDID);
dea3101e 2227
858c9f6c
JS
2228 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2229 "Issue ADISC: did:x%x",
2230 ndlp->nlp_DID, 0, 0);
2231
dea3101e
JB
2232 phba->fc_stat.elsXmitADISC++;
2233 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2e0fef85 2234 spin_lock_irq(shost->host_lock);
dea3101e 2235 ndlp->nlp_flag |= NLP_ADISC_SND;
2e0fef85 2236 spin_unlock_irq(shost->host_lock);
3772a991
JS
2237 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2238 IOCB_ERROR) {
2e0fef85 2239 spin_lock_irq(shost->host_lock);
dea3101e 2240 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2e0fef85 2241 spin_unlock_irq(shost->host_lock);
dea3101e 2242 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2243 return 1;
dea3101e 2244 }
c9f8735b 2245 return 0;
dea3101e
JB
2246}
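/*
 * Editor's note: a standalone sketch (not driver code) of the ADISC
 * payload built above: the hard ALPA, the local port and node names, and
 * the local D_ID stored in wire (big-endian) byte order. The structure
 * layout and names are hypothetical stand-ins for the ADISC parameter
 * page.
 */
#include <stdint.h>
#include <string.h>

struct demo_adisc {
	uint32_t hard_alpa;
	uint8_t  port_name[8];
	uint8_t  node_name[8];
	uint32_t did_be;	/* D_ID in big-endian byte order */
};

/* Byte swap; on a little-endian host this produces big-endian wire order. */
static uint32_t demo_swab32(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

static void demo_build_adisc(struct demo_adisc *ap, uint32_t my_did,
			     const uint8_t wwpn[8], const uint8_t wwnn[8])
{
	memset(ap, 0, sizeof(*ap));
	ap->hard_alpa = 0;			/* no hard ALPA preference */
	memcpy(ap->port_name, wwpn, 8);
	memcpy(ap->node_name, wwnn, 8);
	ap->did_be = demo_swab32(my_did);	/* assumes little-endian host */
}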
2247
e59058c4 2248/**
3621a710 2249 * lpfc_cmpl_els_logo - Completion callback function for logo
e59058c4
JS
2250 * @phba: pointer to lpfc hba data structure.
2251 * @cmdiocb: pointer to lpfc command iocb data structure.
2252 * @rspiocb: pointer to lpfc response iocb data structure.
2253 *
2254 * This routine is the completion function for issuing the ELS Logout (LOGO)
2255 * command. If no error status was reported from the LOGO response, the
2256 * state machine of the associated ndlp shall be invoked for transition with
2257 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2258 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2259 **/
dea3101e 2260static void
2e0fef85
JS
2261lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2262 struct lpfc_iocbq *rspiocb)
dea3101e 2263{
2e0fef85
JS
2264 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2265 struct lpfc_vport *vport = ndlp->vport;
2266 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e
JB
2267 IOCB_t *irsp;
2268 struct lpfc_sli *psli;
92494144 2269 struct lpfcMboxq *mbox;
dea3101e
JB
2270
2271 psli = &phba->sli;
2272 /* we pass cmdiocb to state machine which needs rspiocb as well */
2273 cmdiocb->context_un.rsp_iocb = rspiocb;
2274
2275 irsp = &(rspiocb->iocb);
2e0fef85 2276 spin_lock_irq(shost->host_lock);
dea3101e 2277 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2e0fef85 2278 spin_unlock_irq(shost->host_lock);
dea3101e 2279
858c9f6c
JS
2280 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2281 "LOGO cmpl: status:x%x/x%x did:x%x",
2282 irsp->ulpStatus, irsp->un.ulpWord[4],
2283 ndlp->nlp_DID);
dea3101e 2284 /* LOGO completes to NPort <nlp_DID> */
e8b62011
JS
2285 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2286 "0105 LOGO completes to NPort x%x "
2287 "Data: x%x x%x x%x x%x\n",
2288 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2289 irsp->ulpTimeout, vport->num_disc_nodes);
dea3101e 2290 /* Check to see if link went down during discovery */
2e0fef85 2291 if (lpfc_els_chk_latt(vport))
dea3101e
JB
2292 goto out;
2293
92d7f7b0
JS
2294 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2295 /* NLP_EVT_DEVICE_RM should unregister the RPI
2296 * which should abort all outstanding IOs.
2297 */
2298 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2299 NLP_EVT_DEVICE_RM);
2300 goto out;
2301 }
2302
dea3101e
JB
2303 if (irsp->ulpStatus) {
2304 /* Check for retry */
2e0fef85 2305 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
dea3101e
JB
2306 /* ELS command is being retried */
2307 goto out;
dea3101e 2308 /* LOGO failed */
e40a02c1
JS
2309 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2310 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2311 ndlp->nlp_DID, irsp->ulpStatus,
2312 irsp->un.ulpWord[4]);
dea3101e 2313 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
858c9f6c 2314 if (lpfc_error_lost_link(irsp))
dea3101e 2315 goto out;
858c9f6c 2316 else
2e0fef85 2317 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 2318 NLP_EVT_CMPL_LOGO);
e47c9093 2319 } else
5024ab17
JW
2320 /* Good status, call state machine.
2321 * This will unregister the rpi if needed.
2322 */
2e0fef85 2323 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 2324 NLP_EVT_CMPL_LOGO);
dea3101e
JB
2325out:
2326 lpfc_els_free_iocb(phba, cmdiocb);
92494144
JS
2327 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
2328 if ((vport->fc_flag & FC_PT2PT) &&
2329 !(vport->fc_flag & FC_PT2PT_PLOGI)) {
2330 phba->pport->fc_myDID = 0;
2331 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2332 if (mbox) {
2333 lpfc_config_link(phba, mbox);
2334 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2335 mbox->vport = vport;
2336 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
2337 MBX_NOT_FINISHED) {
2338 mempool_free(mbox, phba->mbox_mem_pool);
2339 }
2340 }
2341 }
dea3101e
JB
2342 return;
2343}
2344
e59058c4 2345/**
3621a710 2346 * lpfc_issue_els_logo - Issue a logo to a node on a vport
e59058c4
JS
2347 * @vport: pointer to a virtual N_Port data structure.
2348 * @ndlp: pointer to a node-list data structure.
2349 * @retry: number of retries to the command IOCB.
2350 *
2351 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2352 * to a remote node, referred by an @ndlp on a @vport. It constructs the
2353 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2354 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2355 *
2356 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2357 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2358 * will be stored into the context1 field of the IOCB for the completion
2359 * callback function to the LOGO ELS command.
2360 *
2361 * Return code
2362 * 0 - successfully issued logo
2363 * 1 - failed to issue logo
2364 **/
dea3101e 2365int
2e0fef85 2366lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e
JB
2367 uint8_t retry)
2368{
2e0fef85
JS
2369 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2370 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
2371 IOCB_t *icmd;
2372 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2373 uint8_t *pcmd;
2374 uint16_t cmdsize;
92d7f7b0 2375 int rc;
dea3101e 2376
98c9ea5c
JS
2377 spin_lock_irq(shost->host_lock);
2378 if (ndlp->nlp_flag & NLP_LOGO_SND) {
2379 spin_unlock_irq(shost->host_lock);
2380 return 0;
2381 }
2382 spin_unlock_irq(shost->host_lock);
2383
92d7f7b0 2384 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2e0fef85
JS
2385 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2386 ndlp->nlp_DID, ELS_CMD_LOGO);
488d1469 2387 if (!elsiocb)
c9f8735b 2388 return 1;
dea3101e
JB
2389
2390 icmd = &elsiocb->iocb;
2391 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2392 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
92d7f7b0 2393 pcmd += sizeof(uint32_t);
dea3101e
JB
2394
2395 /* Fill in LOGO payload */
2e0fef85 2396 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
92d7f7b0
JS
2397 pcmd += sizeof(uint32_t);
2398 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
dea3101e 2399
858c9f6c
JS
2400 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2401 "Issue LOGO: did:x%x",
2402 ndlp->nlp_DID, 0, 0);
2403
dea3101e
JB
2404 phba->fc_stat.elsXmitLOGO++;
2405 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2e0fef85 2406 spin_lock_irq(shost->host_lock);
dea3101e 2407 ndlp->nlp_flag |= NLP_LOGO_SND;
2e0fef85 2408 spin_unlock_irq(shost->host_lock);
3772a991 2409 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
92d7f7b0
JS
2410
2411 if (rc == IOCB_ERROR) {
2e0fef85 2412 spin_lock_irq(shost->host_lock);
dea3101e 2413 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2e0fef85 2414 spin_unlock_irq(shost->host_lock);
dea3101e 2415 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2416 return 1;
dea3101e 2417 }
c9f8735b 2418 return 0;
dea3101e
JB
2419}
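/*
 * Editor's note: a standalone sketch (not driver code) of the guard used
 * above: a "LOGO send pending" flag is tested and set under a lock so a
 * second LOGO is not issued while one is already outstanding, and the flag
 * is cleared again if the issue fails. The pthread mutex stands in for the
 * host lock; all names are hypothetical.
 */
#include <pthread.h>

#define DEMO_NLP_LOGO_SND	0x0100u

struct demo_node {
	pthread_mutex_t lock;
	unsigned int flags;
};

static int demo_send_logo_iocb(struct demo_node *np) { (void)np; return 0; }

/* Returns 0 on success (or when a LOGO is already in flight), 1 on failure. */
static int demo_issue_logo(struct demo_node *np)
{
	pthread_mutex_lock(&np->lock);
	if (np->flags & DEMO_NLP_LOGO_SND) {		/* already outstanding */
		pthread_mutex_unlock(&np->lock);
		return 0;
	}
	np->flags |= DEMO_NLP_LOGO_SND;
	pthread_mutex_unlock(&np->lock);

	if (demo_send_logo_iocb(np) != 0) {		/* issue failed: undo the flag */
		pthread_mutex_lock(&np->lock);
		np->flags &= ~DEMO_NLP_LOGO_SND;
		pthread_mutex_unlock(&np->lock);
		return 1;
	}
	return 0;
}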
2420
e59058c4 2421/**
3621a710 2422 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
e59058c4
JS
2423 * @phba: pointer to lpfc hba data structure.
2424 * @cmdiocb: pointer to lpfc command iocb data structure.
2425 * @rspiocb: pointer to lpfc response iocb data structure.
2426 *
2427 * This routine is a generic completion callback function for ELS commands.
2428 * Specifically, it is the callback function which does not need to perform
2429 * any command specific operations. It is currently used by the ELS command
2430 * issuing routines for the ELS State Change Request (SCR),
2431 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2432 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
 2433 * certain debug logging, this callback function simply invokes the
2434 * lpfc_els_chk_latt() routine to check whether link went down during the
2435 * discovery process.
2436 **/
dea3101e 2437static void
2e0fef85
JS
2438lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2439 struct lpfc_iocbq *rspiocb)
dea3101e 2440{
2e0fef85 2441 struct lpfc_vport *vport = cmdiocb->vport;
dea3101e
JB
2442 IOCB_t *irsp;
2443
2444 irsp = &rspiocb->iocb;
2445
858c9f6c
JS
2446 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2447 "ELS cmd cmpl: status:x%x/x%x did:x%x",
2448 irsp->ulpStatus, irsp->un.ulpWord[4],
2449 irsp->un.elsreq64.remoteID);
dea3101e 2450 /* ELS cmd tag <ulpIoTag> completes */
e8b62011
JS
2451 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2452 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2453 irsp->ulpIoTag, irsp->ulpStatus,
2454 irsp->un.ulpWord[4], irsp->ulpTimeout);
dea3101e 2455 /* Check to see if link went down during discovery */
2e0fef85 2456 lpfc_els_chk_latt(vport);
dea3101e
JB
2457 lpfc_els_free_iocb(phba, cmdiocb);
2458 return;
2459}
2460
e59058c4 2461/**
3621a710 2462 * lpfc_issue_els_scr - Issue a scr to a node on a vport
e59058c4
JS
2463 * @vport: pointer to a host virtual N_Port data structure.
2464 * @nportid: N_Port identifier to the remote node.
2465 * @retry: number of retries to the command IOCB.
2466 *
2467 * This routine issues a State Change Request (SCR) to a fabric node
2468 * on a @vport. The remote node @nportid is passed into the function. It
2469 * first search the @vport node list to find the matching ndlp. If no such
2470 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2471 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2472 * routine is invoked to send the SCR IOCB.
2473 *
2474 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2475 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2476 * will be stored into the context1 field of the IOCB for the completion
2477 * callback function to the SCR ELS command.
2478 *
2479 * Return code
2480 * 0 - Successfully issued scr command
2481 * 1 - Failed to issue scr command
2482 **/
dea3101e 2483int
2e0fef85 2484lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
dea3101e 2485{
2e0fef85 2486 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
2487 IOCB_t *icmd;
2488 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2489 struct lpfc_sli *psli;
2490 uint8_t *pcmd;
2491 uint16_t cmdsize;
2492 struct lpfc_nodelist *ndlp;
2493
2494 psli = &phba->sli;
92d7f7b0 2495 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
dea3101e 2496
e47c9093
JS
2497 ndlp = lpfc_findnode_did(vport, nportid);
2498 if (!ndlp) {
2499 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2500 if (!ndlp)
2501 return 1;
2502 lpfc_nlp_init(vport, ndlp, nportid);
2503 lpfc_enqueue_node(vport, ndlp);
2504 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2505 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2506 if (!ndlp)
2507 return 1;
2508 }
2e0fef85
JS
2509
2510 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2511 ndlp->nlp_DID, ELS_CMD_SCR);
dea3101e 2512
488d1469 2513 if (!elsiocb) {
fa4066b6
JS
2514 /* This will trigger the release of the node just
2515 * allocated
2516 */
329f9bc7 2517 lpfc_nlp_put(ndlp);
c9f8735b 2518 return 1;
dea3101e
JB
2519 }
2520
2521 icmd = &elsiocb->iocb;
2522 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2523
2524 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
92d7f7b0 2525 pcmd += sizeof(uint32_t);
dea3101e
JB
2526
2527 /* For SCR, remainder of payload is SCR parameter page */
92d7f7b0 2528 memset(pcmd, 0, sizeof(SCR));
dea3101e
JB
2529 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
2530
858c9f6c
JS
2531 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2532 "Issue SCR: did:x%x",
2533 ndlp->nlp_DID, 0, 0);
2534
dea3101e
JB
2535 phba->fc_stat.elsXmitSCR++;
2536 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3772a991
JS
2537 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2538 IOCB_ERROR) {
fa4066b6
JS
2539 /* The additional lpfc_nlp_put will cause the following
 2540 * lpfc_els_free_iocb routine to trigger the release of
2541 * the node.
2542 */
329f9bc7 2543 lpfc_nlp_put(ndlp);
dea3101e 2544 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2545 return 1;
dea3101e 2546 }
fa4066b6
JS
2547 /* This will cause the callback-function lpfc_cmpl_els_cmd to
 2548 * trigger the release of the node.
2549 */
329f9bc7 2550 lpfc_nlp_put(ndlp);
c9f8735b 2551 return 0;
dea3101e
JB
2552}
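/*
 * Editor's note: a standalone sketch (not driver code) of the
 * find-or-create pattern used for SCR (and FARPR) above: look the node up
 * by DID and, if it is missing, allocate and enqueue one just for this
 * command. In the driver the extra lpfc_nlp_put() calls then balance the
 * reference taken for the request so such a temporary node can be released
 * once the command completes. All names below are hypothetical.
 */
#include <stdint.h>
#include <stdlib.h>

struct demo_node {
	uint32_t did;
};

static struct demo_node *demo_find_node(uint32_t did)	{ (void)did; return NULL; }
static void demo_enqueue_node(struct demo_node *np)	{ (void)np; }

/* Return the node for @did, creating and enqueueing one if necessary. */
static struct demo_node *demo_get_node_for_cmd(uint32_t did)
{
	struct demo_node *np = demo_find_node(did);

	if (!np) {
		np = calloc(1, sizeof(*np));	/* no node yet: create one */
		if (!np)
			return NULL;
		np->did = did;
		demo_enqueue_node(np);
	}
	return np;
}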
2553
e59058c4 2554/**
3621a710 2555 * lpfc_issue_els_farpr - Issue a farp to a node on a vport
e59058c4
JS
2556 * @vport: pointer to a host virtual N_Port data structure.
2557 * @nportid: N_Port identifier to the remote node.
2558 * @retry: number of retries to the command IOCB.
2559 *
2560 * This routine issues a Fibre Channel Address Resolution Response
2561 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
 2562 * is passed into the function. It first searches the @vport node list to find
2563 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
2564 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
2565 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
2566 *
2567 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2568 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2569 * will be stored into the context1 field of the IOCB for the completion
 2570 * callback function to the FARPR ELS command.
2571 *
2572 * Return code
2573 * 0 - Successfully issued farpr command
2574 * 1 - Failed to issue farpr command
2575 **/
dea3101e 2576static int
2e0fef85 2577lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
dea3101e 2578{
2e0fef85 2579 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
2580 IOCB_t *icmd;
2581 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2582 struct lpfc_sli *psli;
2583 FARP *fp;
2584 uint8_t *pcmd;
2585 uint32_t *lp;
2586 uint16_t cmdsize;
2587 struct lpfc_nodelist *ondlp;
2588 struct lpfc_nodelist *ndlp;
2589
2590 psli = &phba->sli;
92d7f7b0 2591 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
dea3101e 2592
e47c9093
JS
2593 ndlp = lpfc_findnode_did(vport, nportid);
2594 if (!ndlp) {
2595 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2596 if (!ndlp)
2597 return 1;
2598 lpfc_nlp_init(vport, ndlp, nportid);
2599 lpfc_enqueue_node(vport, ndlp);
2600 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2601 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2602 if (!ndlp)
2603 return 1;
2604 }
2e0fef85
JS
2605
2606 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2607 ndlp->nlp_DID, ELS_CMD_RNID);
488d1469 2608 if (!elsiocb) {
fa4066b6
JS
2609 /* This will trigger the release of the node just
2610 * allocated
2611 */
329f9bc7 2612 lpfc_nlp_put(ndlp);
c9f8735b 2613 return 1;
dea3101e
JB
2614 }
2615
2616 icmd = &elsiocb->iocb;
2617 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2618
2619 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
92d7f7b0 2620 pcmd += sizeof(uint32_t);
dea3101e
JB
2621
2622 /* Fill in FARPR payload */
2623 fp = (FARP *) (pcmd);
92d7f7b0 2624 memset(fp, 0, sizeof(FARP));
dea3101e
JB
2625 lp = (uint32_t *) pcmd;
2626 *lp++ = be32_to_cpu(nportid);
2e0fef85 2627 *lp++ = be32_to_cpu(vport->fc_myDID);
dea3101e
JB
2628 fp->Rflags = 0;
2629 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
2630
92d7f7b0
JS
2631 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
2632 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 2633 ondlp = lpfc_findnode_did(vport, nportid);
e47c9093 2634 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
dea3101e 2635 memcpy(&fp->OportName, &ondlp->nlp_portname,
92d7f7b0 2636 sizeof(struct lpfc_name));
dea3101e 2637 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
92d7f7b0 2638 sizeof(struct lpfc_name));
dea3101e
JB
2639 }
2640
858c9f6c
JS
2641 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2642 "Issue FARPR: did:x%x",
2643 ndlp->nlp_DID, 0, 0);
2644
dea3101e
JB
2645 phba->fc_stat.elsXmitFARPR++;
2646 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3772a991
JS
2647 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2648 IOCB_ERROR) {
fa4066b6
JS
2649 /* The additional lpfc_nlp_put will cause the following
2650 * lpfc_els_free_iocb routine to trigger the release of
2651 * the node.
2652 */
329f9bc7 2653 lpfc_nlp_put(ndlp);
dea3101e 2654 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2655 return 1;
dea3101e 2656 }
fa4066b6
JS
2657 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2658 * trigger the release of the node.
2659 */
329f9bc7 2660 lpfc_nlp_put(ndlp);
c9f8735b 2661 return 0;
dea3101e
JB
2662}
2663
e59058c4 2664/**
3621a710 2665 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
e59058c4
JS
2666 * @vport: pointer to a host virtual N_Port data structure.
2667 * @nlp: pointer to a node-list data structure.
2668 *
2669 * This routine cancels the timer with a delayed IOCB-command retry for
 2670 * a @vport's @ndlp. It stops the timer for the delayed function retry and
 2671 * removes the ELS retry event if it is present. In addition, if the
2672 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
2673 * commands are sent for the @vport's nodes that require issuing discovery
2674 * ADISC.
2675 **/
fdcebe28 2676void
2e0fef85 2677lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
fdcebe28 2678{
2e0fef85 2679 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
e47c9093 2680 struct lpfc_work_evt *evtp;
2e0fef85 2681
0d2b6b83
JS
2682 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
2683 return;
2e0fef85 2684 spin_lock_irq(shost->host_lock);
fdcebe28 2685 nlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 2686 spin_unlock_irq(shost->host_lock);
fdcebe28
JS
2687 del_timer_sync(&nlp->nlp_delayfunc);
2688 nlp->nlp_last_elscmd = 0;
e47c9093 2689 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
fdcebe28 2690 list_del_init(&nlp->els_retry_evt.evt_listp);
e47c9093
JS
2691 /* Decrement nlp reference count held for the delayed retry */
2692 evtp = &nlp->els_retry_evt;
2693 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
2694 }
fdcebe28 2695 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
2e0fef85 2696 spin_lock_irq(shost->host_lock);
fdcebe28 2697 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2e0fef85
JS
2698 spin_unlock_irq(shost->host_lock);
2699 if (vport->num_disc_nodes) {
0d2b6b83
JS
2700 if (vport->port_state < LPFC_VPORT_READY) {
2701 /* Check if there are more ADISCs to be sent */
2702 lpfc_more_adisc(vport);
0d2b6b83
JS
2703 } else {
2704 /* Check if there are more PLOGIs to be sent */
2705 lpfc_more_plogi(vport);
90160e01
JS
2706 if (vport->num_disc_nodes == 0) {
2707 spin_lock_irq(shost->host_lock);
2708 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2709 spin_unlock_irq(shost->host_lock);
2710 lpfc_can_disctmo(vport);
2711 lpfc_end_rscn(vport);
2712 }
fdcebe28
JS
2713 }
2714 }
2715 }
2716 return;
2717}
2718
e59058c4 2719/**
3621a710 2720 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
e59058c4
JS
2721 * @ptr: holder for the pointer to the timer function associated data (ndlp).
2722 *
2723 * This routine is invoked by the ndlp delayed-function timer to check
2724 * whether there is any pending ELS retry event(s) with the node. If not, it
2725 * simply returns. Otherwise, if there is at least one ELS delayed event, it
2726 * adds the delayed events to the HBA work list and invokes the
 2727 * lpfc_worker_wake_up() routine to wake up the worker thread to process the
 2728 * event. Note that lpfc_nlp_get() is called before posting the event to
 2729 * the work list to hold a reference count on the ndlp, guaranteeing that the
2730 * reference to ndlp will still be available when the worker thread gets
2731 * to the event associated with the ndlp.
2732 **/
dea3101e
JB
2733void
2734lpfc_els_retry_delay(unsigned long ptr)
2735{
2e0fef85
JS
2736 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2737 struct lpfc_vport *vport = ndlp->vport;
2e0fef85 2738 struct lpfc_hba *phba = vport->phba;
92d7f7b0 2739 unsigned long flags;
2e0fef85 2740 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
dea3101e 2741
92d7f7b0 2742 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 2743 if (!list_empty(&evtp->evt_listp)) {
92d7f7b0 2744 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e
JB
2745 return;
2746 }
2747
fa4066b6
JS
2748 /* We need to hold the node by incrementing the reference
2749 * count until the queued work is done
2750 */
2751 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
5e9d9b82
JS
2752 if (evtp->evt_arg1) {
2753 evtp->evt = LPFC_EVT_ELS_RETRY;
2754 list_add_tail(&evtp->evt_listp, &phba->work_list);
92d7f7b0 2755 lpfc_worker_wake_up(phba);
5e9d9b82 2756 }
92d7f7b0 2757 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e
JB
2758 return;
2759}
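/*
 * Editor's note: a standalone sketch (not driver code) of the handoff done
 * above: the timer callback does not retry the ELS command itself; it takes
 * a hold on the node, queues a retry event, and wakes the worker thread,
 * which performs the retry and then drops the hold. The fields and helpers
 * below are hypothetical stand-ins for the work list and lpfc_nlp_get/put.
 */
struct demo_node {
	int refcnt;
	int queued;		/* non-zero while a retry event is pending */
};

static void demo_worker_wake_up(void) { }

/* Timer callback: defer the actual retry to the worker thread. */
static void demo_retry_timer(struct demo_node *np)
{
	if (np->queued)
		return;			/* an event is already on the work list */
	np->refcnt++;			/* hold the node until the worker is done */
	np->queued = 1;			/* conceptually list_add_tail() on the work list */
	demo_worker_wake_up();
}

/* Worker-thread side: perform the retry, then release the hold. */
static void demo_retry_worker(struct demo_node *np)
{
	np->queued = 0;
	/* ... re-issue the saved ELS command here ... */
	np->refcnt--;
}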
2760
e59058c4 2761/**
3621a710 2762 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
e59058c4
JS
2763 * @ndlp: pointer to a node-list data structure.
2764 *
2765 * This routine is the worker-thread handler for processing the @ndlp delayed
2766 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
2767 * the last ELS command from the associated ndlp and invokes the proper ELS
2768 * function according to the delayed ELS command to retry the command.
2769 **/
dea3101e
JB
2770void
2771lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
2772{
2e0fef85
JS
2773 struct lpfc_vport *vport = ndlp->vport;
2774 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2775 uint32_t cmd, did, retry;
dea3101e 2776
2e0fef85 2777 spin_lock_irq(shost->host_lock);
5024ab17
JW
2778 did = ndlp->nlp_DID;
2779 cmd = ndlp->nlp_last_elscmd;
2780 ndlp->nlp_last_elscmd = 0;
dea3101e
JB
2781
2782 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2e0fef85 2783 spin_unlock_irq(shost->host_lock);
dea3101e
JB
2784 return;
2785 }
2786
2787 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 2788 spin_unlock_irq(shost->host_lock);
1a169689
JS
2789 /*
2790 * If a discovery event readded nlp_delayfunc after timer
2791 * firing and before processing the timer, cancel the
2792 * nlp_delayfunc.
2793 */
2794 del_timer_sync(&ndlp->nlp_delayfunc);
dea3101e 2795 retry = ndlp->nlp_retry;
4d9ab994 2796 ndlp->nlp_retry = 0;
dea3101e
JB
2797
2798 switch (cmd) {
2799 case ELS_CMD_FLOGI:
2e0fef85 2800 lpfc_issue_els_flogi(vport, ndlp, retry);
dea3101e
JB
2801 break;
2802 case ELS_CMD_PLOGI:
2e0fef85 2803 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
5024ab17 2804 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2805 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6ad42535 2806 }
dea3101e
JB
2807 break;
2808 case ELS_CMD_ADISC:
2e0fef85 2809 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
5024ab17 2810 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2811 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
6ad42535 2812 }
dea3101e
JB
2813 break;
2814 case ELS_CMD_PRLI:
2e0fef85 2815 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
5024ab17 2816 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2817 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
6ad42535 2818 }
dea3101e
JB
2819 break;
2820 case ELS_CMD_LOGO:
2e0fef85 2821 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
5024ab17 2822 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2823 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6ad42535 2824 }
dea3101e 2825 break;
92d7f7b0 2826 case ELS_CMD_FDISC:
fedd3b7b
JS
2827 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
2828 lpfc_issue_els_fdisc(vport, ndlp, retry);
92d7f7b0 2829 break;
dea3101e
JB
2830 }
2831 return;
2832}
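/*
 * Editor's note: a standalone sketch (not driver code) of the dispatch
 * performed above: the node remembers which ELS command was deferred
 * (nlp_last_elscmd in the driver) and, when the delayed-retry work runs, a
 * switch on that saved command re-issues the matching request. Names are
 * hypothetical.
 */
#include <stdint.h>

enum demo_els_cmd { DEMO_ELS_PLOGI, DEMO_ELS_ADISC, DEMO_ELS_PRLI, DEMO_ELS_LOGO };

struct demo_node {
	enum demo_els_cmd last_cmd;	/* command saved when the retry was deferred */
	uint8_t retries;
};

static void demo_issue_plogi(struct demo_node *np, uint8_t r)	{ (void)np; (void)r; }
static void demo_issue_adisc(struct demo_node *np, uint8_t r)	{ (void)np; (void)r; }
static void demo_issue_prli(struct demo_node *np, uint8_t r)	{ (void)np; (void)r; }
static void demo_issue_logo(struct demo_node *np, uint8_t r)	{ (void)np; (void)r; }

static void demo_delayed_retry(struct demo_node *np)
{
	switch (np->last_cmd) {
	case DEMO_ELS_PLOGI:
		demo_issue_plogi(np, np->retries);
		break;
	case DEMO_ELS_ADISC:
		demo_issue_adisc(np, np->retries);
		break;
	case DEMO_ELS_PRLI:
		demo_issue_prli(np, np->retries);
		break;
	case DEMO_ELS_LOGO:
		demo_issue_logo(np, np->retries);
		break;
	}
}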
2833
e59058c4 2834/**
3621a710 2835 * lpfc_els_retry - Make retry decision on an els command iocb
e59058c4
JS
2836 * @phba: pointer to lpfc hba data structure.
2837 * @cmdiocb: pointer to lpfc command iocb data structure.
2838 * @rspiocb: pointer to lpfc response iocb data structure.
2839 *
2840 * This routine makes a retry decision on an ELS command IOCB, which has
2841 * failed. The following ELS IOCBs use this function for retrying the command
2842 * when previously issued command responsed with error status: FLOGI, PLOGI,
2843 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
2844 * returned error status, it makes the decision whether a retry shall be
2845 * issued for the command, and whether a retry shall be made immediately or
2846 * delayed. In the former case, the corresponding ELS command issuing-function
 2847 * is called to retry the command. In the latter case, the ELS command shall
2848 * be posted to the ndlp delayed event and delayed function timer set to the
 2849 * ndlp for issuing the delayed command.
2850 *
2851 * Return code
2852 * 0 - No retry of els command is made
2853 * 1 - Immediate or delayed retry of els command is made
2854 **/
dea3101e 2855static int
2e0fef85
JS
2856lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2857 struct lpfc_iocbq *rspiocb)
dea3101e 2858{
2e0fef85
JS
2859 struct lpfc_vport *vport = cmdiocb->vport;
2860 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2861 IOCB_t *irsp = &rspiocb->iocb;
2862 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2863 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
dea3101e
JB
2864 uint32_t *elscmd;
2865 struct ls_rjt stat;
2e0fef85 2866 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
98c9ea5c 2867 int logerr = 0;
2e0fef85 2868 uint32_t cmd = 0;
488d1469 2869 uint32_t did;
dea3101e 2870
488d1469 2871
dea3101e
JB
2872 /* Note: context2 may be 0 for internal driver abort
 2873 * of a delayed ELS command.
2874 */
2875
2876 if (pcmd && pcmd->virt) {
2877 elscmd = (uint32_t *) (pcmd->virt);
2878 cmd = *elscmd++;
2879 }
2880
e47c9093 2881 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
488d1469
JS
2882 did = ndlp->nlp_DID;
2883 else {
2884 /* We should only hit this case for retrying PLOGI */
2885 did = irsp->un.elsreq64.remoteID;
2e0fef85 2886 ndlp = lpfc_findnode_did(vport, did);
e47c9093
JS
2887 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
2888 && (cmd != ELS_CMD_PLOGI))
488d1469
JS
2889 return 1;
2890 }
2891
858c9f6c
JS
2892 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2893 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
2894 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
2895
dea3101e
JB
2896 switch (irsp->ulpStatus) {
2897 case IOSTAT_FCP_RSP_ERROR:
1151e3ec 2898 break;
dea3101e 2899 case IOSTAT_REMOTE_STOP:
1151e3ec
JS
2900 if (phba->sli_rev == LPFC_SLI_REV4) {
2901 /* This IO was aborted by the target; we don't
2902 * know the rxid and, because we did not send the
2903 * ABTS, we cannot generate an RRQ.
2904 */
2905 lpfc_set_rrq_active(phba, ndlp,
2906 cmdiocb->sli4_xritag, 0, 0);
2907 }
dea3101e 2908 break;
dea3101e
JB
2909 case IOSTAT_LOCAL_REJECT:
2910 switch ((irsp->un.ulpWord[4] & 0xff)) {
2911 case IOERR_LOOP_OPEN_FAILURE:
eaf15d5b
JS
2912 if (cmd == ELS_CMD_FLOGI) {
2913 if (PCI_DEVICE_ID_HORNET ==
2914 phba->pcidev->device) {
76a95d75 2915 phba->fc_topology = LPFC_TOPOLOGY_LOOP;
eaf15d5b
JS
2916 phba->pport->fc_myDID = 0;
2917 phba->alpa_map[0] = 0;
2918 phba->alpa_map[1] = 0;
2919 }
2920 }
2e0fef85 2921 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
92d7f7b0 2922 delay = 1000;
dea3101e
JB
2923 retry = 1;
2924 break;
2925
92d7f7b0 2926 case IOERR_ILLEGAL_COMMAND:
7f5f3d0d
JS
2927 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2928 "0124 Retry illegal cmd x%x "
2929 "retry:x%x delay:x%x\n",
2930 cmd, cmdiocb->retry, delay);
2931 retry = 1;
2932 /* Retry policy for all commands */
2933 maxretry = 8;
2934 if (cmdiocb->retry > 2)
2935 delay = 1000;
92d7f7b0
JS
2936 break;
2937
dea3101e 2938 case IOERR_NO_RESOURCES:
98c9ea5c 2939 logerr = 1; /* HBA out of resources */
858c9f6c
JS
2940 retry = 1;
2941 if (cmdiocb->retry > 100)
2942 delay = 100;
2943 maxretry = 250;
2944 break;
2945
2946 case IOERR_ILLEGAL_FRAME:
92d7f7b0 2947 delay = 100;
dea3101e
JB
2948 retry = 1;
2949 break;
2950
858c9f6c 2951 case IOERR_SEQUENCE_TIMEOUT:
dea3101e
JB
2952 case IOERR_INVALID_RPI:
2953 retry = 1;
2954 break;
2955 }
2956 break;
2957
2958 case IOSTAT_NPORT_RJT:
2959 case IOSTAT_FABRIC_RJT:
2960 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
2961 retry = 1;
2962 break;
2963 }
2964 break;
2965
2966 case IOSTAT_NPORT_BSY:
2967 case IOSTAT_FABRIC_BSY:
98c9ea5c 2968 logerr = 1; /* Fabric / Remote NPort out of resources */
dea3101e
JB
2969 retry = 1;
2970 break;
2971
2972 case IOSTAT_LS_RJT:
2973 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
2974 /* Added for Vendor specific support
2975 * Just keep retrying for these Rsn / Exp codes
2976 */
2977 switch (stat.un.b.lsRjtRsnCode) {
2978 case LSRJT_UNABLE_TPC:
2979 if (stat.un.b.lsRjtRsnCodeExp ==
2980 LSEXP_CMD_IN_PROGRESS) {
2981 if (cmd == ELS_CMD_PLOGI) {
92d7f7b0 2982 delay = 1000;
dea3101e
JB
2983 maxretry = 48;
2984 }
2985 retry = 1;
2986 break;
2987 }
ffc95493
JS
2988 if (stat.un.b.lsRjtRsnCodeExp ==
2989 LSEXP_CANT_GIVE_DATA) {
2990 if (cmd == ELS_CMD_PLOGI) {
2991 delay = 1000;
2992 maxretry = 48;
2993 }
2994 retry = 1;
2995 break;
2996 }
dea3101e 2997 if (cmd == ELS_CMD_PLOGI) {
92d7f7b0 2998 delay = 1000;
dea3101e
JB
2999 maxretry = lpfc_max_els_tries + 1;
3000 retry = 1;
3001 break;
3002 }
92d7f7b0
JS
3003 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3004 (cmd == ELS_CMD_FDISC) &&
3005 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
e8b62011
JS
3006 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3007 "0125 FDISC Failed (x%x). "
3008 "Fabric out of resources\n",
3009 stat.un.lsRjtError);
92d7f7b0
JS
3010 lpfc_vport_set_state(vport,
3011 FC_VPORT_NO_FABRIC_RSCS);
3012 }
dea3101e
JB
3013 break;
3014
3015 case LSRJT_LOGICAL_BSY:
858c9f6c
JS
3016 if ((cmd == ELS_CMD_PLOGI) ||
3017 (cmd == ELS_CMD_PRLI)) {
92d7f7b0 3018 delay = 1000;
dea3101e 3019 maxretry = 48;
92d7f7b0 3020 } else if (cmd == ELS_CMD_FDISC) {
51ef4c26
JS
3021 /* FDISC retry policy */
3022 maxretry = 48;
3023 if (cmdiocb->retry >= 32)
3024 delay = 1000;
dea3101e
JB
3025 }
3026 retry = 1;
3027 break;
92d7f7b0
JS
3028
3029 case LSRJT_LOGICAL_ERR:
7f5f3d0d
JS
3030 /* There are some cases where switches return this
3031 * error when they are not ready and should be returning
3032 * Logical Busy. We should delay every time.
3033 */
3034 if (cmd == ELS_CMD_FDISC &&
3035 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
3036 maxretry = 3;
3037 delay = 1000;
3038 retry = 1;
3039 break;
3040 }
92d7f7b0
JS
3041 case LSRJT_PROTOCOL_ERR:
3042 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3043 (cmd == ELS_CMD_FDISC) &&
3044 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
3045 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
3046 ) {
e8b62011 3047 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
d7c255b2 3048 "0122 FDISC Failed (x%x). "
e8b62011
JS
3049 "Fabric Detected Bad WWN\n",
3050 stat.un.lsRjtError);
92d7f7b0
JS
3051 lpfc_vport_set_state(vport,
3052 FC_VPORT_FABRIC_REJ_WWN);
3053 }
3054 break;
dea3101e
JB
3055 }
3056 break;
3057
3058 case IOSTAT_INTERMED_RSP:
3059 case IOSTAT_BA_RJT:
3060 break;
3061
3062 default:
3063 break;
3064 }
3065
488d1469 3066 if (did == FDMI_DID)
dea3101e 3067 retry = 1;
dea3101e 3068
695a814e 3069 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
76a95d75 3070 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
1b32f6aa 3071 !lpfc_error_lost_link(irsp)) {
98c9ea5c
JS
3072 /* FLOGI retry policy */
3073 retry = 1;
6669f9bb
JS
3074 /* retry forever */
3075 maxretry = 0;
3076 if (cmdiocb->retry >= 100)
3077 delay = 5000;
3078 else if (cmdiocb->retry >= 32)
98c9ea5c
JS
3079 delay = 1000;
3080 }
3081
6669f9bb
JS
3082 cmdiocb->retry++;
3083 if (maxretry && (cmdiocb->retry >= maxretry)) {
dea3101e
JB
3084 phba->fc_stat.elsRetryExceeded++;
3085 retry = 0;
3086 }
3087
ed957684
JS
3088 if ((vport->load_flag & FC_UNLOADING) != 0)
3089 retry = 0;
3090
dea3101e 3091 if (retry) {
38b92ef8
JS
3092 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
3093 /* Stop retrying PLOGI and FDISC if in FCF discovery */
3094 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3095 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3096 "2849 Stop retry ELS command "
3097 "x%x to remote NPORT x%x, "
3098 "Data: x%x x%x\n", cmd, did,
3099 cmdiocb->retry, delay);
3100 return 0;
3101 }
3102 }
dea3101e
JB
3103
3104 /* Retry ELS command <elsCmd> to remote NPORT <did> */
e8b62011
JS
3105 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3106 "0107 Retry ELS command x%x to remote "
3107 "NPORT x%x Data: x%x x%x\n",
3108 cmd, did, cmdiocb->retry, delay);
dea3101e 3109
858c9f6c
JS
3110 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
3111 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
3112 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
3113 /* Don't reset timer for no resources */
3114
dea3101e 3115 /* If discovery / RSCN timer is running, reset it */
2e0fef85 3116 if (timer_pending(&vport->fc_disctmo) ||
92d7f7b0 3117 (vport->fc_flag & FC_RSCN_MODE))
2e0fef85 3118 lpfc_set_disctmo(vport);
dea3101e
JB
3119 }
3120
3121 phba->fc_stat.elsXmitRetry++;
58da1ffb 3122 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
dea3101e
JB
3123 phba->fc_stat.elsDelayRetry++;
3124 ndlp->nlp_retry = cmdiocb->retry;
3125
92d7f7b0
JS
3126 /* delay is specified in milliseconds */
3127 mod_timer(&ndlp->nlp_delayfunc,
3128 jiffies + msecs_to_jiffies(delay));
2e0fef85 3129 spin_lock_irq(shost->host_lock);
dea3101e 3130 ndlp->nlp_flag |= NLP_DELAY_TMO;
2e0fef85 3131 spin_unlock_irq(shost->host_lock);
dea3101e 3132
5024ab17 3133 ndlp->nlp_prev_state = ndlp->nlp_state;
858c9f6c
JS
3134 if (cmd == ELS_CMD_PRLI)
3135 lpfc_nlp_set_state(vport, ndlp,
3136 NLP_STE_REG_LOGIN_ISSUE);
3137 else
3138 lpfc_nlp_set_state(vport, ndlp,
3139 NLP_STE_NPR_NODE);
dea3101e
JB
3140 ndlp->nlp_last_elscmd = cmd;
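/* Descriptive note (assumption, based on the delayed-retry dispatch switch at
 * the top of this hunk): when nlp_delayfunc fires, the driver re-issues the
 * command saved in nlp_last_elscmd (via lpfc_issue_els_plogi(),
 * lpfc_issue_els_adisc(), etc.), using the retry count saved in nlp_retry.
 */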
3141
c9f8735b 3142 return 1;
dea3101e
JB
3143 }
3144 switch (cmd) {
3145 case ELS_CMD_FLOGI:
2e0fef85 3146 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
c9f8735b 3147 return 1;
92d7f7b0
JS
3148 case ELS_CMD_FDISC:
3149 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
3150 return 1;
dea3101e 3151 case ELS_CMD_PLOGI:
58da1ffb 3152 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
488d1469 3153 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 3154 lpfc_nlp_set_state(vport, ndlp,
de0c5b32 3155 NLP_STE_PLOGI_ISSUE);
488d1469 3156 }
2e0fef85 3157 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
c9f8735b 3158 return 1;
dea3101e 3159 case ELS_CMD_ADISC:
5024ab17 3160 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
3161 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3162 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
c9f8735b 3163 return 1;
dea3101e 3164 case ELS_CMD_PRLI:
5024ab17 3165 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
3166 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3167 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
c9f8735b 3168 return 1;
dea3101e 3169 case ELS_CMD_LOGO:
5024ab17 3170 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
3171 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3172 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
c9f8735b 3173 return 1;
dea3101e
JB
3174 }
3175 }
dea3101e 3176 /* No retry ELS command <elsCmd> to remote NPORT <did> */
98c9ea5c
JS
3177 if (logerr) {
3178 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3179 "0137 No retry ELS command x%x to remote "
3180 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
3181 cmd, did, irsp->ulpStatus,
3182 irsp->un.ulpWord[4]);
3183 }
3184 else {
3185 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
a58cbd52
JS
3186 "0108 No retry ELS command x%x to remote "
3187 "NPORT x%x Retried:%d Error:x%x/%x\n",
3188 cmd, did, cmdiocb->retry, irsp->ulpStatus,
3189 irsp->un.ulpWord[4]);
98c9ea5c 3190 }
c9f8735b 3191 return 0;
dea3101e
JB
3192}
3193
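/* Illustrative calling pattern (a sketch, not code from this hunk): the ELS
 * completion handlers elsewhere in lpfc_els.c consult lpfc_els_retry() on a
 * failed command before releasing it, roughly as follows:
 *
 *	if (irsp->ulpStatus) {
 *		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 *			goto out;	(retry scheduled; keep the resources)
 *	}
 *	...
 *	lpfc_els_free_iocb(phba, cmdiocb);
 *
 * The exact call sites (lpfc_cmpl_els_flogi(), lpfc_cmpl_els_plogi(), ...)
 * live in other parts of this file and may differ in detail.
 */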
e59058c4 3194/**
3621a710 3195 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
e59058c4
JS
3196 * @phba: pointer to lpfc hba data structure.
3197 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
3198 *
3199 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
3200 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
3201 * checks to see whether there is a lpfc DMA buffer associated with the
3202 * response of the command IOCB. If so, it will be released before releasing
3203 * the lpfc DMA buffer associated with the IOCB itself.
3204 *
3205 * Return code
3206 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
3207 **/
09372820 3208static int
87af33fe
JS
3209lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
3210{
3211 struct lpfc_dmabuf *buf_ptr;
3212
e59058c4 3213 /* Free the response before processing the command. */
87af33fe
JS
3214 if (!list_empty(&buf_ptr1->list)) {
3215 list_remove_head(&buf_ptr1->list, buf_ptr,
3216 struct lpfc_dmabuf,
3217 list);
3218 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3219 kfree(buf_ptr);
3220 }
3221 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
3222 kfree(buf_ptr1);
3223 return 0;
3224}
3225
e59058c4 3226/**
3621a710 3227 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
e59058c4
JS
3228 * @phba: pointer to lpfc hba data structure.
3229 * @buf_ptr: pointer to the lpfc dma buffer data structure.
3230 *
3231 * This routine releases the lpfc Direct Memory Access (DMA) buffer
3232 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
3233 * pool.
3234 *
3235 * Return code
3236 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
3237 **/
09372820 3238static int
87af33fe
JS
3239lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
3240{
3241 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3242 kfree(buf_ptr);
3243 return 0;
3244}
3245
e59058c4 3246/**
3621a710 3247 * lpfc_els_free_iocb - Free a command iocb and its associated resources
e59058c4
JS
3248 * @phba: pointer to lpfc hba data structure.
3249 * @elsiocb: pointer to lpfc els command iocb data structure.
3250 *
3251 * This routine frees a command IOCB and its associated resources. The
3252 * command IOCB data structure contains references to various associated
3253 * resources; these fields must be set to NULL if the associated reference
3254 * is not present:
3255 * context1 - reference to ndlp
3256 * context2 - reference to cmd
3257 * context2->next - reference to rsp
3258 * context3 - reference to bpl
3259 *
3260 * It first properly decrements the reference count held on ndlp for the
3261 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
3262 * set, it invokes the lpfc_els_free_data() routine to release the Direct
3263 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
3264 * adds the DMA buffer to the @phba data structure for delayed release.
3265 * If reference to the Buffer Pointer List (BPL) is present, the
3266 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
3267 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
3268 * invoked to release the IOCB data structure back to @phba IOCBQ list.
3269 *
3270 * Return code
3271 * 0 - Success (currently, always return 0)
3272 **/
dea3101e 3273int
329f9bc7 3274lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
dea3101e
JB
3275{
3276 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
a8adb832
JS
3277 struct lpfc_nodelist *ndlp;
3278
3279 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
3280 if (ndlp) {
3281 if (ndlp->nlp_flag & NLP_DEFER_RM) {
3282 lpfc_nlp_put(ndlp);
dea3101e 3283
a8adb832
JS
3284 /* If the ndlp is not being used by another discovery
3285 * thread, free it.
3286 */
3287 if (!lpfc_nlp_not_used(ndlp)) {
3288 /* If ndlp is being used by another discovery
3289 * thread, just clear NLP_DEFER_RM
3290 */
3291 ndlp->nlp_flag &= ~NLP_DEFER_RM;
3292 }
3293 }
3294 else
3295 lpfc_nlp_put(ndlp);
329f9bc7
JS
3296 elsiocb->context1 = NULL;
3297 }
dea3101e
JB
3298 /* context2 = cmd, context2->next = rsp, context3 = bpl */
3299 if (elsiocb->context2) {
0ff10d46
JS
3300 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
3301 /* Firmware could still be in progress of DMAing
3302 * payload, so don't free data buffer till after
3303 * a hbeat.
3304 */
3305 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
3306 buf_ptr = elsiocb->context2;
3307 elsiocb->context2 = NULL;
3308 if (buf_ptr) {
3309 buf_ptr1 = NULL;
3310 spin_lock_irq(&phba->hbalock);
3311 if (!list_empty(&buf_ptr->list)) {
3312 list_remove_head(&buf_ptr->list,
3313 buf_ptr1, struct lpfc_dmabuf,
3314 list);
3315 INIT_LIST_HEAD(&buf_ptr1->list);
3316 list_add_tail(&buf_ptr1->list,
3317 &phba->elsbuf);
3318 phba->elsbuf_cnt++;
3319 }
3320 INIT_LIST_HEAD(&buf_ptr->list);
3321 list_add_tail(&buf_ptr->list, &phba->elsbuf);
3322 phba->elsbuf_cnt++;
3323 spin_unlock_irq(&phba->hbalock);
3324 }
3325 } else {
3326 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3327 lpfc_els_free_data(phba, buf_ptr1);
3328 }
dea3101e
JB
3329 }
3330
3331 if (elsiocb->context3) {
3332 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
87af33fe 3333 lpfc_els_free_bpl(phba, buf_ptr);
dea3101e 3334 }
604a3e30 3335 lpfc_sli_release_iocbq(phba, elsiocb);
dea3101e
JB
3336 return 0;
3337}
3338
e59058c4 3339/**
3621a710 3340 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
e59058c4
JS
3341 * @phba: pointer to lpfc hba data structure.
3342 * @cmdiocb: pointer to lpfc command iocb data structure.
3343 * @rspiocb: pointer to lpfc response iocb data structure.
3344 *
3345 * This routine is the completion callback function to the Logout (LOGO)
3346 * Accept (ACC) Response ELS command. This routine is invoked to indicate
3347 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
3348 * release the ndlp if it has the last reference remaining (reference count
3349 * is 1). If it succeeds (meaning the ndlp is released), it sets the IOCB
3350 * context1 field to NULL to inform the following lpfc_els_free_iocb() routine that no
3351 * ndlp reference count needs to be decremented. Otherwise, the ndlp
3352 * reference use-count shall be decremented by the lpfc_els_free_iocb()
3353 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
3354 * IOCB data structure.
3355 **/
dea3101e 3356static void
2e0fef85
JS
3357lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3358 struct lpfc_iocbq *rspiocb)
dea3101e 3359{
2e0fef85
JS
3360 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3361 struct lpfc_vport *vport = cmdiocb->vport;
858c9f6c
JS
3362 IOCB_t *irsp;
3363
3364 irsp = &rspiocb->iocb;
3365 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3366 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
3367 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
dea3101e 3368 /* ACC to LOGO completes to NPort <nlp_DID> */
e8b62011
JS
3369 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3370 "0109 ACC to LOGO completes to NPort x%x "
3371 "Data: x%x x%x x%x\n",
3372 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3373 ndlp->nlp_rpi);
87af33fe
JS
3374
3375 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
3376 /* NPort Recovery mode or node is just allocated */
3377 if (!lpfc_nlp_not_used(ndlp)) {
3378 /* If the ndlp is being used by another discovery
3379 * thread, just unregister the RPI.
3380 */
3381 lpfc_unreg_rpi(vport, ndlp);
fa4066b6
JS
3382 } else {
3383 /* Indicate the node has already released, should
3384 * not reference to it from within lpfc_els_free_iocb.
3385 */
3386 cmdiocb->context1 = NULL;
87af33fe 3387 }
dea3101e
JB
3388 }
3389 lpfc_els_free_iocb(phba, cmdiocb);
3390 return;
3391}
3392
e59058c4 3393/**
3621a710 3394 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
e59058c4
JS
3395 * @phba: pointer to lpfc hba data structure.
3396 * @pmb: pointer to the driver internal queue element for mailbox command.
3397 *
3398 * This routine is the completion callback function for unregister default
3399 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
3400 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
3401 * decrements the ndlp reference count held for this completion callback
3402 * function. After that, it invokes the lpfc_nlp_not_used() to check
3403 * whether there is only one reference left on the ndlp. If so, it will
3404 * perform one more decrement and trigger the release of the ndlp.
3405 **/
858c9f6c
JS
3406void
3407lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3408{
3409 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3410 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3411
3412 pmb->context1 = NULL;
d439d286
JS
3413 pmb->context2 = NULL;
3414
858c9f6c
JS
3415 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3416 kfree(mp);
3417 mempool_free(pmb, phba->mbox_mem_pool);
58da1ffb 3418 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
a8adb832 3419 lpfc_nlp_put(ndlp);
a8adb832
JS
3420 /* This is the end of the default RPI cleanup logic for this
3421 * ndlp. If no other discovery threads are using this ndlp,
3422 * we should free all resources associated with it.
3423 */
3424 lpfc_nlp_not_used(ndlp);
3425 }
3772a991 3426
858c9f6c
JS
3427 return;
3428}
3429
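/* Descriptive note: in this hunk, lpfc_cmpl_els_rsp() below selects this
 * completion (and sets LPFC_MBX_IMED_UNREG) when the node carries the
 * NLP_RM_DFLT_RPI flag, so the default RPI is cleaned up once the mailbox
 * completes.
 */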
e59058c4 3430/**
3621a710 3431 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
e59058c4
JS
3432 * @phba: pointer to lpfc hba data structure.
3433 * @cmdiocb: pointer to lpfc command iocb data structure.
3434 * @rspiocb: pointer to lpfc response iocb data structure.
3435 *
3436 * This routine is the completion callback function for ELS Response IOCB
3437 * command. In the normal case, this callback function just properly sets the
3438 * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
3439 * field in the command IOCB is not NULL, the referred mailbox command will
3440 * be sent out, and then the lpfc_els_free_iocb() routine is invoked to release
3441 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
3442 * link down event occurred during the discovery, the lpfc_nlp_not_used()
3443 * routine shall be invoked trying to release the ndlp if no other threads
3444 * are currently referring to it.
3445 **/
dea3101e 3446static void
858c9f6c 3447lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
329f9bc7 3448 struct lpfc_iocbq *rspiocb)
dea3101e 3449{
2e0fef85
JS
3450 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3451 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3452 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
87af33fe
JS
3453 IOCB_t *irsp;
3454 uint8_t *pcmd;
dea3101e 3455 LPFC_MBOXQ_t *mbox = NULL;
2e0fef85 3456 struct lpfc_dmabuf *mp = NULL;
87af33fe 3457 uint32_t ls_rjt = 0;
dea3101e 3458
33ccf8d1
JS
3459 irsp = &rspiocb->iocb;
3460
dea3101e
JB
3461 if (cmdiocb->context_un.mbox)
3462 mbox = cmdiocb->context_un.mbox;
3463
fa4066b6
JS
3464 /* First determine if this is an LS_RJT cmpl. Note, this callback
3465 * function can have the cmdiocb->context1 (ndlp) field set to NULL.
3466 */
87af33fe 3467 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
58da1ffb
JS
3468 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3469 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
fa4066b6 3470 /* A LS_RJT associated with Default RPI cleanup has its own
3ad2f3fb 3471 * separate code path.
87af33fe
JS
3472 */
3473 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3474 ls_rjt = 1;
3475 }
3476
dea3101e 3477 /* Check to see if link went down during discovery */
58da1ffb 3478 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
dea3101e 3479 if (mbox) {
14691150
JS
3480 mp = (struct lpfc_dmabuf *) mbox->context1;
3481 if (mp) {
3482 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3483 kfree(mp);
3484 }
329f9bc7 3485 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e 3486 }
58da1ffb
JS
3487 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3488 (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
fa4066b6 3489 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3490 ndlp = NULL;
fa4066b6
JS
3491 /* Indicate the node has already released,
3492 * should not reference to it from within
3493 * the routine lpfc_els_free_iocb.
3494 */
3495 cmdiocb->context1 = NULL;
3496 }
dea3101e
JB
3497 goto out;
3498 }
3499
858c9f6c 3500 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
51ef4c26 3501 "ELS rsp cmpl: status:x%x/x%x did:x%x",
858c9f6c 3502 irsp->ulpStatus, irsp->un.ulpWord[4],
51ef4c26 3503 cmdiocb->iocb.un.elsreq64.remoteID);
dea3101e 3504 /* ELS response tag <ulpIoTag> completes */
e8b62011
JS
3505 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3506 "0110 ELS response tag x%x completes "
3507 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3508 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3509 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3510 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3511 ndlp->nlp_rpi);
dea3101e
JB
3512 if (mbox) {
3513 if ((rspiocb->iocb.ulpStatus == 0)
3514 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
2e0fef85 3515 lpfc_unreg_rpi(vport, ndlp);
e47c9093
JS
3516 /* Increment reference count to ndlp to hold the
3517 * reference to ndlp for the callback function.
3518 */
329f9bc7 3519 mbox->context2 = lpfc_nlp_get(ndlp);
2e0fef85 3520 mbox->vport = vport;
858c9f6c
JS
3521 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
3522 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3523 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3524 }
3525 else {
3526 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
3527 ndlp->nlp_prev_state = ndlp->nlp_state;
3528 lpfc_nlp_set_state(vport, ndlp,
2e0fef85 3529 NLP_STE_REG_LOGIN_ISSUE);
858c9f6c 3530 }
0b727fea 3531 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
e47c9093 3532 != MBX_NOT_FINISHED)
dea3101e 3533 goto out;
e47c9093
JS
3534 else
3535 /* Decrement the ndlp reference count we
3536 * set for this failed mailbox command.
3537 */
3538 lpfc_nlp_put(ndlp);
98c9ea5c
JS
3539
3540 /* ELS rsp: Cannot issue reg_login for <NPortid> */
3541 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3542 "0138 ELS rsp: Cannot issue reg_login for x%x "
3543 "Data: x%x x%x x%x\n",
3544 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3545 ndlp->nlp_rpi);
3546
fa4066b6 3547 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3548 ndlp = NULL;
fa4066b6
JS
3549 /* Indicate node has already been released,
3550 * should not reference to it from within
3551 * the routine lpfc_els_free_iocb.
3552 */
3553 cmdiocb->context1 = NULL;
3554 }
dea3101e 3555 } else {
858c9f6c
JS
3556 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
3557 if (!lpfc_error_lost_link(irsp) &&
3558 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
fa4066b6 3559 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3560 ndlp = NULL;
fa4066b6
JS
3561 /* Indicate node has already been
3562 * released, should not reference
3563 * to it from within the routine
3564 * lpfc_els_free_iocb.
3565 */
3566 cmdiocb->context1 = NULL;
3567 }
dea3101e
JB
3568 }
3569 }
14691150
JS
3570 mp = (struct lpfc_dmabuf *) mbox->context1;
3571 if (mp) {
3572 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3573 kfree(mp);
3574 }
3575 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e
JB
3576 }
3577out:
58da1ffb 3578 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2e0fef85 3579 spin_lock_irq(shost->host_lock);
858c9f6c 3580 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
2e0fef85 3581 spin_unlock_irq(shost->host_lock);
87af33fe
JS
3582
3583 /* If the node is not being used by another discovery thread,
3584 * and we are sending a reject, we are done with it.
3585 * Release driver reference count here and free associated
3586 * resources.
3587 */
3588 if (ls_rjt)
fa4066b6
JS
3589 if (lpfc_nlp_not_used(ndlp))
3590 /* Indicate node has already been released,
3591 * should not reference to it from within
3592 * the routine lpfc_els_free_iocb.
3593 */
3594 cmdiocb->context1 = NULL;
dea3101e 3595 }
87af33fe 3596
dea3101e
JB
3597 lpfc_els_free_iocb(phba, cmdiocb);
3598 return;
3599}
3600
e59058c4 3601/**
3621a710 3602 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
e59058c4
JS
3603 * @vport: pointer to a host virtual N_Port data structure.
3604 * @flag: the els command code to be accepted.
3605 * @oldiocb: pointer to the original lpfc command iocb data structure.
3606 * @ndlp: pointer to a node-list data structure.
3607 * @mbox: pointer to the driver internal queue element for mailbox command.
3608 *
3609 * This routine prepares and issues an Accept (ACC) response IOCB
3610 * command. It uses the @flag to properly set up the IOCB field for the
3611 * specific ACC response command to be issued and invokes the
3612 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
3613 * @mbox pointer is passed in, it will be put into the context_un.mbox
3614 * field of the IOCB for the completion callback function to issue the
3615 * mailbox command to the HBA later when callback is invoked.
3616 *
3617 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3618 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3619 * will be stored into the context1 field of the IOCB for the completion
3620 * callback function to the corresponding response ELS IOCB command.
3621 *
3622 * Return code
3623 * 0 - Successfully issued acc response
3624 * 1 - Failed to issue acc response
3625 **/
dea3101e 3626int
2e0fef85
JS
3627lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3628 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
51ef4c26 3629 LPFC_MBOXQ_t *mbox)
dea3101e 3630{
2e0fef85
JS
3631 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3632 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
3633 IOCB_t *icmd;
3634 IOCB_t *oldcmd;
3635 struct lpfc_iocbq *elsiocb;
dea3101e
JB
3636 struct lpfc_sli *psli;
3637 uint8_t *pcmd;
3638 uint16_t cmdsize;
3639 int rc;
82d9a2a2 3640 ELS_PKT *els_pkt_ptr;
dea3101e
JB
3641
3642 psli = &phba->sli;
dea3101e
JB
3643 oldcmd = &oldiocb->iocb;
3644
3645 switch (flag) {
3646 case ELS_CMD_ACC:
92d7f7b0 3647 cmdsize = sizeof(uint32_t);
2e0fef85
JS
3648 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3649 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3650 if (!elsiocb) {
2e0fef85 3651 spin_lock_irq(shost->host_lock);
5024ab17 3652 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2e0fef85 3653 spin_unlock_irq(shost->host_lock);
c9f8735b 3654 return 1;
dea3101e 3655 }
2e0fef85 3656
dea3101e 3657 icmd = &elsiocb->iocb;
7851fe2c
JS
3658 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3659 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
dea3101e
JB
3660 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3661 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 3662 pcmd += sizeof(uint32_t);
858c9f6c
JS
3663
3664 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3665 "Issue ACC: did:x%x flg:x%x",
3666 ndlp->nlp_DID, ndlp->nlp_flag, 0);
dea3101e
JB
3667 break;
3668 case ELS_CMD_PLOGI:
92d7f7b0 3669 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
2e0fef85
JS
3670 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3671 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3672 if (!elsiocb)
c9f8735b 3673 return 1;
488d1469 3674
dea3101e 3675 icmd = &elsiocb->iocb;
7851fe2c
JS
3676 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3677 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
dea3101e
JB
3678 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3679
3680 if (mbox)
3681 elsiocb->context_un.mbox = mbox;
3682
3683 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0
JS
3684 pcmd += sizeof(uint32_t);
3685 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
858c9f6c
JS
3686
3687 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3688 "Issue ACC PLOGI: did:x%x flg:x%x",
3689 ndlp->nlp_DID, ndlp->nlp_flag, 0);
dea3101e 3690 break;
82d9a2a2 3691 case ELS_CMD_PRLO:
92d7f7b0 3692 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
2e0fef85 3693 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
82d9a2a2
JS
3694 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
3695 if (!elsiocb)
3696 return 1;
3697
3698 icmd = &elsiocb->iocb;
7851fe2c
JS
3699 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3700 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
82d9a2a2
JS
3701 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3702
3703 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
92d7f7b0 3704 sizeof(uint32_t) + sizeof(PRLO));
82d9a2a2
JS
3705 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
3706 els_pkt_ptr = (ELS_PKT *) pcmd;
3707 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
858c9f6c
JS
3708
3709 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3710 "Issue ACC PRLO: did:x%x flg:x%x",
3711 ndlp->nlp_DID, ndlp->nlp_flag, 0);
82d9a2a2 3712 break;
dea3101e 3713 default:
c9f8735b 3714 return 1;
dea3101e 3715 }
dea3101e 3716 /* Xmit ELS ACC response tag <ulpIoTag> */
e8b62011
JS
3717 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3718 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
3719 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
3720 elsiocb->iotag, elsiocb->iocb.ulpContext,
3721 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3722 ndlp->nlp_rpi);
dea3101e 3723 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2e0fef85 3724 spin_lock_irq(shost->host_lock);
c9f8735b 3725 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2e0fef85 3726 spin_unlock_irq(shost->host_lock);
dea3101e
JB
3727 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
3728 } else {
858c9f6c 3729 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e
JB
3730 }
3731
3732 phba->fc_stat.elsXmitACC++;
3772a991 3733 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
3734 if (rc == IOCB_ERROR) {
3735 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3736 return 1;
dea3101e 3737 }
c9f8735b 3738 return 0;
dea3101e
JB
3739}
3740
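/* Illustrative usage (a sketch; the real call sites are in the discovery
 * state machine, lpfc_nportdisc.c, and may differ in detail): accepting a
 * received PLOGI with our service parameters, deferring a REG_LOGIN mailbox
 * to the completion path, looks roughly like
 *
 *	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
 *
 * while a plain one-word ACC (for example in response to a LOGO) passes
 * ELS_CMD_ACC and a NULL mbox.
 */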
e59058c4 3741/**
3621a710 3742 * lpfc_els_rsp_reject - Propare and issue a rjt response iocb command
e59058c4
JS
3743 * @vport: pointer to a virtual N_Port data structure.
3744 * @rejectError: reject status (reason and explanation codes) placed in the LS_RJT payload.
3745 * @oldiocb: pointer to the original lpfc command iocb data structure.
3746 * @ndlp: pointer to a node-list data structure.
3747 * @mbox: pointer to the driver internal queue element for mailbox command.
3748 *
3749 * This routine prepares and issues a Reject (RJT) response IOCB
3750 * command. If a @mbox pointer is passed in, it will be put into the
3751 * context_un.mbox field of the IOCB for the completion callback function
3752 * to issue to the HBA later.
3753 *
3754 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3755 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3756 * will be stored into the context1 field of the IOCB for the completion
3757 * callback function to the reject response ELS IOCB command.
3758 *
3759 * Return code
3760 * 0 - Successfully issued reject response
3761 * 1 - Failed to issue reject response
3762 **/
dea3101e 3763int
2e0fef85 3764lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
858c9f6c
JS
3765 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3766 LPFC_MBOXQ_t *mbox)
dea3101e 3767{
2e0fef85 3768 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
3769 IOCB_t *icmd;
3770 IOCB_t *oldcmd;
3771 struct lpfc_iocbq *elsiocb;
dea3101e
JB
3772 struct lpfc_sli *psli;
3773 uint8_t *pcmd;
3774 uint16_t cmdsize;
3775 int rc;
3776
3777 psli = &phba->sli;
92d7f7b0 3778 cmdsize = 2 * sizeof(uint32_t);
2e0fef85
JS
3779 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3780 ndlp->nlp_DID, ELS_CMD_LS_RJT);
488d1469 3781 if (!elsiocb)
c9f8735b 3782 return 1;
dea3101e
JB
3783
3784 icmd = &elsiocb->iocb;
3785 oldcmd = &oldiocb->iocb;
7851fe2c
JS
3786 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3787 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
dea3101e
JB
3788 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3789
3790 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
92d7f7b0 3791 pcmd += sizeof(uint32_t);
dea3101e
JB
3792 *((uint32_t *) (pcmd)) = rejectError;
3793
51ef4c26 3794 if (mbox)
858c9f6c 3795 elsiocb->context_un.mbox = mbox;
858c9f6c 3796
dea3101e 3797 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
e8b62011
JS
3798 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3799 "0129 Xmit ELS RJT x%x response tag x%x "
3800 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
3801 "rpi x%x\n",
3802 rejectError, elsiocb->iotag,
3803 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
3804 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
858c9f6c
JS
3805 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3806 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
3807 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
3808
dea3101e 3809 phba->fc_stat.elsXmitLSRJT++;
858c9f6c 3810 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3772a991 3811 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
51ef4c26 3812
dea3101e
JB
3813 if (rc == IOCB_ERROR) {
3814 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3815 return 1;
dea3101e 3816 }
c9f8735b 3817 return 0;
dea3101e
JB
3818}
3819
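/* Illustrative usage (a sketch, assumed from the state-machine callers in
 * lpfc_nportdisc.c): a request that cannot be honored is rejected by filling
 * an ls_rjt reason/explanation word and passing it in, roughly:
 *
 *	struct ls_rjt stat;
 *
 *	memset(&stat, 0, sizeof(stat));
 *	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 *	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
 *	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 */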
e59058c4 3820/**
3621a710 3821 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
e59058c4
JS
3822 * @vport: pointer to a virtual N_Port data structure.
3823 * @oldiocb: pointer to the original lpfc command iocb data structure.
3824 * @ndlp: pointer to a node-list data structure.
3825 *
3826 * This routine prepares and issues an Accept (ACC) response to Address
3827 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
3828 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3829 *
3830 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3831 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3832 * will be stored into the context1 field of the IOCB for the completion
3833 * callback function to the ADISC Accept response ELS IOCB command.
3834 *
3835 * Return code
3836 * 0 - Successfully issued acc adisc response
3837 * 1 - Failed to issue adisc acc response
3838 **/
dea3101e 3839int
2e0fef85
JS
3840lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3841 struct lpfc_nodelist *ndlp)
dea3101e 3842{
2e0fef85 3843 struct lpfc_hba *phba = vport->phba;
dea3101e 3844 ADISC *ap;
2e0fef85 3845 IOCB_t *icmd, *oldcmd;
dea3101e 3846 struct lpfc_iocbq *elsiocb;
dea3101e
JB
3847 uint8_t *pcmd;
3848 uint16_t cmdsize;
3849 int rc;
3850
92d7f7b0 3851 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
2e0fef85
JS
3852 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3853 ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3854 if (!elsiocb)
c9f8735b 3855 return 1;
dea3101e 3856
5b8bd0c9
JS
3857 icmd = &elsiocb->iocb;
3858 oldcmd = &oldiocb->iocb;
7851fe2c
JS
3859 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3860 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5b8bd0c9 3861
dea3101e 3862 /* Xmit ADISC ACC response tag <ulpIoTag> */
e8b62011
JS
3863 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3864 "0130 Xmit ADISC ACC response iotag x%x xri: "
3865 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
3866 elsiocb->iotag, elsiocb->iocb.ulpContext,
3867 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3868 ndlp->nlp_rpi);
dea3101e
JB
3869 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3870
3871 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 3872 pcmd += sizeof(uint32_t);
dea3101e
JB
3873
3874 ap = (ADISC *) (pcmd);
3875 ap->hardAL_PA = phba->fc_pref_ALPA;
92d7f7b0
JS
3876 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
3877 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 3878 ap->DID = be32_to_cpu(vport->fc_myDID);
dea3101e 3879
858c9f6c
JS
3880 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3881 "Issue ACC ADISC: did:x%x flg:x%x",
3882 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3883
dea3101e 3884 phba->fc_stat.elsXmitACC++;
858c9f6c 3885 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3772a991 3886 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
3887 if (rc == IOCB_ERROR) {
3888 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3889 return 1;
dea3101e 3890 }
c9f8735b 3891 return 0;
dea3101e
JB
3892}
3893
e59058c4 3894/**
3621a710 3895 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
e59058c4
JS
3896 * @vport: pointer to a virtual N_Port data structure.
3897 * @oldiocb: pointer to the original lpfc command iocb data structure.
3898 * @ndlp: pointer to a node-list data structure.
3899 *
3900 * This routine prepares and issues an Accept (ACC) response to Process
3901 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
3902 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3903 *
3904 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3905 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3906 * will be stored into the context1 field of the IOCB for the completion
3907 * callback function to the PRLI Accept response ELS IOCB command.
3908 *
3909 * Return code
3910 * 0 - Successfully issued acc prli response
3911 * 1 - Failed to issue acc prli response
3912 **/
dea3101e 3913int
2e0fef85 3914lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
5b8bd0c9 3915 struct lpfc_nodelist *ndlp)
dea3101e 3916{
2e0fef85 3917 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
3918 PRLI *npr;
3919 lpfc_vpd_t *vpd;
3920 IOCB_t *icmd;
3921 IOCB_t *oldcmd;
3922 struct lpfc_iocbq *elsiocb;
dea3101e
JB
3923 struct lpfc_sli *psli;
3924 uint8_t *pcmd;
3925 uint16_t cmdsize;
3926 int rc;
3927
3928 psli = &phba->sli;
dea3101e 3929
92d7f7b0 3930 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
2e0fef85 3931 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
92d7f7b0 3932 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
c9f8735b
JW
3933 if (!elsiocb)
3934 return 1;
dea3101e 3935
5b8bd0c9
JS
3936 icmd = &elsiocb->iocb;
3937 oldcmd = &oldiocb->iocb;
7851fe2c
JS
3938 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3939 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3940
dea3101e 3941 /* Xmit PRLI ACC response tag <ulpIoTag> */
e8b62011
JS
3942 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3943 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
3944 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
3945 elsiocb->iotag, elsiocb->iocb.ulpContext,
3946 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3947 ndlp->nlp_rpi);
dea3101e
JB
3948 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3949
3950 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
92d7f7b0 3951 pcmd += sizeof(uint32_t);
dea3101e
JB
3952
3953 /* For PRLI, remainder of payload is PRLI parameter page */
92d7f7b0 3954 memset(pcmd, 0, sizeof(PRLI));
dea3101e
JB
3955
3956 npr = (PRLI *) pcmd;
3957 vpd = &phba->vpd;
3958 /*
0d2b6b83
JS
3959 * If the remote port is a target and our firmware version is 3.20 or
3960 * later, set the following bits for FC-TAPE support.
dea3101e 3961 */
0d2b6b83
JS
3962 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
3963 (vpd->rev.feaLevelHigh >= 0x02)) {
dea3101e
JB
3964 npr->ConfmComplAllowed = 1;
3965 npr->Retry = 1;
3966 npr->TaskRetryIdReq = 1;
3967 }
3968
3969 npr->acceptRspCode = PRLI_REQ_EXECUTED;
3970 npr->estabImagePair = 1;
3971 npr->readXferRdyDis = 1;
3972 npr->ConfmComplAllowed = 1;
3973
3974 npr->prliType = PRLI_FCP_TYPE;
3975 npr->initiatorFunc = 1;
3976
858c9f6c
JS
3977 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3978 "Issue ACC PRLI: did:x%x flg:x%x",
3979 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3980
dea3101e 3981 phba->fc_stat.elsXmitACC++;
858c9f6c 3982 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e 3983
3772a991 3984 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
3985 if (rc == IOCB_ERROR) {
3986 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3987 return 1;
dea3101e 3988 }
c9f8735b 3989 return 0;
dea3101e
JB
3990}
3991
e59058c4 3992/**
3621a710 3993 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
e59058c4
JS
3994 * @vport: pointer to a virtual N_Port data structure.
3995 * @format: rnid command format.
3996 * @oldiocb: pointer to the original lpfc command iocb data structure.
3997 * @ndlp: pointer to a node-list data structure.
3998 *
3999 * This routine issues a Request Node Identification Data (RNID) Accept
4000 * (ACC) response. It constructs the RNID ACC response command according to
4001 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
4002 * issue the response. Note that this command does not need to hold the ndlp
4003 * reference count for the callback. So, the ndlp reference count taken by
4004 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
4005 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
4006 * there is no ndlp reference available.
4007 *
4008 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4009 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4010 * will be stored into the context1 field of the IOCB for the completion
4011 * callback function. However, for the RNID Accept Response ELS command,
4012 * this is undone later by this routine after the IOCB is allocated.
4013 *
4014 * Return code
4015 * 0 - Successfully issued acc rnid response
4016 * 1 - Failed to issue acc rnid response
4017 **/
dea3101e 4018static int
2e0fef85 4019lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
329f9bc7 4020 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
dea3101e 4021{
2e0fef85 4022 struct lpfc_hba *phba = vport->phba;
dea3101e 4023 RNID *rn;
2e0fef85 4024 IOCB_t *icmd, *oldcmd;
dea3101e 4025 struct lpfc_iocbq *elsiocb;
dea3101e
JB
4026 struct lpfc_sli *psli;
4027 uint8_t *pcmd;
4028 uint16_t cmdsize;
4029 int rc;
4030
4031 psli = &phba->sli;
92d7f7b0
JS
4032 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
4033 + (2 * sizeof(struct lpfc_name));
dea3101e 4034 if (format)
92d7f7b0 4035 cmdsize += sizeof(RNID_TOP_DISC);
dea3101e 4036
2e0fef85
JS
4037 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4038 ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 4039 if (!elsiocb)
c9f8735b 4040 return 1;
dea3101e 4041
5b8bd0c9
JS
4042 icmd = &elsiocb->iocb;
4043 oldcmd = &oldiocb->iocb;
7851fe2c
JS
4044 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4045 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4046
dea3101e 4047 /* Xmit RNID ACC response tag <ulpIoTag> */
e8b62011
JS
4048 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4049 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
4050 elsiocb->iotag, elsiocb->iocb.ulpContext);
dea3101e 4051 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
dea3101e 4052 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 4053 pcmd += sizeof(uint32_t);
dea3101e 4054
92d7f7b0 4055 memset(pcmd, 0, sizeof(RNID));
dea3101e
JB
4056 rn = (RNID *) (pcmd);
4057 rn->Format = format;
92d7f7b0
JS
4058 rn->CommonLen = (2 * sizeof(struct lpfc_name));
4059 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4060 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
dea3101e
JB
4061 switch (format) {
4062 case 0:
4063 rn->SpecificLen = 0;
4064 break;
4065 case RNID_TOPOLOGY_DISC:
92d7f7b0 4066 rn->SpecificLen = sizeof(RNID_TOP_DISC);
dea3101e 4067 memcpy(&rn->un.topologyDisc.portName,
92d7f7b0 4068 &vport->fc_portname, sizeof(struct lpfc_name));
dea3101e
JB
4069 rn->un.topologyDisc.unitType = RNID_HBA;
4070 rn->un.topologyDisc.physPort = 0;
4071 rn->un.topologyDisc.attachedNodes = 0;
4072 break;
4073 default:
4074 rn->CommonLen = 0;
4075 rn->SpecificLen = 0;
4076 break;
4077 }
4078
858c9f6c
JS
4079 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4080 "Issue ACC RNID: did:x%x flg:x%x",
4081 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4082
dea3101e 4083 phba->fc_stat.elsXmitACC++;
858c9f6c 4084 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e 4085
3772a991 4086 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
4087 if (rc == IOCB_ERROR) {
4088 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 4089 return 1;
dea3101e 4090 }
c9f8735b 4091 return 0;
dea3101e
JB
4092}
4093
19ca7609
JS
4094/**
4095 * lpfc_els_clear_rrq - Clear the active rrq that this RRQ els describes.
4096 * @vport: pointer to a virtual N_Port data structure.
4097 * @iocb: pointer to the lpfc command iocb data structure.
4098 * @ndlp: pointer to a node-list data structure.
4099 *
4100 * Return: none
4101 **/
4102static void
4103lpfc_els_clear_rrq(struct lpfc_vport *vport,
4104 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
4105{
4106 struct lpfc_hba *phba = vport->phba;
4107 uint8_t *pcmd;
4108 struct RRQ *rrq;
4109 uint16_t rxid;
1151e3ec 4110 uint16_t xri;
19ca7609
JS
4111 struct lpfc_node_rrq *prrq;
4112
4113
4114 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
4115 pcmd += sizeof(uint32_t);
4116 rrq = (struct RRQ *)pcmd;
1151e3ec 4117 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
9589b062 4118 rxid = bf_get(rrq_rxid, rrq);
19ca7609
JS
4119
4120 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4121 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
4122 " x%x x%x\n",
1151e3ec 4123 be32_to_cpu(bf_get(rrq_did, rrq)),
9589b062 4124 bf_get(rrq_oxid, rrq),
19ca7609
JS
4125 rxid,
4126 iocb->iotag, iocb->iocb.ulpContext);
4127
4128 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4129 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
4130 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
1151e3ec 4131 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
9589b062 4132 xri = bf_get(rrq_oxid, rrq);
1151e3ec
JS
4133 else
4134 xri = rxid;
4135 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
19ca7609 4136 if (prrq)
1151e3ec 4137 lpfc_clr_rrq_active(phba, xri, prrq);
19ca7609
JS
4138 return;
4139}
4140
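/* Descriptive note (based on the logic above): the received RRQ names an
 * exchange by its OX_ID/RX_ID. If this port originated the exchange
 * (rrq_did equals our fc_myDID) the OX_ID is the locally owned XRI,
 * otherwise the RX_ID is used, and the matching active node RRQ, if any,
 * is cleared through lpfc_clr_rrq_active().
 */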
12265f68
JS
4141/**
4142 * lpfc_els_rsp_echo_acc - Issue echo acc response
4143 * @vport: pointer to a virtual N_Port data structure.
4144 * @data: pointer to echo data to return in the accept.
4145 * @oldiocb: pointer to the original lpfc command iocb data structure.
4146 * @ndlp: pointer to a node-list data structure.
4147 *
4148 * Return code
4149 * 0 - Successfully issued acc echo response
4150 * 1 - Failed to issue acc echo response
4151 **/
4152static int
4153lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4154 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4155{
4156 struct lpfc_hba *phba = vport->phba;
4157 struct lpfc_iocbq *elsiocb;
4158 struct lpfc_sli *psli;
4159 uint8_t *pcmd;
4160 uint16_t cmdsize;
4161 int rc;
4162
4163 psli = &phba->sli;
4164 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
4165
bf08611b
JS
4166 /* The accumulated length can exceed the BPL_SIZE. For
4167 * now, use this as the limit
4168 */
4169 if (cmdsize > LPFC_BPL_SIZE)
4170 cmdsize = LPFC_BPL_SIZE;
12265f68
JS
4171 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4172 ndlp->nlp_DID, ELS_CMD_ACC);
4173 if (!elsiocb)
4174 return 1;
4175
7851fe2c
JS
4176 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
4177 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
4178
12265f68
JS
4179 /* Xmit ECHO ACC response tag <ulpIoTag> */
4180 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4181 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
4182 elsiocb->iotag, elsiocb->iocb.ulpContext);
4183 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4184 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4185 pcmd += sizeof(uint32_t);
4186 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
4187
4188 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4189 "Issue ACC ECHO: did:x%x flg:x%x",
4190 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4191
4192 phba->fc_stat.elsXmitACC++;
4193 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
12265f68
JS
4194
4195 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4196 if (rc == IOCB_ERROR) {
4197 lpfc_els_free_iocb(phba, elsiocb);
4198 return 1;
4199 }
4200 return 0;
4201}
4202
e59058c4 4203/**
3621a710 4204 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
e59058c4
JS
4205 * @vport: pointer to a host virtual N_Port data structure.
4206 *
4207 * This routine issues Address Discover (ADISC) ELS commands to those
4208 * N_Ports which are in node port recovery state and ADISC has not been issued
4209 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
4210 * lpfc_issue_els_adisc() routine, the per @vport number of discover count
4211 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a
4212 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
4213 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC
4214 * IOCBs quit for later pick up. On the other hand, after walking through
4215 * all the ndlps with the @vport and there is none ADISC IOCB issued, the
4216 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is
4217 * no more ADISC need to be sent.
4218 *
4219 * Return code
4220 * The number of N_Ports with adisc issued.
4221 **/
dea3101e 4222int
2e0fef85 4223lpfc_els_disc_adisc(struct lpfc_vport *vport)
dea3101e 4224{
2e0fef85 4225 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 4226 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 4227 int sentadisc = 0;
dea3101e 4228
685f0bf7 4229 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2e0fef85 4230 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
4231 if (!NLP_CHK_NODE_ACT(ndlp))
4232 continue;
685f0bf7
JS
4233 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4234 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4235 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
2e0fef85 4236 spin_lock_irq(shost->host_lock);
685f0bf7 4237 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2e0fef85 4238 spin_unlock_irq(shost->host_lock);
685f0bf7 4239 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
4240 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4241 lpfc_issue_els_adisc(vport, ndlp, 0);
685f0bf7 4242 sentadisc++;
2e0fef85
JS
4243 vport->num_disc_nodes++;
4244 if (vport->num_disc_nodes >=
3de2a653 4245 vport->cfg_discovery_threads) {
2e0fef85
JS
4246 spin_lock_irq(shost->host_lock);
4247 vport->fc_flag |= FC_NLP_MORE;
4248 spin_unlock_irq(shost->host_lock);
685f0bf7 4249 break;
dea3101e
JB
4250 }
4251 }
4252 }
4253 if (sentadisc == 0) {
2e0fef85
JS
4254 spin_lock_irq(shost->host_lock);
4255 vport->fc_flag &= ~FC_NLP_MORE;
4256 spin_unlock_irq(shost->host_lock);
dea3101e 4257 }
2fe165b6 4258 return sentadisc;
dea3101e
JB
4259}
4260
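/* Note (assumption): when FC_NLP_MORE is left set above, the remaining NPR
 * nodes are picked up later, as the outstanding ADISCs complete, by the
 * lpfc_more_adisc() helper elsewhere in the driver, which calls back into
 * lpfc_els_disc_adisc().
 */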
e59058c4 4261/**
3621a710 4262 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
e59058c4
JS
4263 * @vport: pointer to a host virtual N_Port data structure.
4264 *
4265 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
4266 * which are in node port recovery state on a @vport. Each time an ELS
4267 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
4268 * the per-@vport discovery count (num_disc_nodes) shall be
4269 * incremented. If num_disc_nodes reaches a pre-configured threshold
4270 * (cfg_discovery_threads), the @vport fc_flag will be marked with the
4271 * FC_NLP_MORE bit and the issuing of the remaining PLOGI IOCBs will be
4272 * deferred for later pick-up. On the other hand, if after walking through
4273 * all the ndlps of the @vport no PLOGI IOCB has been issued, the FC_NLP_MORE
4274 * bit shall be cleared from the @vport fc_flag, indicating there are no more
4275 * PLOGIs to be sent.
4276 *
4277 * Return code
4278 * The number of N_Ports with plogi issued.
4279 **/
dea3101e 4280int
2e0fef85 4281lpfc_els_disc_plogi(struct lpfc_vport *vport)
dea3101e 4282{
2e0fef85 4283 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 4284 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 4285 int sentplogi = 0;
dea3101e 4286
2e0fef85
JS
4287 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
4288 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
4289 if (!NLP_CHK_NODE_ACT(ndlp))
4290 continue;
685f0bf7
JS
4291 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4292 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4293 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
4294 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
4295 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
4296 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4297 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
685f0bf7 4298 sentplogi++;
2e0fef85
JS
4299 vport->num_disc_nodes++;
4300 if (vport->num_disc_nodes >=
3de2a653 4301 vport->cfg_discovery_threads) {
2e0fef85
JS
4302 spin_lock_irq(shost->host_lock);
4303 vport->fc_flag |= FC_NLP_MORE;
4304 spin_unlock_irq(shost->host_lock);
685f0bf7 4305 break;
dea3101e
JB
4306 }
4307 }
4308 }
87af33fe
JS
4309 if (sentplogi) {
4310 lpfc_set_disctmo(vport);
4311 }
4312 else {
2e0fef85
JS
4313 spin_lock_irq(shost->host_lock);
4314 vport->fc_flag &= ~FC_NLP_MORE;
4315 spin_unlock_irq(shost->host_lock);
dea3101e 4316 }
2fe165b6 4317 return sentplogi;
dea3101e
JB
4318}
4319
e59058c4 4320/**
3621a710 4321 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
e59058c4
JS
4322 * @vport: pointer to a host virtual N_Port data structure.
4323 *
4324 * This routine cleans up any Registration State Change Notification
4325 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
4326 * @vport, together with the host_lock, is used to prevent multiple threads
4327 * from trying to access the RSCN array of the same @vport at the same time.
4328 **/
92d7f7b0 4329void
2e0fef85 4330lpfc_els_flush_rscn(struct lpfc_vport *vport)
dea3101e 4331{
2e0fef85
JS
4332 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4333 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
4334 int i;
4335
7f5f3d0d
JS
4336 spin_lock_irq(shost->host_lock);
4337 if (vport->fc_rscn_flush) {
4338 /* Another thread is walking fc_rscn_id_list on this vport */
4339 spin_unlock_irq(shost->host_lock);
4340 return;
4341 }
4342 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
4343 vport->fc_rscn_flush = 1;
4344 spin_unlock_irq(shost->host_lock);
4345
2e0fef85 4346 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
92d7f7b0 4347 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
2e0fef85 4348 vport->fc_rscn_id_list[i] = NULL;
dea3101e 4349 }
2e0fef85
JS
4350 spin_lock_irq(shost->host_lock);
4351 vport->fc_rscn_id_cnt = 0;
4352 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
4353 spin_unlock_irq(shost->host_lock);
4354 lpfc_can_disctmo(vport);
7f5f3d0d
JS
4355 /* Indicate we are done walking this fc_rscn_id_list */
4356 vport->fc_rscn_flush = 0;
dea3101e
JB
4357}
4358
e59058c4 4359/**
3621a710 4360 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
e59058c4
JS
4361 * @vport: pointer to a host virtual N_Port data structure.
4362 * @did: remote destination port identifier.
4363 *
4364 * This routine checks whether there is any pending Registration State
4365 * Change Notification (RSCN) to a @did on @vport.
4366 *
4367 * Return code
4368 * Non-zero - The @did matched a pending RSCN
4369 * 0 - The @did could not be matched with any pending RSCN
4370 **/
dea3101e 4371int
2e0fef85 4372lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
dea3101e
JB
4373{
4374 D_ID ns_did;
4375 D_ID rscn_did;
dea3101e 4376 uint32_t *lp;
92d7f7b0 4377 uint32_t payload_len, i;
7f5f3d0d 4378 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e
JB
4379
4380 ns_did.un.word = did;
dea3101e
JB
4381
4382 /* Never match fabric nodes for RSCNs */
4383 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
2e0fef85 4384 return 0;
dea3101e
JB
4385
4386 /* If we are doing a FULL RSCN rediscovery, match everything */
2e0fef85 4387 if (vport->fc_flag & FC_RSCN_DISCOVERY)
c9f8735b 4388 return did;
dea3101e 4389
7f5f3d0d
JS
4390 spin_lock_irq(shost->host_lock);
4391 if (vport->fc_rscn_flush) {
4392 /* Another thread is walking fc_rscn_id_list on this vport */
4393 spin_unlock_irq(shost->host_lock);
4394 return 0;
4395 }
4396 /* Indicate we are walking fc_rscn_id_list on this vport */
4397 vport->fc_rscn_flush = 1;
4398 spin_unlock_irq(shost->host_lock);
2e0fef85 4399 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
92d7f7b0
JS
4400 lp = vport->fc_rscn_id_list[i]->virt;
4401 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4402 payload_len -= sizeof(uint32_t); /* take off word 0 */
dea3101e 4403 while (payload_len) {
92d7f7b0
JS
4404 rscn_did.un.word = be32_to_cpu(*lp++);
4405 payload_len -= sizeof(uint32_t);
eaf15d5b
JS
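 /* Each RSCN entry carries an address-format qualifier in the reserved
  * byte: match on the full port ID, the area, the domain, or the whole
  * fabric accordingly.
  */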
4406 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
4407 case RSCN_ADDRESS_FORMAT_PORT:
6fb120a7
JS
4408 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4409 && (ns_did.un.b.area == rscn_did.un.b.area)
4410 && (ns_did.un.b.id == rscn_did.un.b.id))
7f5f3d0d 4411 goto return_did_out;
dea3101e 4412 break;
eaf15d5b 4413 case RSCN_ADDRESS_FORMAT_AREA:
dea3101e
JB
4414 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4415 && (ns_did.un.b.area == rscn_did.un.b.area))
7f5f3d0d 4416 goto return_did_out;
dea3101e 4417 break;
eaf15d5b 4418 case RSCN_ADDRESS_FORMAT_DOMAIN:
dea3101e 4419 if (ns_did.un.b.domain == rscn_did.un.b.domain)
7f5f3d0d 4420 goto return_did_out;
dea3101e 4421 break;
eaf15d5b 4422 case RSCN_ADDRESS_FORMAT_FABRIC:
7f5f3d0d 4423 goto return_did_out;
dea3101e
JB
4424 }
4425 }
92d7f7b0 4426 }
7f5f3d0d
JS
4427 /* Indicate we are done with walking fc_rscn_id_list on this vport */
4428 vport->fc_rscn_flush = 0;
92d7f7b0 4429 return 0;
7f5f3d0d
JS
4430return_did_out:
4431 /* Indicate we are done with walking fc_rscn_id_list on this vport */
4432 vport->fc_rscn_flush = 0;
4433 return did;
dea3101e
JB
4434}
4435
e59058c4 4436/**
3621a710 4437 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
e59058c4
JS
4438 * @vport: pointer to a host virtual N_Port data structure.
4439 *
4440 * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the
4441 * state machine for each of the @vport's nodes that has a pending RSCN
4442 * (Registration State Change Notification).
4443 *
4444 * Return code
4445 * 0 - Successful (currently always returns 0)
4446 **/
dea3101e 4447static int
2e0fef85 4448lpfc_rscn_recovery_check(struct lpfc_vport *vport)
dea3101e 4449{
685f0bf7 4450 struct lpfc_nodelist *ndlp = NULL;
dea3101e 4451
0d2b6b83 4452 /* Move all affected nodes by pending RSCNs to NPR state. */
2e0fef85 4453 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093 4454 if (!NLP_CHK_NODE_ACT(ndlp) ||
0d2b6b83
JS
4455 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
4456 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
685f0bf7 4457 continue;
2e0fef85 4458 lpfc_disc_state_machine(vport, ndlp, NULL,
0d2b6b83
JS
4459 NLP_EVT_DEVICE_RECOVERY);
4460 lpfc_cancel_retry_delay_tmo(vport, ndlp);
dea3101e 4461 }
c9f8735b 4462 return 0;
dea3101e
JB
4463}
4464
ddcc50f0 4465/**
3621a710 4466 * lpfc_send_rscn_event - Send an RSCN event to management application
ddcc50f0
JS
4467 * @vport: pointer to a host virtual N_Port data structure.
4468 * @cmdiocb: pointer to lpfc command iocb data structure.
4469 *
4470 * lpfc_send_rscn_event sends an RSCN netlink event to management
4471 * applications.
4472 */
4473static void
4474lpfc_send_rscn_event(struct lpfc_vport *vport,
4475 struct lpfc_iocbq *cmdiocb)
4476{
4477 struct lpfc_dmabuf *pcmd;
4478 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4479 uint32_t *payload_ptr;
4480 uint32_t payload_len;
4481 struct lpfc_rscn_event_header *rscn_event_data;
4482
4483 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4484 payload_ptr = (uint32_t *) pcmd->virt;
4485 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
4486
4487 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
4488 payload_len, GFP_KERNEL);
4489 if (!rscn_event_data) {
4490 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4491 "0147 Failed to allocate memory for RSCN event\n");
4492 return;
4493 }
4494 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
4495 rscn_event_data->payload_length = payload_len;
4496 memcpy(rscn_event_data->rscn_payload, payload_ptr,
4497 payload_len);
4498
4499 fc_host_post_vendor_event(shost,
4500 fc_get_event_number(),
4501 sizeof(struct lpfc_els_event_header) + payload_len,
4502 (char *)rscn_event_data,
4503 LPFC_NL_VENDOR_ID);
4504
4505 kfree(rscn_event_data);
4506}
4507
e59058c4 4508/**
3621a710 4509 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
e59058c4
JS
4510 * @vport: pointer to a host virtual N_Port data structure.
4511 * @cmdiocb: pointer to lpfc command iocb data structure.
4512 * @ndlp: pointer to a node-list data structure.
4513 *
4514 * This routine processes an unsolicited RSCN (Registration State Change
4515 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
4516 * and the fc_host_post_event() routine is invoked to notify the FC
4517 * transport layer. If the discovery state machine is about to begin
4518 * discovery, it just accepts the RSCN and the discovery process will
4519 * satisfy it. If this RSCN only contains N_Port IDs for other vports on
4520 * this HBA, it just accepts the RSCN and ignores it. If the state machine
4521 * is in the recovery state, the fc_rscn_id_list of this @vport is walked
4522 * and the lpfc_rscn_recovery_check() routine is invoked to send a recovery
4523 * event to all nodes that match the RSCN payload. Otherwise, the
4524 * lpfc_els_handle_rscn() routine is invoked to handle the RSCN event.
4525 *
4526 * Return code
4527 * 0 - Just sent the acc response
4528 * 1 - Sent the acc response and waited for name server completion
4529 **/
dea3101e 4530static int
2e0fef85 4531lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
51ef4c26 4532 struct lpfc_nodelist *ndlp)
dea3101e 4533{
2e0fef85
JS
4534 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4535 struct lpfc_hba *phba = vport->phba;
dea3101e 4536 struct lpfc_dmabuf *pcmd;
92d7f7b0 4537 uint32_t *lp, *datap;
dea3101e 4538 IOCB_t *icmd;
92d7f7b0 4539 uint32_t payload_len, length, nportid, *cmd;
7f5f3d0d 4540 int rscn_cnt;
92d7f7b0 4541 int rscn_id = 0, hba_id = 0;
d2873e4c 4542 int i;
dea3101e
JB
4543
4544 icmd = &cmdiocb->iocb;
4545 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4546 lp = (uint32_t *) pcmd->virt;
4547
92d7f7b0
JS
4548 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4549 payload_len -= sizeof(uint32_t); /* take off word 0 */
dea3101e 4550 /* RSCN received */
e8b62011
JS
4551 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4552 "0214 RSCN received Data: x%x x%x x%x x%x\n",
7f5f3d0d
JS
4553 vport->fc_flag, payload_len, *lp,
4554 vport->fc_rscn_id_cnt);
ddcc50f0
JS
4555
4556 /* Send an RSCN event to the management application */
4557 lpfc_send_rscn_event(vport, cmdiocb);
4558
d2873e4c 4559 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
2e0fef85 4560 fc_host_post_event(shost, fc_get_event_number(),
d2873e4c
JS
4561 FCH_EVT_RSCN, lp[i]);
4562
dea3101e
JB
4563 /* If we are about to begin discovery, just ACC the RSCN.
4564 * Discovery processing will satisfy it.
4565 */
2e0fef85 4566 if (vport->port_state <= LPFC_NS_QRY) {
858c9f6c
JS
4567 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4568 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
4569 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4570
51ef4c26 4571 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
c9f8735b 4572 return 0;
dea3101e
JB
4573 }
4574
92d7f7b0
JS
4575 /* If this RSCN just contains NPortIDs for other vports on this HBA,
4576 * just ACC and ignore it.
4577 */
4578 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3de2a653 4579 !(vport->cfg_peer_port_login)) {
92d7f7b0
JS
4580 i = payload_len;
4581 datap = lp;
4582 while (i > 0) {
4583 nportid = *datap++;
4584 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
4585 i -= sizeof(uint32_t);
4586 rscn_id++;
549e55cd
JS
4587 if (lpfc_find_vport_by_did(phba, nportid))
4588 hba_id++;
92d7f7b0
JS
4589 }
4590 if (rscn_id == hba_id) {
4591 /* ALL NPortIDs in RSCN are on HBA */
e8b62011 4592 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
d7c255b2 4593 "0219 Ignore RSCN "
e8b62011
JS
4594 "Data: x%x x%x x%x x%x\n",
4595 vport->fc_flag, payload_len,
7f5f3d0d 4596 *lp, vport->fc_rscn_id_cnt);
858c9f6c
JS
4597 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4598 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
4599 ndlp->nlp_DID, vport->port_state,
4600 ndlp->nlp_flag);
4601
92d7f7b0 4602 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
51ef4c26 4603 ndlp, NULL);
92d7f7b0
JS
4604 return 0;
4605 }
4606 }
4607
7f5f3d0d
JS
4608 spin_lock_irq(shost->host_lock);
4609 if (vport->fc_rscn_flush) {
4610 /* Another thread is walking fc_rscn_id_list on this vport */
7f5f3d0d 4611 vport->fc_flag |= FC_RSCN_DISCOVERY;
97957244 4612 spin_unlock_irq(shost->host_lock);
58da1ffb
JS
4613 /* Send back ACC */
4614 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7f5f3d0d
JS
4615 return 0;
4616 }
4617 /* Indicate we are walking fc_rscn_id_list on this vport */
4618 vport->fc_rscn_flush = 1;
4619 spin_unlock_irq(shost->host_lock);
af901ca1 4620 /* Get the array count after successfully acquiring the token */
7f5f3d0d 4621 rscn_cnt = vport->fc_rscn_id_cnt;
dea3101e
JB
4622 /* If we are already processing an RSCN, save the received
4623 * RSCN payload buffer, cmdiocb->context2 to process later.
4624 */
2e0fef85 4625 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
858c9f6c
JS
4626 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4627 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
4628 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4629
09372820 4630 spin_lock_irq(shost->host_lock);
92d7f7b0
JS
4631 vport->fc_flag |= FC_RSCN_DEFERRED;
4632 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
2e0fef85 4633 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
2e0fef85
JS
4634 vport->fc_flag |= FC_RSCN_MODE;
4635 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
4636 if (rscn_cnt) {
4637 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
4638 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
4639 }
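 /* If the new payload still fits in the last saved RSCN buffer, append
  * it there and adjust the command length word; otherwise store this
  * buffer as a new fc_rscn_id_list entry.
  */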
4640 if ((rscn_cnt) &&
4641 (payload_len + length <= LPFC_BPL_SIZE)) {
4642 *cmd &= ELS_CMD_MASK;
7f5f3d0d 4643 *cmd |= cpu_to_be32(payload_len + length);
92d7f7b0
JS
4644 memcpy(((uint8_t *)cmd) + length, lp,
4645 payload_len);
4646 } else {
4647 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
4648 vport->fc_rscn_id_cnt++;
4649 /* If we zero cmdiocb->context2, the calling
4650 * routine will not try to free it.
4651 */
4652 cmdiocb->context2 = NULL;
4653 }
dea3101e 4654 /* Deferred RSCN */
e8b62011
JS
4655 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4656 "0235 Deferred RSCN "
4657 "Data: x%x x%x x%x\n",
4658 vport->fc_rscn_id_cnt, vport->fc_flag,
4659 vport->port_state);
dea3101e 4660 } else {
2e0fef85
JS
4661 vport->fc_flag |= FC_RSCN_DISCOVERY;
4662 spin_unlock_irq(shost->host_lock);
dea3101e 4663 /* ReDiscovery RSCN */
e8b62011
JS
4664 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4665 "0234 ReDiscovery RSCN "
4666 "Data: x%x x%x x%x\n",
4667 vport->fc_rscn_id_cnt, vport->fc_flag,
4668 vport->port_state);
dea3101e 4669 }
7f5f3d0d
JS
4670 /* Indicate we are done walking fc_rscn_id_list on this vport */
4671 vport->fc_rscn_flush = 0;
dea3101e 4672 /* Send back ACC */
51ef4c26 4673 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e 4674 /* send RECOVERY event for ALL nodes that match RSCN payload */
2e0fef85 4675 lpfc_rscn_recovery_check(vport);
09372820 4676 spin_lock_irq(shost->host_lock);
92d7f7b0 4677 vport->fc_flag &= ~FC_RSCN_DEFERRED;
09372820 4678 spin_unlock_irq(shost->host_lock);
c9f8735b 4679 return 0;
dea3101e 4680 }
858c9f6c
JS
4681 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4682 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
4683 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4684
2e0fef85
JS
4685 spin_lock_irq(shost->host_lock);
4686 vport->fc_flag |= FC_RSCN_MODE;
4687 spin_unlock_irq(shost->host_lock);
4688 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
7f5f3d0d
JS
4689 /* Indicate we are done walking fc_rscn_id_list on this vport */
4690 vport->fc_rscn_flush = 0;
dea3101e
JB
4691 /*
4692 * If we zero cmdiocb->context2, the calling routine will
4693 * not try to free it.
4694 */
4695 cmdiocb->context2 = NULL;
2e0fef85 4696 lpfc_set_disctmo(vport);
dea3101e 4697 /* Send back ACC */
51ef4c26 4698 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e 4699 /* send RECOVERY event for ALL nodes that match RSCN payload */
2e0fef85 4700 lpfc_rscn_recovery_check(vport);
2e0fef85 4701 return lpfc_els_handle_rscn(vport);
dea3101e
JB
4702}
4703
e59058c4 4704/**
3621a710 4705 * lpfc_els_handle_rscn - Handle rscn for a vport
e59058c4
JS
4706 * @vport: pointer to a host virtual N_Port data structure.
4707 *
4708 * This routine handles the Registration State Change Notification (RSCN)
4709 * for a @vport. If a login to the NameServer does not exist, a new ndlp
4710 * shall be created and a Port Login (PLOGI) to the NameServer issued.
4711 * Otherwise, if the ndlp for the NameServer exists, a Common Transport (CT)
4712 * command to the NameServer shall be issued. If the CT command to the
4713 * NameServer fails to be issued, the lpfc_els_flush_rscn() routine shall be
4714 * invoked to clean up any RSCN activities with the @vport.
4715 *
4716 * Return code
4717 * 0 - Cleaned up rscn on the @vport
4718 * 1 - Wait for PLOGI to the NameServer before proceeding
4719 **/
dea3101e 4720int
2e0fef85 4721lpfc_els_handle_rscn(struct lpfc_vport *vport)
dea3101e
JB
4722{
4723 struct lpfc_nodelist *ndlp;
2e0fef85 4724 struct lpfc_hba *phba = vport->phba;
dea3101e 4725
92d7f7b0
JS
4726 /* Ignore RSCN if the port is being torn down. */
4727 if (vport->load_flag & FC_UNLOADING) {
4728 lpfc_els_flush_rscn(vport);
4729 return 0;
4730 }
4731
dea3101e 4732 /* Start timer for RSCN processing */
2e0fef85 4733 lpfc_set_disctmo(vport);
dea3101e
JB
4734
4735 /* RSCN processed */
e8b62011
JS
4736 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4737 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
4738 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
4739 vport->port_state);
dea3101e
JB
4740
4741 /* To process RSCN, first compare RSCN data with NameServer */
2e0fef85 4742 vport->fc_ns_retry = 0;
0ff10d46
JS
4743 vport->num_disc_nodes = 0;
4744
2e0fef85 4745 ndlp = lpfc_findnode_did(vport, NameServer_DID);
e47c9093
JS
4746 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
4747 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
dea3101e 4748 /* Good ndlp, issue CT Request to NameServer */
92d7f7b0 4749 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
dea3101e
JB
4750 /* Wait for NameServer query cmpl before we can
4751 continue */
c9f8735b 4752 return 1;
dea3101e
JB
4753 } else {
4754 /* If login to NameServer does not exist, issue one */
4755 /* Good status, issue PLOGI to NameServer */
2e0fef85 4756 ndlp = lpfc_findnode_did(vport, NameServer_DID);
e47c9093 4757 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
dea3101e
JB
4758 /* Wait for NameServer login cmpl before we can
4759 continue */
c9f8735b 4760 return 1;
2e0fef85 4761
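 /* An existing but inactive NameServer ndlp is re-enabled straight into
  * PLOGI_ISSUE state; otherwise a fresh ndlp is allocated and
  * initialized before the PLOGI below is issued.
  */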
e47c9093
JS
4762 if (ndlp) {
4763 ndlp = lpfc_enable_node(vport, ndlp,
4764 NLP_STE_PLOGI_ISSUE);
4765 if (!ndlp) {
4766 lpfc_els_flush_rscn(vport);
4767 return 0;
4768 }
4769 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
dea3101e 4770 } else {
e47c9093
JS
4771 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4772 if (!ndlp) {
4773 lpfc_els_flush_rscn(vport);
4774 return 0;
4775 }
2e0fef85 4776 lpfc_nlp_init(vport, ndlp, NameServer_DID);
5024ab17 4777 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 4778 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
dea3101e 4779 }
e47c9093
JS
4780 ndlp->nlp_type |= NLP_FABRIC;
4781 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
4782 /* Wait for NameServer login cmpl before we can
4783 * continue
4784 */
4785 return 1;
dea3101e
JB
4786 }
4787
2e0fef85 4788 lpfc_els_flush_rscn(vport);
c9f8735b 4789 return 0;
dea3101e
JB
4790}
4791
e59058c4 4792/**
3621a710 4793 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
e59058c4
JS
4794 * @vport: pointer to a host virtual N_Port data structure.
4795 * @cmdiocb: pointer to lpfc command iocb data structure.
4796 * @ndlp: pointer to a node-list data structure.
4797 *
4798 * This routine processes a Fabric Login (FLOGI) IOCB received as an ELS
4799 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
4800 * point topology. As an unsolicited FLOGI should not be received in loop
4801 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
4802 * lpfc_check_sparm() routine is invoked to check the parameters in the
4803 * unsolicited FLOGI. If parameter validation fails, the routine
4804 * lpfc_els_rsp_reject() shall be called with the reject reason code set to
4805 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
4806 * FLOGI shall be compared with the Port WWN of the @vport to determine who
4807 * will initiate PLOGI. The party with the lexicographically higher value
4808 * shall have higher priority (as the winning port) and will initiate PLOGI
4809 * and communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
4810 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
4811 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
4812 *
4813 * Return code
4814 * 0 - Successfully processed the unsolicited flogi
4815 * 1 - Failed to process the unsolicited flogi
4816 **/
dea3101e 4817static int
2e0fef85 4818lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
51ef4c26 4819 struct lpfc_nodelist *ndlp)
dea3101e 4820{
2e0fef85
JS
4821 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4822 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
4823 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4824 uint32_t *lp = (uint32_t *) pcmd->virt;
4825 IOCB_t *icmd = &cmdiocb->iocb;
4826 struct serv_parm *sp;
4827 LPFC_MBOXQ_t *mbox;
4828 struct ls_rjt stat;
4829 uint32_t cmd, did;
4830 int rc;
4831
4832 cmd = *lp++;
4833 sp = (struct serv_parm *) lp;
4834
4835 /* FLOGI received */
4836
2e0fef85 4837 lpfc_set_disctmo(vport);
dea3101e 4838
76a95d75 4839 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
dea3101e
JB
4840 /* We should never receive a FLOGI in loop mode, ignore it */
4841 did = icmd->un.elsreq64.remoteID;
4842
4843 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
4844 Loop Mode */
e8b62011
JS
4845 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4846 "0113 An FLOGI ELS command x%x was "
4847 "received from DID x%x in Loop Mode\n",
4848 cmd, did);
c9f8735b 4849 return 1;
dea3101e
JB
4850 }
4851
4852 did = Fabric_DID;
4853
341af102 4854 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
dea3101e
JB
4855 /* For a FLOGI we accept, then if our portname is greater
4856 * then the remote portname we initiate Nport login.
4857 */
4858
2e0fef85 4859 rc = memcmp(&vport->fc_portname, &sp->portName,
92d7f7b0 4860 sizeof(struct lpfc_name));
dea3101e
JB
4861
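 /* Compare WWPNs to pick the PLOGI initiator: equal names give no
  * winner, so the link is dropped and re-initialized; if our name is
  * greater, FC_PT2PT_PLOGI marks this port as the PLOGI initiator.
  */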
4862 if (!rc) {
2e0fef85
JS
4863 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4864 if (!mbox)
c9f8735b 4865 return 1;
2e0fef85 4866
dea3101e
JB
4867 lpfc_linkdown(phba);
4868 lpfc_init_link(phba, mbox,
4869 phba->cfg_topology,
4870 phba->cfg_link_speed);
04c68496 4871 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
dea3101e 4872 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
ed957684 4873 mbox->vport = vport;
0b727fea 4874 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5b8bd0c9 4875 lpfc_set_loopback_flag(phba);
dea3101e 4876 if (rc == MBX_NOT_FINISHED) {
329f9bc7 4877 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e 4878 }
c9f8735b 4879 return 1;
2fe165b6 4880 } else if (rc > 0) { /* greater than */
2e0fef85
JS
4881 spin_lock_irq(shost->host_lock);
4882 vport->fc_flag |= FC_PT2PT_PLOGI;
4883 spin_unlock_irq(shost->host_lock);
dea3101e 4884 }
2e0fef85
JS
4885 spin_lock_irq(shost->host_lock);
4886 vport->fc_flag |= FC_PT2PT;
4887 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4888 spin_unlock_irq(shost->host_lock);
dea3101e
JB
4889 } else {
4890 /* Reject this request because invalid parameters */
4891 stat.un.b.lsRjtRsvd0 = 0;
4892 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4893 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
4894 stat.un.b.vendorUnique = 0;
858c9f6c
JS
4895 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4896 NULL);
c9f8735b 4897 return 1;
dea3101e
JB
4898 }
4899
4900 /* Send back ACC */
51ef4c26 4901 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
dea3101e 4902
c9f8735b 4903 return 0;
dea3101e
JB
4904}
4905
e59058c4 4906/**
3621a710 4907 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
e59058c4
JS
4908 * @vport: pointer to a host virtual N_Port data structure.
4909 * @cmdiocb: pointer to lpfc command iocb data structure.
4910 * @ndlp: pointer to a node-list data structure.
4911 *
4912 * This routine processes a Request Node Identification Data (RNID) IOCB
4913 * received as an ELS unsolicited event. Only when the RNID specifies format
4914 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) does
4915 * this routine invoke the lpfc_els_rsp_rnid_acc() routine to Accept (ACC)
4916 * the RNID ELS command. All other RNID formats are rejected by invoking
4917 * the lpfc_els_rsp_reject() routine.
4918 *
4919 * Return code
4920 * 0 - Successfully processed rnid iocb (currently always returns 0)
4921 **/
dea3101e 4922static int
2e0fef85
JS
4923lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4924 struct lpfc_nodelist *ndlp)
dea3101e
JB
4925{
4926 struct lpfc_dmabuf *pcmd;
4927 uint32_t *lp;
4928 IOCB_t *icmd;
4929 RNID *rn;
4930 struct ls_rjt stat;
4931 uint32_t cmd, did;
4932
4933 icmd = &cmdiocb->iocb;
4934 did = icmd->un.elsreq64.remoteID;
4935 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4936 lp = (uint32_t *) pcmd->virt;
4937
4938 cmd = *lp++;
4939 rn = (RNID *) lp;
4940
4941 /* RNID received */
4942
4943 switch (rn->Format) {
4944 case 0:
4945 case RNID_TOPOLOGY_DISC:
4946 /* Send back ACC */
2e0fef85 4947 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
dea3101e
JB
4948 break;
4949 default:
4950 /* Reject this request because format not supported */
4951 stat.un.b.lsRjtRsvd0 = 0;
4952 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4953 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4954 stat.un.b.vendorUnique = 0;
858c9f6c
JS
4955 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4956 NULL);
dea3101e 4957 }
c9f8735b 4958 return 0;
dea3101e
JB
4959}
4960
12265f68
JS
4961/**
4962 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
4963 * @vport: pointer to a host virtual N_Port data structure.
4964 * @cmdiocb: pointer to lpfc command iocb data structure.
4965 * @ndlp: pointer to a node-list data structure.
4966 *
4967 * Return code
4968 * 0 - Successfully processed echo iocb (currently always returns 0)
4969 **/
4970static int
4971lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4972 struct lpfc_nodelist *ndlp)
4973{
4974 uint8_t *pcmd;
4975
4976 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
4977
4978 /* skip over first word of echo command to find echo data */
4979 pcmd += sizeof(uint32_t);
4980
4981 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
4982 return 0;
4983}
4984
e59058c4 4985/**
3621a710 4986 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
e59058c4
JS
4987 * @vport: pointer to a host virtual N_Port data structure.
4988 * @cmdiocb: pointer to lpfc command iocb data structure.
4989 * @ndlp: pointer to a node-list data structure.
4990 *
4991 * This routine processes a Link Incident Report Registration (LIRR) IOCB
4992 * received as an ELS unsolicited event. Currently, this function just invokes
4993 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
4994 *
4995 * Return code
4996 * 0 - Successfully processed lirr iocb (currently always returns 0)
4997 **/
dea3101e 4998static int
2e0fef85
JS
4999lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5000 struct lpfc_nodelist *ndlp)
7bb3b137
JW
5001{
5002 struct ls_rjt stat;
5003
5004 /* For now, unconditionally reject this command */
5005 stat.un.b.lsRjtRsvd0 = 0;
5006 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5007 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5008 stat.un.b.vendorUnique = 0;
858c9f6c 5009 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7bb3b137
JW
5010 return 0;
5011}
5012
5ffc266e
JS
5013/**
5014 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
5015 * @vport: pointer to a host virtual N_Port data structure.
5016 * @cmdiocb: pointer to lpfc command iocb data structure.
5017 * @ndlp: pointer to a node-list data structure.
5018 *
5019 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
5020 * received as an ELS unsolicited event. A request to RRQ shall only
5021 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
5022 * Nx_Port N_Port_ID of the target Exchange is the same as the
5023 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
5024 * not accepted, an LS_RJT with reason code "Unable to perform
5025 * command request" and reason code explanation "Invalid Originator
5026 * S_ID" shall be returned. For now, we just unconditionally accept
5027 * RRQ from the target.
5028 **/
5029static void
5030lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5031 struct lpfc_nodelist *ndlp)
5032{
5033 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
19ca7609
JS
5034 if (vport->phba->sli_rev == LPFC_SLI_REV4)
5035 lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
5ffc266e
JS
5036}
5037
12265f68
JS
5038/**
5039 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
5040 * @phba: pointer to lpfc hba data structure.
5041 * @pmb: pointer to the driver internal queue element for mailbox command.
5042 *
5043 * This routine is the completion callback function for the MBX_READ_LNK_STAT
5044 * mailbox command. This callback function actually sends the Accept
5045 * (ACC) response to a Read Link Error Status (RLS) unsolicited IOCB event.
5046 * It collects the link statistics from the completion of the
5047 * MBX_READ_LNK_STAT mailbox command, constructs the RLS response with the
5048 * link statistics collected, and then invokes the lpfc_sli_issue_iocb()
5049 * routine to send the ACC response to the RLS.
5050 *
5051 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5052 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5053 * will be stored into the context1 field of the IOCB for the completion
5054 * callback function to the RLS Accept Response ELS IOCB command.
5055 *
5056 **/
5057static void
5058lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5059{
5060 MAILBOX_t *mb;
5061 IOCB_t *icmd;
5062 struct RLS_RSP *rls_rsp;
5063 uint8_t *pcmd;
5064 struct lpfc_iocbq *elsiocb;
5065 struct lpfc_nodelist *ndlp;
7851fe2c
JS
5066 uint16_t oxid;
5067 uint16_t rxid;
12265f68
JS
5068 uint32_t cmdsize;
5069
5070 mb = &pmb->u.mb;
5071
5072 ndlp = (struct lpfc_nodelist *) pmb->context2;
7851fe2c
JS
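 /* pmb->context1 was packed by lpfc_els_rcv_rls(): the rx_id of the
  * original unsolicited RLS is in the low 16 bits, the ox_id in the
  * high 16 bits.
  */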
5073 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5074 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
12265f68
JS
5075 pmb->context1 = NULL;
5076 pmb->context2 = NULL;
5077
5078 if (mb->mbxStatus) {
5079 mempool_free(pmb, phba->mbox_mem_pool);
5080 return;
5081 }
5082
5083 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
5084 mempool_free(pmb, phba->mbox_mem_pool);
5085 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5086 lpfc_max_els_tries, ndlp,
5087 ndlp->nlp_DID, ELS_CMD_ACC);
5088
5089 /* Decrement the ndlp reference count from previous mbox command */
5090 lpfc_nlp_put(ndlp);
5091
5092 if (!elsiocb)
5093 return;
5094
5095 icmd = &elsiocb->iocb;
7851fe2c
JS
5096 icmd->ulpContext = rxid;
5097 icmd->unsli3.rcvsli3.ox_id = oxid;
12265f68
JS
5098
5099 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5100 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5101 pcmd += sizeof(uint32_t); /* Skip past command */
5102 rls_rsp = (struct RLS_RSP *)pcmd;
5103
5104 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
5105 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
5106 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
5107 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
5108 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
5109 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
5110
5111 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
5112 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5113 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
5114 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5115 elsiocb->iotag, elsiocb->iocb.ulpContext,
5116 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5117 ndlp->nlp_rpi);
5118 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5119 phba->fc_stat.elsXmitACC++;
5120 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5121 lpfc_els_free_iocb(phba, elsiocb);
5122}
5123
e59058c4 5124/**
3621a710 5125 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
e59058c4
JS
5126 * @phba: pointer to lpfc hba data structure.
5127 * @pmb: pointer to the driver internal queue element for mailbox command.
5128 *
5129 * This routine is the completion callback function for the MBX_READ_LNK_STAT
5130 * mailbox command. This callback function actually sends the Accept
5131 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
5132 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
5133 * mailbox command, constructs the RPS response with the link statistics
5134 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
5135 * response to the RPS.
5136 *
5137 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5138 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5139 * will be stored into the context1 field of the IOCB for the completion
5140 * callback function to the RPS Accept Response ELS IOCB command.
5141 *
5142 **/
082c0266 5143static void
329f9bc7 5144lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7bb3b137 5145{
7bb3b137
JW
5146 MAILBOX_t *mb;
5147 IOCB_t *icmd;
5148 RPS_RSP *rps_rsp;
5149 uint8_t *pcmd;
5150 struct lpfc_iocbq *elsiocb;
5151 struct lpfc_nodelist *ndlp;
7851fe2c
JS
5152 uint16_t status;
5153 uint16_t oxid;
5154 uint16_t rxid;
7bb3b137
JW
5155 uint32_t cmdsize;
5156
04c68496 5157 mb = &pmb->u.mb;
7bb3b137
JW
5158
5159 ndlp = (struct lpfc_nodelist *) pmb->context2;
7851fe2c
JS
5160 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5161 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
041976fb
RD
5162 pmb->context1 = NULL;
5163 pmb->context2 = NULL;
7bb3b137
JW
5164
5165 if (mb->mbxStatus) {
329f9bc7 5166 mempool_free(pmb, phba->mbox_mem_pool);
7bb3b137
JW
5167 return;
5168 }
5169
5170 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
329f9bc7 5171 mempool_free(pmb, phba->mbox_mem_pool);
2e0fef85
JS
5172 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5173 lpfc_max_els_tries, ndlp,
5174 ndlp->nlp_DID, ELS_CMD_ACC);
fa4066b6
JS
5175
5176 /* Decrement the ndlp reference count from previous mbox command */
329f9bc7 5177 lpfc_nlp_put(ndlp);
fa4066b6 5178
c9f8735b 5179 if (!elsiocb)
7bb3b137 5180 return;
7bb3b137
JW
5181
5182 icmd = &elsiocb->iocb;
7851fe2c
JS
5183 icmd->ulpContext = rxid;
5184 icmd->unsli3.rcvsli3.ox_id = oxid;
7bb3b137
JW
5185
5186 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5187 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 5188 pcmd += sizeof(uint32_t); /* Skip past command */
7bb3b137
JW
5189 rps_rsp = (RPS_RSP *)pcmd;
5190
76a95d75 5191 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
7bb3b137
JW
5192 status = 0x10;
5193 else
5194 status = 0x8;
2e0fef85 5195 if (phba->pport->fc_flag & FC_FABRIC)
7bb3b137
JW
5196 status |= 0x4;
5197
5198 rps_rsp->rsvd1 = 0;
09372820
JS
5199 rps_rsp->portStatus = cpu_to_be16(status);
5200 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
5201 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
5202 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
5203 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
5204 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
5205 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
7bb3b137 5206 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
e8b62011
JS
5207 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5208 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
5209 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5210 elsiocb->iotag, elsiocb->iocb.ulpContext,
5211 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5212 ndlp->nlp_rpi);
858c9f6c 5213 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7bb3b137 5214 phba->fc_stat.elsXmitACC++;
3772a991 5215 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
7bb3b137 5216 lpfc_els_free_iocb(phba, elsiocb);
7bb3b137
JW
5217 return;
5218}
5219
e59058c4 5220/**
12265f68
JS
5221 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
5222 * @vport: pointer to a host virtual N_Port data structure.
5223 * @cmdiocb: pointer to lpfc command iocb data structure.
5224 * @ndlp: pointer to a node-list data structure.
5225 *
5226 * This routine processes a Read Link Error Status (RLS) IOCB received as
5227 * an ELS unsolicited event. It first checks the remote port state. If the
5228 * remote port is not in NLP_STE_UNMAPPED_NODE or NLP_STE_MAPPED_NODE
5229 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5230 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
5231 * to read the HBA link statistics. It is left to the callback function,
5232 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
5233 * to actually send out the RLS Accept (ACC) response.
5234 *
5235 * Return codes
5236 * 0 - Successfully processed rls iocb (currently always returns 0)
5237 **/
5238static int
5239lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5240 struct lpfc_nodelist *ndlp)
5241{
5242 struct lpfc_hba *phba = vport->phba;
5243 LPFC_MBOXQ_t *mbox;
5244 struct lpfc_dmabuf *pcmd;
5245 struct ls_rjt stat;
5246
5247 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5248 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5249 /* reject the unsolicited RLS request and done with it */
5250 goto reject_out;
5251
5252 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5253
5254 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5255 if (mbox) {
5256 lpfc_read_lnk_stat(phba, mbox);
7851fe2c
JS
5257 mbox->context1 = (void *)((unsigned long)
5258 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5259 cmdiocb->iocb.ulpContext)); /* rx_id */
12265f68
JS
5260 mbox->context2 = lpfc_nlp_get(ndlp);
5261 mbox->vport = vport;
5262 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
5263 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5264 != MBX_NOT_FINISHED)
5265 /* Mbox completion will send ELS Response */
5266 return 0;
5267 /* Decrement reference count used for the failed mbox
5268 * command.
5269 */
5270 lpfc_nlp_put(ndlp);
5271 mempool_free(mbox, phba->mbox_mem_pool);
5272 }
5273reject_out:
5274 /* issue rejection response */
5275 stat.un.b.lsRjtRsvd0 = 0;
5276 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5277 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5278 stat.un.b.vendorUnique = 0;
5279 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5280 return 0;
5281}
5282
5283/**
5284 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
5285 * @vport: pointer to a host virtual N_Port data structure.
5286 * @cmdiocb: pointer to lpfc command iocb data structure.
5287 * @ndlp: pointer to a node-list data structure.
5288 *
5289 * This routine processes a Read Timeout Value (RTV) IOCB received as an
5290 * ELS unsolicited event. It first checks the remote port state. If the
5291 * remote port is not in NLP_STE_UNMAPPED_NODE or NLP_STE_MAPPED_NODE
5292 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5293 * response. Otherwise, it sends the Accept (ACC) response to the Read
5294 * Timeout Value (RTV) unsolicited IOCB event.
5295 *
5296 * Note that, in the lpfc_prep_els_iocb() routine, the reference count of
5297 * ndlp will be incremented by 1 for holding the ndlp and the reference to
5298 * ndlp will be stored into the context1 field of the IOCB for the
5299 * completion callback function to the RTV Accept Response ELS IOCB command.
5300 *
5301 * Return codes
5302 * 0 - Successfully processed rtv iocb (currently always returns 0)
5303 **/
5304static int
5305lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5306 struct lpfc_nodelist *ndlp)
5307{
5308 struct lpfc_hba *phba = vport->phba;
5309 struct ls_rjt stat;
5310 struct RTV_RSP *rtv_rsp;
5311 uint8_t *pcmd;
5312 struct lpfc_iocbq *elsiocb;
5313 uint32_t cmdsize;
5314
5315
5316 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5317 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5318 /* reject the unsolicited RTV request and done with it */
5319 goto reject_out;
5320
5321 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
5322 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5323 lpfc_max_els_tries, ndlp,
5324 ndlp->nlp_DID, ELS_CMD_ACC);
5325
5326 if (!elsiocb)
5327 return 1;
5328
5329 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5330 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5331 pcmd += sizeof(uint32_t); /* Skip past command */
5332
5333 /* use the command's xri in the response */
7851fe2c
JS
5334 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
5335 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
12265f68
JS
5336
5337 rtv_rsp = (struct RTV_RSP *)pcmd;
5338
5339 /* populate RTV payload */
5340 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
5341 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
5342 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
5343 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
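 /* qtov was assembled with bf_set() in host order; convert it to
  * big-endian wire order before transmit.
  */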
5344 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
5345
5346 /* Xmit ELS RTV ACC response tag <ulpIoTag> */
5347 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5348 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
5349 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
5350 "Data: x%x x%x x%x\n",
5351 elsiocb->iotag, elsiocb->iocb.ulpContext,
5352 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5353 ndlp->nlp_rpi,
5354 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
5355 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5356 phba->fc_stat.elsXmitACC++;
5357 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5358 lpfc_els_free_iocb(phba, elsiocb);
5359 return 0;
5360
5361reject_out:
5362 /* issue rejection response */
5363 stat.un.b.lsRjtRsvd0 = 0;
5364 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5365 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5366 stat.un.b.vendorUnique = 0;
5367 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5368 return 0;
5369}
5370
5371/* lpfc_els_rcv_rps - Process an unsolicited rps iocb
e59058c4
JS
5372 * @vport: pointer to a host virtual N_Port data structure.
5373 * @cmdiocb: pointer to lpfc command iocb data structure.
5374 * @ndlp: pointer to a node-list data structure.
5375 *
5376 * This routine processes Read Port Status (RPS) IOCB received as an
5377 * ELS unsolicited event. It first checks the remote port state. If the
5378 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5379 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5380 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
5381 * for reading the HBA link statistics. It is for the callback function,
5382 * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
5383 * to actually sending out RPS Accept (ACC) response.
5384 *
5385 * Return codes
5386 * 0 - Successfully processed rps iocb (currently always returns 0)
5387 **/
7bb3b137 5388static int
2e0fef85
JS
5389lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5390 struct lpfc_nodelist *ndlp)
dea3101e 5391{
2e0fef85 5392 struct lpfc_hba *phba = vport->phba;
dea3101e 5393 uint32_t *lp;
7bb3b137
JW
5394 uint8_t flag;
5395 LPFC_MBOXQ_t *mbox;
5396 struct lpfc_dmabuf *pcmd;
5397 RPS *rps;
5398 struct ls_rjt stat;
5399
2fe165b6 5400 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
90160e01
JS
5401 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5402 /* reject the unsolicited RPS request and done with it */
5403 goto reject_out;
7bb3b137
JW
5404
5405 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5406 lp = (uint32_t *) pcmd->virt;
5407 flag = (be32_to_cpu(*lp++) & 0xf);
5408 rps = (RPS *) lp;
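 /* Respond only when the RPS refers to this port: flag 0 (no specific
  * port), flag 1 with port number 0, or flag 2 with a WWPN matching
  * this vport.
  */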
5409
5410 if ((flag == 0) ||
5411 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
2e0fef85 5412 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
92d7f7b0 5413 sizeof(struct lpfc_name)) == 0))) {
2e0fef85 5414
92d7f7b0
JS
5415 printk("Fix me....\n");
5416 dump_stack();
2e0fef85
JS
5417 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5418 if (mbox) {
7bb3b137 5419 lpfc_read_lnk_stat(phba, mbox);
7851fe2c
JS
5420 mbox->context1 = (void *)((unsigned long)
5421 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5422 cmdiocb->iocb.ulpContext)); /* rx_id */
329f9bc7 5423 mbox->context2 = lpfc_nlp_get(ndlp);
92d7f7b0 5424 mbox->vport = vport;
7bb3b137 5425 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
fa4066b6 5426 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
0b727fea 5427 != MBX_NOT_FINISHED)
7bb3b137
JW
5428 /* Mbox completion will send ELS Response */
5429 return 0;
fa4066b6
JS
5430 /* Decrement reference count used for the failed mbox
5431 * command.
5432 */
329f9bc7 5433 lpfc_nlp_put(ndlp);
7bb3b137
JW
5434 mempool_free(mbox, phba->mbox_mem_pool);
5435 }
5436 }
90160e01
JS
5437
5438reject_out:
5439 /* issue rejection response */
7bb3b137
JW
5440 stat.un.b.lsRjtRsvd0 = 0;
5441 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5442 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5443 stat.un.b.vendorUnique = 0;
858c9f6c 5444 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7bb3b137
JW
5445 return 0;
5446}
5447
19ca7609
JS
5448/* lpfc_issue_els_rrq - Issue an els rrq iocb command
5449 * @vport: pointer to a host virtual N_Port data structure.
5450 * @ndlp: pointer to a node-list data structure.
5451 * @did: DID of the target.
5452 * @rrq: Pointer to the rrq struct.
5453 *
5454 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
5455 * successful, the completion handler will clear the RRQ.
5456 *
5457 * Return codes
5458 * 0 - Successfully sent rrq els iocb.
5459 * 1 - Failed to send rrq els iocb.
5460 **/
5461static int
5462lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5463 uint32_t did, struct lpfc_node_rrq *rrq)
5464{
5465 struct lpfc_hba *phba = vport->phba;
5466 struct RRQ *els_rrq;
5467 IOCB_t *icmd;
5468 struct lpfc_iocbq *elsiocb;
5469 uint8_t *pcmd;
5470 uint16_t cmdsize;
5471 int ret;
5472
5473
5474 if (ndlp != rrq->ndlp)
5475 ndlp = rrq->ndlp;
5476 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5477 return 1;
5478
5479 /* If ndlp is not NULL, we will bump the reference count on it */
5480 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
5481 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
5482 ELS_CMD_RRQ);
5483 if (!elsiocb)
5484 return 1;
5485
5486 icmd = &elsiocb->iocb;
5487 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5488
5489 /* For RRQ request, remainder of payload is Exchange IDs */
5490 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
5491 pcmd += sizeof(uint32_t);
5492 els_rrq = (struct RRQ *) pcmd;
5493
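 /* Identify the exchange being reinstated: oxid is set from the local
  * xritag, rxid from the saved responder exchange id, and did from this
  * vport's fc_myDID; the words are then swapped to wire order.
  */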
5494 bf_set(rrq_oxid, els_rrq, rrq->xritag);
5495 bf_set(rrq_rxid, els_rrq, rrq->rxid);
5496 bf_set(rrq_did, els_rrq, vport->fc_myDID);
5497 els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
5498 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
5499
5500
5501 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5502 "Issue RRQ: did:x%x",
5503 did, rrq->xritag, rrq->rxid);
5504 elsiocb->context_un.rrq = rrq;
5505 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
5506 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5507
5508 if (ret == IOCB_ERROR) {
5509 lpfc_els_free_iocb(phba, elsiocb);
5510 return 1;
5511 }
5512 return 0;
5513}
5514
5515/**
5516 * lpfc_send_rrq - Sends ELS RRQ if needed.
5517 * @phba: pointer to lpfc hba data structure.
5518 * @rrq: pointer to the active rrq.
5519 *
5520 * This routine will call the lpfc_issue_els_rrq if the rrq is
5521 * still active for the xri. If this function returns a failure then
5522 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
5523 *
5524 * Returns 0 Success.
5525 * 1 Failure.
5526 **/
5527int
5528lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
5529{
5530 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
5531 rrq->nlp_DID);
5532 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
5533 return lpfc_issue_els_rrq(rrq->vport, ndlp,
5534 rrq->nlp_DID, rrq);
5535 else
5536 return 1;
5537}
5538
e59058c4 5539/**
3621a710 5540 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
e59058c4
JS
5541 * @vport: pointer to a host virtual N_Port data structure.
5542 * @cmdsize: size of the ELS command.
5543 * @oldiocb: pointer to the original lpfc command iocb data structure.
5544 * @ndlp: pointer to a node-list data structure.
5545 *
5546 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
5547 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
5548 *
5549 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5550 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5551 * will be stored into the context1 field of the IOCB for the completion
5552 * callback function to the RPL Accept Response ELS command.
5553 *
5554 * Return code
5555 * 0 - Successfully issued ACC RPL ELS command
5556 * 1 - Failed to issue ACC RPL ELS command
5557 **/
082c0266 5558static int
2e0fef85
JS
5559lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
5560 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
7bb3b137 5561{
2e0fef85
JS
5562 struct lpfc_hba *phba = vport->phba;
5563 IOCB_t *icmd, *oldcmd;
7bb3b137
JW
5564 RPL_RSP rpl_rsp;
5565 struct lpfc_iocbq *elsiocb;
7bb3b137 5566 uint8_t *pcmd;
dea3101e 5567
2e0fef85
JS
5568 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5569 ndlp->nlp_DID, ELS_CMD_ACC);
7bb3b137 5570
488d1469 5571 if (!elsiocb)
7bb3b137 5572 return 1;
488d1469 5573
7bb3b137
JW
5574 icmd = &elsiocb->iocb;
5575 oldcmd = &oldiocb->iocb;
7851fe2c
JS
5576 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5577 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
7bb3b137
JW
5578
5579 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5580 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 5581 pcmd += sizeof(uint16_t);
7bb3b137
JW
5582 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
5583 pcmd += sizeof(uint16_t);
5584
5585 /* Setup the RPL ACC payload */
5586 rpl_rsp.listLen = be32_to_cpu(1);
5587 rpl_rsp.index = 0;
5588 rpl_rsp.port_num_blk.portNum = 0;
2e0fef85
JS
5589 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
5590 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
7bb3b137 5591 sizeof(struct lpfc_name));
7bb3b137 5592 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
7bb3b137 5593 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
e8b62011
JS
5594 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5595 "0120 Xmit ELS RPL ACC response tag x%x "
5596 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
5597 "rpi x%x\n",
5598 elsiocb->iotag, elsiocb->iocb.ulpContext,
5599 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5600 ndlp->nlp_rpi);
858c9f6c 5601 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7bb3b137 5602 phba->fc_stat.elsXmitACC++;
3772a991
JS
5603 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
5604 IOCB_ERROR) {
7bb3b137
JW
5605 lpfc_els_free_iocb(phba, elsiocb);
5606 return 1;
5607 }
5608 return 0;
5609}
5610
e59058c4 5611/**
3621a710 5612 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
e59058c4
JS
5613 * @vport: pointer to a host virtual N_Port data structure.
5614 * @cmdiocb: pointer to lpfc command iocb data structure.
5615 * @ndlp: pointer to a node-list data structure.
5616 *
5617 * This routine processes a Read Port List (RPL) IOCB received as an ELS
5618 * unsolicited event. It first checks the remote port state. If the remote
5619 * port is in neither NLP_STE_UNMAPPED_NODE nor NLP_STE_MAPPED_NODE state,
5620 * it invokes the lpfc_els_rsp_reject() routine to send a reject response.
5621 * Otherwise, this routine invokes the lpfc_els_rsp_rpl_acc() routine
5622 * to accept the RPL.
5623 *
5624 * Return code
5625 * 0 - Successfully processed rpl iocb (currently always returns 0)
5626 **/
7bb3b137 5627static int
2e0fef85
JS
5628lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5629 struct lpfc_nodelist *ndlp)
7bb3b137
JW
5630{
5631 struct lpfc_dmabuf *pcmd;
5632 uint32_t *lp;
5633 uint32_t maxsize;
5634 uint16_t cmdsize;
5635 RPL *rpl;
5636 struct ls_rjt stat;
5637
2fe165b6
JW
5638 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5639 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
90160e01 5640 /* issue rejection response */
7bb3b137
JW
5641 stat.un.b.lsRjtRsvd0 = 0;
5642 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5643 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5644 stat.un.b.vendorUnique = 0;
858c9f6c
JS
5645 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
5646 NULL);
90160e01
JS
5647 /* rejected the unsolicited RPL request and done with it */
5648 return 0;
7bb3b137
JW
5649 }
5650
dea3101e
JB
5651 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5652 lp = (uint32_t *) pcmd->virt;
7bb3b137 5653 rpl = (RPL *) (lp + 1);
7bb3b137 5654 maxsize = be32_to_cpu(rpl->maxsize);
dea3101e 5655
7bb3b137
JW
5656 /* We support only one port */
5657 if ((rpl->index == 0) &&
5658 ((maxsize == 0) ||
5659 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
5660 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
2fe165b6 5661 } else {
7bb3b137
JW
5662 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
5663 }
2e0fef85 5664 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
dea3101e
JB
5665
5666 return 0;
5667}
5668
e59058c4 5669/**
3621a710 5670 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
e59058c4
JS
5671 * @vport: pointer to a virtual N_Port data structure.
5672 * @cmdiocb: pointer to lpfc command iocb data structure.
5673 * @ndlp: pointer to a node-list data structure.
5674 *
5675 * This routine processes Fibre Channel Address Resolution Protocol
5676 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
5677 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
5678 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
5679 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
5680 * remote PortName is compared against the FC PortName stored in the @vport
5681 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
5682 * compared against the FC NodeName stored in the @vport data structure.
5683 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
5684 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
5685 * invoked to send out FARP Response to the remote node. Before sending the
5686 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP
5687 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
5688 * routine is invoked to log into the remote port first.
5689 *
5690 * Return code
5691 * 0 - Either the FARP Match Mode is not supported or the FARP was processed
5692 **/
dea3101e 5693static int
2e0fef85
JS
5694lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5695 struct lpfc_nodelist *ndlp)
dea3101e
JB
5696{
5697 struct lpfc_dmabuf *pcmd;
5698 uint32_t *lp;
5699 IOCB_t *icmd;
5700 FARP *fp;
5701 uint32_t cmd, cnt, did;
5702
5703 icmd = &cmdiocb->iocb;
5704 did = icmd->un.elsreq64.remoteID;
5705 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5706 lp = (uint32_t *) pcmd->virt;
5707
5708 cmd = *lp++;
5709 fp = (FARP *) lp;
dea3101e 5710 /* FARP-REQ received from DID <did> */
e8b62011
JS
5711 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5712 "0601 FARP-REQ received from DID x%x\n", did);
dea3101e
JB
5713 /* We will only support match on WWPN or WWNN */
5714 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
c9f8735b 5715 return 0;
dea3101e
JB
5716 }
5717
5718 cnt = 0;
5719 /* If this FARP command is searching for my portname */
5720 if (fp->Mflags & FARP_MATCH_PORT) {
2e0fef85 5721 if (memcmp(&fp->RportName, &vport->fc_portname,
92d7f7b0 5722 sizeof(struct lpfc_name)) == 0)
dea3101e
JB
5723 cnt = 1;
5724 }
5725
5726 /* If this FARP command is searching for my nodename */
5727 if (fp->Mflags & FARP_MATCH_NODE) {
2e0fef85 5728 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
92d7f7b0 5729 sizeof(struct lpfc_name)) == 0)
dea3101e
JB
5730 cnt = 1;
5731 }
5732
5733 if (cnt) {
5734 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
5735 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
5736 /* Log back into the node before sending the FARP. */
5737 if (fp->Rflags & FARP_REQUEST_PLOGI) {
5024ab17 5738 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 5739 lpfc_nlp_set_state(vport, ndlp,
de0c5b32 5740 NLP_STE_PLOGI_ISSUE);
2e0fef85 5741 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
dea3101e
JB
5742 }
5743
5744 /* Send a FARP response to that node */
2e0fef85
JS
5745 if (fp->Rflags & FARP_REQUEST_FARPR)
5746 lpfc_issue_els_farpr(vport, did, 0);
dea3101e
JB
5747 }
5748 }
c9f8735b 5749 return 0;
dea3101e
JB
5750}
5751
e59058c4 5752/**
3621a710 5753 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
e59058c4
JS
5754 * @vport: pointer to a host virtual N_Port data structure.
5755 * @cmdiocb: pointer to lpfc command iocb data structure.
5756 * @ndlp: pointer to a node-list data structure.
5757 *
5758 * This routine processes Fibre Channel Address Resolution Protocol
5759 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
5760 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
5761 * the FARP response request.
5762 *
5763 * Return code
5764 * 0 - Successfully processed FARPR IOCB (currently always returns 0)
5765 **/
dea3101e 5766static int
2e0fef85
JS
5767lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5768 struct lpfc_nodelist *ndlp)
dea3101e
JB
5769{
5770 struct lpfc_dmabuf *pcmd;
5771 uint32_t *lp;
5772 IOCB_t *icmd;
5773 uint32_t cmd, did;
5774
5775 icmd = &cmdiocb->iocb;
5776 did = icmd->un.elsreq64.remoteID;
5777 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5778 lp = (uint32_t *) pcmd->virt;
5779
5780 cmd = *lp++;
5781 /* FARP-RSP received from DID <did> */
e8b62011
JS
5782 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5783 "0600 FARP-RSP received from DID x%x\n", did);
dea3101e 5784 /* ACCEPT the Farp resp request */
51ef4c26 5785 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e
JB
5786
5787 return 0;
5788}
5789
e59058c4 5790/**
3621a710 5791 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
e59058c4
JS
5792 * @vport: pointer to a host virtual N_Port data structure.
5793 * @cmdiocb: pointer to lpfc command iocb data structure.
5794 * @fan_ndlp: pointer to a node-list data structure.
5795 *
5796 * This routine processes a Fabric Address Notification (FAN) IOCB
5797 * command received as an ELS unsolicited event. The FAN ELS command will
5798 * only be processed on a physical port (i.e., the @vport represents the
5799 * physical port). The fabric NodeName and PortName from the FAN IOCB are
5800 * compared against those in the phba data structure. If any of those is
5801 * different, the lpfc_initial_flogi() routine is invoked to initialize
5802 * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
5803 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
5804 * is invoked to register login to the fabric.
5805 *
5806 * Return code
5807 * 0 - Successfully processed fan iocb (currently always returns 0).
5808 **/
dea3101e 5809static int
2e0fef85
JS
5810lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5811 struct lpfc_nodelist *fan_ndlp)
dea3101e 5812{
0d2b6b83 5813 struct lpfc_hba *phba = vport->phba;
dea3101e 5814 uint32_t *lp;
5024ab17 5815 FAN *fp;
dea3101e 5816
0d2b6b83
JS
5817 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
5818 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
5819 fp = (FAN *) ++lp;
5024ab17 5820 /* FAN received; Fan does not have a reply sequence */
0d2b6b83
JS
5821 if ((vport == phba->pport) &&
5822 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
5024ab17 5823 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
0d2b6b83 5824 sizeof(struct lpfc_name))) ||
5024ab17 5825 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
0d2b6b83
JS
5826 sizeof(struct lpfc_name)))) {
5827 /* This port has switched fabrics. FLOGI is required */
76a95d75 5828 lpfc_issue_init_vfi(vport);
0d2b6b83
JS
5829 } else {
5830 /* FAN verified - skip FLOGI */
5831 vport->fc_myDID = vport->fc_prevDID;
6fb120a7
JS
5832 if (phba->sli_rev < LPFC_SLI_REV4)
5833 lpfc_issue_fabric_reglogin(vport);
5834 else
5835 lpfc_issue_reg_vfi(vport);
5024ab17 5836 }
dea3101e 5837 }
c9f8735b 5838 return 0;
dea3101e
JB
5839}
5840
e59058c4 5841/**
3621a710 5842 * lpfc_els_timeout - Handler function for the els timer
e59058c4
JS
5843 * @ptr: holder for the timer function associated data.
5844 *
5845 * This routine is invoked by the ELS timer after timeout. It posts the ELS
5846 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
5847 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
5848 * up the worker thread. The worker thread then invokes the routine
5849 * lpfc_els_timeout_handler() to process the posted WORKER_ELS_TMO event.
5850 **/
dea3101e
JB
5851void
5852lpfc_els_timeout(unsigned long ptr)
5853{
2e0fef85
JS
5854 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
5855 struct lpfc_hba *phba = vport->phba;
5e9d9b82 5856 uint32_t tmo_posted;
dea3101e
JB
5857 unsigned long iflag;
5858
2e0fef85 5859 spin_lock_irqsave(&vport->work_port_lock, iflag);
5e9d9b82
JS
5860 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
5861 if (!tmo_posted)
2e0fef85 5862 vport->work_port_events |= WORKER_ELS_TMO;
5e9d9b82 5863 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
92d7f7b0 5864
5e9d9b82
JS
5865 if (!tmo_posted)
5866 lpfc_worker_wake_up(phba);
dea3101e
JB
5867 return;
5868}
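
/*
 * Illustrative sketch (not part of the driver): lpfc_els_timeout() only fires
 * once the vport's ELS timer has been armed with it as the callback. Assuming
 * the pre-4.15 kernel timer API (setup_timer/mod_timer) and the existing
 * vport->els_tmofunc field, a minimal setup could look like this; the
 * driver's real initialization lives in its port-create path.
 */
static inline void lpfc_els_timer_setup_sketch(struct lpfc_vport *vport)
{
	/* Bind lpfc_els_timeout() to the vport's ELS timer */
	setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
		    (unsigned long)vport);
	/* Arm it for two RA_TOV intervals, matching the handler's timeout */
	mod_timer(&vport->els_tmofunc,
		  jiffies + HZ * (vport->phba->fc_ratov << 1));
}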
5869
2a9bf3d0 5870
e59058c4 5871/**
3621a710 5872 * lpfc_els_timeout_handler - Process an els timeout event
e59058c4
JS
5873 * @vport: pointer to a virtual N_Port data structure.
5874 *
5875 * This routine is the actual handler function that processes an ELS timeout
5876 * event. It walks the ELS ring and aborts all the IOCBs associated with the
5877 * @vport (except the ABORT/CLOSE/FARP/FARPR/FDISC IOCBs) by invoking the
5878 * lpfc_sli_issue_abort_iotag() routine.
5879 **/
dea3101e 5880void
2e0fef85 5881lpfc_els_timeout_handler(struct lpfc_vport *vport)
dea3101e 5882{
2e0fef85 5883 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
5884 struct lpfc_sli_ring *pring;
5885 struct lpfc_iocbq *tmp_iocb, *piocb;
5886 IOCB_t *cmd = NULL;
5887 struct lpfc_dmabuf *pcmd;
2e0fef85 5888 uint32_t els_command = 0;
dea3101e 5889 uint32_t timeout;
2e0fef85 5890 uint32_t remote_ID = 0xffffffff;
2a9bf3d0
JS
5891 LIST_HEAD(txcmplq_completions);
5892 LIST_HEAD(abort_list);
5893
dea3101e 5894
dea3101e
JB
5895 timeout = (uint32_t)(phba->fc_ratov << 1);
5896
5897 pring = &phba->sli.ring[LPFC_ELS_RING];
dea3101e 5898
2a9bf3d0
JS
5899 spin_lock_irq(&phba->hbalock);
5900 list_splice_init(&pring->txcmplq, &txcmplq_completions);
5901 spin_unlock_irq(&phba->hbalock);
5902
5903 list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
dea3101e
JB
5904 cmd = &piocb->iocb;
5905
2e0fef85
JS
5906 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
5907 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
5908 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
dea3101e 5909 continue;
2e0fef85
JS
5910
5911 if (piocb->vport != vport)
5912 continue;
5913
dea3101e 5914 pcmd = (struct lpfc_dmabuf *) piocb->context2;
2e0fef85
JS
5915 if (pcmd)
5916 els_command = *(uint32_t *) (pcmd->virt);
dea3101e 5917
92d7f7b0
JS
5918 if (els_command == ELS_CMD_FARP ||
5919 els_command == ELS_CMD_FARPR ||
5920 els_command == ELS_CMD_FDISC)
5921 continue;
5922
dea3101e 5923 if (piocb->drvrTimeout > 0) {
92d7f7b0 5924 if (piocb->drvrTimeout >= timeout)
dea3101e 5925 piocb->drvrTimeout -= timeout;
92d7f7b0 5926 else
dea3101e 5927 piocb->drvrTimeout = 0;
dea3101e
JB
5928 continue;
5929 }
5930
2e0fef85
JS
5931 remote_ID = 0xffffffff;
5932 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
dea3101e 5933 remote_ID = cmd->un.elsreq64.remoteID;
2e0fef85
JS
5934 else {
5935 struct lpfc_nodelist *ndlp;
5936 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
58da1ffb 5937 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
2e0fef85 5938 remote_ID = ndlp->nlp_DID;
dea3101e 5939 }
2a9bf3d0
JS
5940 list_add_tail(&piocb->dlist, &abort_list);
5941 }
5942 spin_lock_irq(&phba->hbalock);
5943 list_splice(&txcmplq_completions, &pring->txcmplq);
5944 spin_unlock_irq(&phba->hbalock);
5945
5946 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
e8b62011 5947 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2a9bf3d0
JS
5948 "0127 ELS timeout Data: x%x x%x x%x "
5949 "x%x\n", els_command,
5950 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
5951 spin_lock_irq(&phba->hbalock);
5952 list_del_init(&piocb->dlist);
07951076 5953 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
2a9bf3d0 5954 spin_unlock_irq(&phba->hbalock);
dea3101e 5955 }
5a0e326d 5956
2e0fef85
JS
5957 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
5958 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
dea3101e
JB
5959}
5960
e59058c4 5961/**
3621a710 5962 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
e59058c4
JS
5963 * @vport: pointer to a host virtual N_Port data structure.
5964 *
5965 * This routine is used to clean up all the outstanding ELS commands on a
5966 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
5967 * routine. After that, it walks the ELS transmit queue to remove all the
5968 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
5969 * the IOCBs with a non-NULL completion callback function, the callback
5970 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5971 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
5972 * callback function, the IOCB will simply be released. Finally, it walks
5973 * the ELS transmit completion queue to issue an abort IOCB to any transmit
5974 * completion queue IOCB that is associated with the @vport and is not
5975 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
5976 * part of the discovery state machine) out to HBA by invoking the
5977 * lpfc_sli_issue_abort_iotag() routine. Note that although this function
5978 * issues the abort IOCB to any transmit completion queued IOCB, it does not
5979 * guarantee that the IOCBs have been aborted when this function returns.
5980 **/
dea3101e 5981void
2e0fef85 5982lpfc_els_flush_cmd(struct lpfc_vport *vport)
dea3101e 5983{
2534ba75 5984 LIST_HEAD(completions);
2e0fef85 5985 struct lpfc_hba *phba = vport->phba;
329f9bc7 5986 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
dea3101e
JB
5987 struct lpfc_iocbq *tmp_iocb, *piocb;
5988 IOCB_t *cmd = NULL;
92d7f7b0
JS
5989
5990 lpfc_fabric_abort_vport(vport);
dea3101e 5991
2e0fef85 5992 spin_lock_irq(&phba->hbalock);
dea3101e
JB
5993 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
5994 cmd = &piocb->iocb;
5995
5996 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
5997 continue;
5998 }
5999
6000 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
329f9bc7
JS
6001 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
6002 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
6003 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
6004 cmd->ulpCommand == CMD_ABORT_XRI_CN)
dea3101e 6005 continue;
dea3101e 6006
2e0fef85
JS
6007 if (piocb->vport != vport)
6008 continue;
6009
2534ba75 6010 list_move_tail(&piocb->list, &completions);
1dcb58e5 6011 pring->txq_cnt--;
dea3101e
JB
6012 }
6013
6014 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
dea3101e
JB
6015 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
6016 continue;
6017 }
dea3101e 6018
2e0fef85
JS
6019 if (piocb->vport != vport)
6020 continue;
6021
07951076 6022 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
dea3101e 6023 }
2e0fef85 6024 spin_unlock_irq(&phba->hbalock);
2534ba75 6025
a257bf90
JS
6026 /* Cancel all the IOCBs from the completions list */
6027 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6028 IOERR_SLI_ABORTED);
2534ba75 6029
dea3101e
JB
6030 return;
6031}
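
/*
 * Illustrative sketch (not part of the driver): commands flushed by
 * lpfc_els_flush_cmd() complete with ulpStatus set to IOSTAT_LOCAL_REJECT and
 * un.ulpWord[4] set to IOERR_SLI_ABORTED, so a completion handler that wants
 * to tell a flush/abort from a real failure can test for that pair. The
 * handler name below is hypothetical; the low-byte mask mirrors how this file
 * checks ulpWord[4] error codes elsewhere.
 */
static void example_els_cmpl_sketch(struct lpfc_hba *phba,
				    struct lpfc_iocbq *cmdiocb,
				    struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
	    (irsp->un.ulpWord[4] & 0xff) == IOERR_SLI_ABORTED) {
		/* Command was flushed or aborted by the driver; do not retry */
		lpfc_els_free_iocb(phba, cmdiocb);
		return;
	}
	/* ... normal completion or retry handling would go here ... */
	lpfc_els_free_iocb(phba, cmdiocb);
}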
6032
e59058c4 6033/**
3621a710 6034 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
e59058c4
JS
6035 * @phba: pointer to lpfc hba data structure.
6036 *
6037 * This routine is used to clean up all the outstanding ELS commands on a
6038 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
6039 * routine. After that, it walks the ELS transmit queue to remove all the
6040 * IOCBs on the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
6041 * the IOCBs with an associated completion callback function, the callback
6042 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
6043 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without an associated
6044 * completion callback function, the IOCB will simply be released. Finally,
6045 * it walks the ELS transmit completion queue to issue an abort IOCB to any
6046 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
6047 * management plane IOCBs that are not part of the discovery state machine)
6048 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
6049 **/
549e55cd
JS
6050void
6051lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
6052{
6053 LIST_HEAD(completions);
6054 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6055 struct lpfc_iocbq *tmp_iocb, *piocb;
6056 IOCB_t *cmd = NULL;
6057
6058 lpfc_fabric_abort_hba(phba);
6059 spin_lock_irq(&phba->hbalock);
6060 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
6061 cmd = &piocb->iocb;
6062 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
6063 continue;
6064 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
6065 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
6066 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
6067 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
6068 cmd->ulpCommand == CMD_ABORT_XRI_CN)
6069 continue;
6070 list_move_tail(&piocb->list, &completions);
6071 pring->txq_cnt--;
6072 }
6073 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6074 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
6075 continue;
6076 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
6077 }
6078 spin_unlock_irq(&phba->hbalock);
a257bf90
JS
6079
6080 /* Cancel all the IOCBs from the completions list */
6081 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6082 IOERR_SLI_ABORTED);
6083
549e55cd
JS
6084 return;
6085}
6086
ea2151b4 6087/**
3621a710 6088 * lpfc_send_els_failure_event - Posts an ELS command failure event
ea2151b4
JS
6089 * @phba: Pointer to hba context object.
6090 * @cmdiocbp: Pointer to command iocb which reported error.
6091 * @rspiocbp: Pointer to response iocb which reported error.
6092 *
6093 * This function sends an event when there is an ELS command
6094 * failure.
6095 **/
6096void
6097lpfc_send_els_failure_event(struct lpfc_hba *phba,
6098 struct lpfc_iocbq *cmdiocbp,
6099 struct lpfc_iocbq *rspiocbp)
6100{
6101 struct lpfc_vport *vport = cmdiocbp->vport;
6102 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6103 struct lpfc_lsrjt_event lsrjt_event;
6104 struct lpfc_fabric_event_header fabric_event;
6105 struct ls_rjt stat;
6106 struct lpfc_nodelist *ndlp;
6107 uint32_t *pcmd;
6108
6109 ndlp = cmdiocbp->context1;
6110 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
6111 return;
6112
6113 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
6114 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
6115 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
6116 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
6117 sizeof(struct lpfc_name));
6118 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
6119 sizeof(struct lpfc_name));
6120 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
6121 cmdiocbp->context2)->virt);
49198b37 6122 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
ea2151b4
JS
6123 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
6124 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
6125 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
6126 fc_host_post_vendor_event(shost,
6127 fc_get_event_number(),
6128 sizeof(lsrjt_event),
6129 (char *)&lsrjt_event,
ddcc50f0 6130 LPFC_NL_VENDOR_ID);
ea2151b4
JS
6131 return;
6132 }
6133 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
6134 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
6135 fabric_event.event_type = FC_REG_FABRIC_EVENT;
6136 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
6137 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
6138 else
6139 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
6140 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
6141 sizeof(struct lpfc_name));
6142 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
6143 sizeof(struct lpfc_name));
6144 fc_host_post_vendor_event(shost,
6145 fc_get_event_number(),
6146 sizeof(fabric_event),
6147 (char *)&fabric_event,
ddcc50f0 6148 LPFC_NL_VENDOR_ID);
ea2151b4
JS
6149 return;
6150 }
6151
6152}
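
/*
 * Illustrative sketch (not part of the driver): an ELS completion path can
 * feed failed responses into lpfc_send_els_failure_event() so that LS_RJT and
 * NPort/Fabric busy conditions are surfaced as FC vendor events. The wrapper
 * function is hypothetical; the call matches the signature above.
 */
static void example_els_error_notify_sketch(struct lpfc_hba *phba,
					    struct lpfc_iocbq *cmdiocb,
					    struct lpfc_iocbq *rspiocb)
{
	/* Only failed responses generate LS_RJT or fabric/port busy events */
	if (rspiocb->iocb.ulpStatus)
		lpfc_send_els_failure_event(phba, cmdiocb, rspiocb);
}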
6153
6154/**
3621a710 6155 * lpfc_send_els_event - Posts unsolicited els event
ea2151b4
JS
6156 * @vport: Pointer to vport object.
6157 * @ndlp: Pointer FC node object.
6158 * @cmd: ELS command code.
6159 *
6160 * This function posts an event when there is an incoming
6161 * unsolicited ELS command.
6162 **/
6163static void
6164lpfc_send_els_event(struct lpfc_vport *vport,
6165 struct lpfc_nodelist *ndlp,
ddcc50f0 6166 uint32_t *payload)
ea2151b4 6167{
ddcc50f0
JS
6168 struct lpfc_els_event_header *els_data = NULL;
6169 struct lpfc_logo_event *logo_data = NULL;
ea2151b4
JS
6170 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6171
ddcc50f0
JS
6172 if (*payload == ELS_CMD_LOGO) {
6173 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
6174 if (!logo_data) {
6175 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6176 "0148 Failed to allocate memory "
6177 "for LOGO event\n");
6178 return;
6179 }
6180 els_data = &logo_data->header;
6181 } else {
6182 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
6183 GFP_KERNEL);
6184 if (!els_data) {
6185 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6186 "0149 Failed to allocate memory "
6187 "for ELS event\n");
6188 return;
6189 }
6190 }
6191 els_data->event_type = FC_REG_ELS_EVENT;
6192 switch (*payload) {
ea2151b4 6193 case ELS_CMD_PLOGI:
ddcc50f0 6194 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
ea2151b4
JS
6195 break;
6196 case ELS_CMD_PRLO:
ddcc50f0 6197 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
ea2151b4
JS
6198 break;
6199 case ELS_CMD_ADISC:
ddcc50f0
JS
6200 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
6201 break;
6202 case ELS_CMD_LOGO:
6203 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
6204 /* Copy the WWPN in the LOGO payload */
6205 memcpy(logo_data->logo_wwpn, &payload[2],
6206 sizeof(struct lpfc_name));
ea2151b4
JS
6207 break;
6208 default:
e916141c 6209 kfree(els_data);
ea2151b4
JS
6210 return;
6211 }
ddcc50f0
JS
6212 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
6213 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
6214 if (*payload == ELS_CMD_LOGO) {
6215 fc_host_post_vendor_event(shost,
6216 fc_get_event_number(),
6217 sizeof(struct lpfc_logo_event),
6218 (char *)logo_data,
6219 LPFC_NL_VENDOR_ID);
6220 kfree(logo_data);
6221 } else {
6222 fc_host_post_vendor_event(shost,
6223 fc_get_event_number(),
6224 sizeof(struct lpfc_els_event_header),
6225 (char *)els_data,
6226 LPFC_NL_VENDOR_ID);
6227 kfree(els_data);
6228 }
ea2151b4
JS
6229
6230 return;
6231}
6232
6233
e59058c4 6234/**
3621a710 6235 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
e59058c4
JS
6236 * @phba: pointer to lpfc hba data structure.
6237 * @pring: pointer to a SLI ring.
6238 * @vport: pointer to a host virtual N_Port data structure.
6239 * @elsiocb: pointer to lpfc els command iocb data structure.
6240 *
6241 * This routine is used for processing the IOCB associated with an unsolicited
6242 * event. It first determines whether there is an existing ndlp that matches
6243 * the DID from the unsolicited IOCB. If not, it will create a new one with
6244 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
6245 * IOCB is then used to invoke the proper routine and to set up proper state
6246 * of the discovery state machine.
6247 **/
ed957684
JS
6248static void
6249lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
92d7f7b0 6250 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
dea3101e 6251{
87af33fe 6252 struct Scsi_Host *shost;
dea3101e 6253 struct lpfc_nodelist *ndlp;
dea3101e 6254 struct ls_rjt stat;
92d7f7b0 6255 uint32_t *payload;
2e0fef85 6256 uint32_t cmd, did, newnode, rjt_err = 0;
ed957684 6257 IOCB_t *icmd = &elsiocb->iocb;
dea3101e 6258
e47c9093 6259 if (!vport || !(elsiocb->context2))
dea3101e 6260 goto dropit;
2e0fef85 6261
dea3101e 6262 newnode = 0;
92d7f7b0
JS
6263 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
6264 cmd = *payload;
ed957684 6265 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
495a714c 6266 lpfc_post_buffer(phba, pring, 1);
dea3101e 6267
858c9f6c
JS
6268 did = icmd->un.rcvels.remoteID;
6269 if (icmd->ulpStatus) {
6270 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6271 "RCV Unsol ELS: status:x%x/x%x did:x%x",
6272 icmd->ulpStatus, icmd->un.ulpWord[4], did);
dea3101e 6273 goto dropit;
858c9f6c 6274 }
dea3101e
JB
6275
6276 /* Check to see if link went down during discovery */
ed957684 6277 if (lpfc_els_chk_latt(vport))
dea3101e 6278 goto dropit;
dea3101e 6279
c868595d 6280 /* Ignore traffic received during vport shutdown. */
92d7f7b0
JS
6281 if (vport->load_flag & FC_UNLOADING)
6282 goto dropit;
6283
92494144
JS
6284 /* If NPort discovery is delayed drop incoming ELS */
6285 if ((vport->fc_flag & FC_DISC_DELAYED) &&
6286 (cmd != ELS_CMD_PLOGI))
6287 goto dropit;
6288
2e0fef85 6289 ndlp = lpfc_findnode_did(vport, did);
c9f8735b 6290 if (!ndlp) {
dea3101e 6291 /* Cannot find existing Fabric ndlp, so allocate a new one */
c9f8735b 6292 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
ed957684 6293 if (!ndlp)
dea3101e 6294 goto dropit;
dea3101e 6295
2e0fef85 6296 lpfc_nlp_init(vport, ndlp, did);
98c9ea5c 6297 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
dea3101e 6298 newnode = 1;
e47c9093 6299 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
dea3101e 6300 ndlp->nlp_type |= NLP_FABRIC;
58da1ffb
JS
6301 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
6302 ndlp = lpfc_enable_node(vport, ndlp,
6303 NLP_STE_UNUSED_NODE);
6304 if (!ndlp)
6305 goto dropit;
6306 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6307 newnode = 1;
6308 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6309 ndlp->nlp_type |= NLP_FABRIC;
6310 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
6311 /* This is similar to the new node path */
6312 ndlp = lpfc_nlp_get(ndlp);
6313 if (!ndlp)
6314 goto dropit;
6315 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6316 newnode = 1;
87af33fe 6317 }
dea3101e
JB
6318
6319 phba->fc_stat.elsRcvFrame++;
e47c9093 6320
329f9bc7 6321 elsiocb->context1 = lpfc_nlp_get(ndlp);
2e0fef85 6322 elsiocb->vport = vport;
dea3101e
JB
6323
6324 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
6325 cmd &= ELS_CMD_MASK;
6326 }
6327 /* ELS command <elsCmd> received from NPORT <did> */
e8b62011
JS
6328 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6329 "0112 ELS command x%x received from NPORT x%x "
6330 "Data: x%x\n", cmd, did, vport->port_state);
dea3101e
JB
6331 switch (cmd) {
6332 case ELS_CMD_PLOGI:
858c9f6c
JS
6333 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6334 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
6335 did, vport->port_state, ndlp->nlp_flag);
6336
dea3101e 6337 phba->fc_stat.elsRcvPLOGI++;
858c9f6c
JS
6338 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
6339
ddcc50f0 6340 lpfc_send_els_event(vport, ndlp, payload);
92494144
JS
6341
6342 /* If Nport discovery is delayed, reject PLOGIs */
6343 if (vport->fc_flag & FC_DISC_DELAYED) {
6344 rjt_err = LSRJT_UNABLE_TPC;
6345 break;
6346 }
858c9f6c 6347 if (vport->port_state < LPFC_DISC_AUTH) {
1b32f6aa
JS
6348 if (!(phba->pport->fc_flag & FC_PT2PT) ||
6349 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
6350 rjt_err = LSRJT_UNABLE_TPC;
6351 break;
6352 }
6353 /* We get here, and drop thru, if we are PT2PT with
6354 * another NPort and the other side has initiated
6355 * the PLOGI before responding to our FLOGI.
6356 */
dea3101e 6357 }
87af33fe
JS
6358
6359 shost = lpfc_shost_from_vport(vport);
6360 spin_lock_irq(shost->host_lock);
6361 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
6362 spin_unlock_irq(shost->host_lock);
6363
2e0fef85
JS
6364 lpfc_disc_state_machine(vport, ndlp, elsiocb,
6365 NLP_EVT_RCV_PLOGI);
858c9f6c 6366
dea3101e
JB
6367 break;
6368 case ELS_CMD_FLOGI:
858c9f6c
JS
6369 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6370 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
6371 did, vport->port_state, ndlp->nlp_flag);
6372
dea3101e 6373 phba->fc_stat.elsRcvFLOGI++;
51ef4c26 6374 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
87af33fe 6375 if (newnode)
98c9ea5c 6376 lpfc_nlp_put(ndlp);
dea3101e
JB
6377 break;
6378 case ELS_CMD_LOGO:
858c9f6c
JS
6379 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6380 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
6381 did, vport->port_state, ndlp->nlp_flag);
6382
dea3101e 6383 phba->fc_stat.elsRcvLOGO++;
ddcc50f0 6384 lpfc_send_els_event(vport, ndlp, payload);
2e0fef85 6385 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 6386 rjt_err = LSRJT_UNABLE_TPC;
dea3101e
JB
6387 break;
6388 }
2e0fef85 6389 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
dea3101e
JB
6390 break;
6391 case ELS_CMD_PRLO:
858c9f6c
JS
6392 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6393 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
6394 did, vport->port_state, ndlp->nlp_flag);
6395
dea3101e 6396 phba->fc_stat.elsRcvPRLO++;
ddcc50f0 6397 lpfc_send_els_event(vport, ndlp, payload);
2e0fef85 6398 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 6399 rjt_err = LSRJT_UNABLE_TPC;
dea3101e
JB
6400 break;
6401 }
2e0fef85 6402 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
dea3101e
JB
6403 break;
6404 case ELS_CMD_RSCN:
6405 phba->fc_stat.elsRcvRSCN++;
51ef4c26 6406 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
87af33fe 6407 if (newnode)
98c9ea5c 6408 lpfc_nlp_put(ndlp);
dea3101e
JB
6409 break;
6410 case ELS_CMD_ADISC:
858c9f6c
JS
6411 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6412 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
6413 did, vport->port_state, ndlp->nlp_flag);
6414
ddcc50f0 6415 lpfc_send_els_event(vport, ndlp, payload);
dea3101e 6416 phba->fc_stat.elsRcvADISC++;
2e0fef85 6417 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 6418 rjt_err = LSRJT_UNABLE_TPC;
dea3101e
JB
6419 break;
6420 }
2e0fef85
JS
6421 lpfc_disc_state_machine(vport, ndlp, elsiocb,
6422 NLP_EVT_RCV_ADISC);
dea3101e
JB
6423 break;
6424 case ELS_CMD_PDISC:
858c9f6c
JS
6425 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6426 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
6427 did, vport->port_state, ndlp->nlp_flag);
6428
dea3101e 6429 phba->fc_stat.elsRcvPDISC++;
2e0fef85 6430 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 6431 rjt_err = LSRJT_UNABLE_TPC;
dea3101e
JB
6432 break;
6433 }
2e0fef85
JS
6434 lpfc_disc_state_machine(vport, ndlp, elsiocb,
6435 NLP_EVT_RCV_PDISC);
dea3101e
JB
6436 break;
6437 case ELS_CMD_FARPR:
858c9f6c
JS
6438 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6439 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
6440 did, vport->port_state, ndlp->nlp_flag);
6441
dea3101e 6442 phba->fc_stat.elsRcvFARPR++;
2e0fef85 6443 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
dea3101e
JB
6444 break;
6445 case ELS_CMD_FARP:
858c9f6c
JS
6446 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6447 "RCV FARP: did:x%x/ste:x%x flg:x%x",
6448 did, vport->port_state, ndlp->nlp_flag);
6449
dea3101e 6450 phba->fc_stat.elsRcvFARP++;
2e0fef85 6451 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
dea3101e
JB
6452 break;
6453 case ELS_CMD_FAN:
858c9f6c
JS
6454 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6455 "RCV FAN: did:x%x/ste:x%x flg:x%x",
6456 did, vport->port_state, ndlp->nlp_flag);
6457
dea3101e 6458 phba->fc_stat.elsRcvFAN++;
2e0fef85 6459 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
dea3101e 6460 break;
dea3101e 6461 case ELS_CMD_PRLI:
858c9f6c
JS
6462 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6463 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
6464 did, vport->port_state, ndlp->nlp_flag);
6465
dea3101e 6466 phba->fc_stat.elsRcvPRLI++;
2e0fef85 6467 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 6468 rjt_err = LSRJT_UNABLE_TPC;
dea3101e
JB
6469 break;
6470 }
2e0fef85 6471 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
dea3101e 6472 break;
7bb3b137 6473 case ELS_CMD_LIRR:
858c9f6c
JS
6474 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6475 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
6476 did, vport->port_state, ndlp->nlp_flag);
6477
7bb3b137 6478 phba->fc_stat.elsRcvLIRR++;
2e0fef85 6479 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
87af33fe 6480 if (newnode)
98c9ea5c 6481 lpfc_nlp_put(ndlp);
7bb3b137 6482 break;
12265f68
JS
6483 case ELS_CMD_RLS:
6484 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6485 "RCV RLS: did:x%x/ste:x%x flg:x%x",
6486 did, vport->port_state, ndlp->nlp_flag);
6487
6488 phba->fc_stat.elsRcvRLS++;
6489 lpfc_els_rcv_rls(vport, elsiocb, ndlp);
6490 if (newnode)
6491 lpfc_nlp_put(ndlp);
6492 break;
7bb3b137 6493 case ELS_CMD_RPS:
858c9f6c
JS
6494 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6495 "RCV RPS: did:x%x/ste:x%x flg:x%x",
6496 did, vport->port_state, ndlp->nlp_flag);
6497
7bb3b137 6498 phba->fc_stat.elsRcvRPS++;
2e0fef85 6499 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
87af33fe 6500 if (newnode)
98c9ea5c 6501 lpfc_nlp_put(ndlp);
7bb3b137
JW
6502 break;
6503 case ELS_CMD_RPL:
858c9f6c
JS
6504 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6505 "RCV RPL: did:x%x/ste:x%x flg:x%x",
6506 did, vport->port_state, ndlp->nlp_flag);
6507
7bb3b137 6508 phba->fc_stat.elsRcvRPL++;
2e0fef85 6509 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
87af33fe 6510 if (newnode)
98c9ea5c 6511 lpfc_nlp_put(ndlp);
7bb3b137 6512 break;
dea3101e 6513 case ELS_CMD_RNID:
858c9f6c
JS
6514 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6515 "RCV RNID: did:x%x/ste:x%x flg:x%x",
6516 did, vport->port_state, ndlp->nlp_flag);
6517
dea3101e 6518 phba->fc_stat.elsRcvRNID++;
2e0fef85 6519 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
87af33fe 6520 if (newnode)
98c9ea5c 6521 lpfc_nlp_put(ndlp);
dea3101e 6522 break;
12265f68
JS
6523 case ELS_CMD_RTV:
6524 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6525 "RCV RTV: did:x%x/ste:x%x flg:x%x",
6526 did, vport->port_state, ndlp->nlp_flag);
6527 phba->fc_stat.elsRcvRTV++;
6528 lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
6529 if (newnode)
6530 lpfc_nlp_put(ndlp);
6531 break;
5ffc266e
JS
6532 case ELS_CMD_RRQ:
6533 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6534 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
6535 did, vport->port_state, ndlp->nlp_flag);
6536
6537 phba->fc_stat.elsRcvRRQ++;
6538 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
6539 if (newnode)
6540 lpfc_nlp_put(ndlp);
6541 break;
12265f68
JS
6542 case ELS_CMD_ECHO:
6543 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6544 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
6545 did, vport->port_state, ndlp->nlp_flag);
6546
6547 phba->fc_stat.elsRcvECHO++;
6548 lpfc_els_rcv_echo(vport, elsiocb, ndlp);
6549 if (newnode)
6550 lpfc_nlp_put(ndlp);
6551 break;
dea3101e 6552 default:
858c9f6c
JS
6553 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6554 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
6555 cmd, did, vport->port_state);
6556
dea3101e 6557 /* Unsupported ELS command, reject */
63e801ce 6558 rjt_err = LSRJT_CMD_UNSUPPORTED;
dea3101e
JB
6559
6560 /* Unknown ELS command <elsCmd> received from NPORT <did> */
e8b62011
JS
6561 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6562 "0115 Unknown ELS command x%x "
6563 "received from NPORT x%x\n", cmd, did);
87af33fe 6564 if (newnode)
98c9ea5c 6565 lpfc_nlp_put(ndlp);
dea3101e
JB
6566 break;
6567 }
6568
6569 /* Check if we need to LS_RJT the received ELS cmd */
6570 if (rjt_err) {
92d7f7b0 6571 memset(&stat, 0, sizeof(stat));
858c9f6c 6572 stat.un.b.lsRjtRsnCode = rjt_err;
1f679caf 6573 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
858c9f6c
JS
6574 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
6575 NULL);
dea3101e
JB
6576 }
6577
d7c255b2
JS
6578 lpfc_nlp_put(elsiocb->context1);
6579 elsiocb->context1 = NULL;
ed957684
JS
6580 return;
6581
6582dropit:
98c9ea5c 6583 if (vport && !(vport->load_flag & FC_UNLOADING))
6fb120a7
JS
6584 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6585 "0111 Dropping received ELS cmd "
ed957684 6586 "Data: x%x x%x x%x\n",
6fb120a7 6587 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
ed957684
JS
6588 phba->fc_stat.elsRcvDrop++;
6589}
6590
e59058c4 6591/**
3621a710 6592 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
e59058c4
JS
6593 * @phba: pointer to lpfc hba data structure.
6594 * @vpi: host virtual N_Port identifier.
6595 *
6596 * This routine finds a vport on a HBA (referred by @phba) through a
6597 * @vpi. The function walks the HBA's vport list and returns the address
6598 * of the vport with the matching @vpi.
6599 *
6600 * Return code
6601 * NULL - No vport with the matching @vpi found
6602 * Otherwise - Address to the vport with the matching @vpi.
6603 **/
6669f9bb 6604struct lpfc_vport *
92d7f7b0
JS
6605lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6606{
6607 struct lpfc_vport *vport;
549e55cd 6608 unsigned long flags;
5248a749 6609 int i = 0;
6d368e53
JS
6610
6611 /* The physical ports are always vpi 0 - translate is unnecessary. */
6612 if (vpi > 0) {
6613 /*
6614 * Translate the physical vpi to the logical vpi. The
6615 * vport stores the logical vpi.
6616 */
6617 for (i = 0; i < phba->max_vpi; i++) {
6618 if (vpi == phba->vpi_ids[i])
6619 break;
6620 }
6621
6622 if (i >= phba->max_vpi) {
6623 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
6624 "2936 Could not find Vport mapped "
6625 "to vpi %d\n", vpi);
6626 return NULL;
6627 }
6628 }
92d7f7b0 6629
549e55cd 6630 spin_lock_irqsave(&phba->hbalock, flags);
92d7f7b0 6631 list_for_each_entry(vport, &phba->port_list, listentry) {
5248a749 6632 if (vport->vpi == i) {
549e55cd 6633 spin_unlock_irqrestore(&phba->hbalock, flags);
92d7f7b0 6634 return vport;
549e55cd 6635 }
92d7f7b0 6636 }
549e55cd 6637 spin_unlock_irqrestore(&phba->hbalock, flags);
92d7f7b0
JS
6638 return NULL;
6639}
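
/*
 * Illustrative sketch (not part of the driver): resolving the receiving vport
 * for an unsolicited frame from the VPI carried in the IOCB, falling back to
 * the physical port when no vport matches. The helper name is hypothetical;
 * lpfc_els_unsol_event() below follows the same pattern.
 */
static struct lpfc_vport *example_resolve_vport_sketch(struct lpfc_hba *phba,
						       uint16_t vpi)
{
	struct lpfc_vport *vport = lpfc_find_vport_by_vpid(phba, vpi);

	/* No vport registered for this VPI; default to the physical port */
	if (!vport)
		vport = phba->pport;
	return vport;
}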
ed957684 6640
e59058c4 6641/**
3621a710 6642 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
e59058c4
JS
6643 * @phba: pointer to lpfc hba data structure.
6644 * @pring: pointer to a SLI ring.
6645 * @elsiocb: pointer to lpfc els iocb data structure.
6646 *
6647 * This routine is used to process an unsolicited event received from a SLI
6648 * (Service Level Interface) ring. The actual processing of the data buffer
6649 * associated with the unsolicited event is done by invoking the routine
6650 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
6651 * SLI ring on which the unsolicited event was received.
6652 **/
ed957684
JS
6653void
6654lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6655 struct lpfc_iocbq *elsiocb)
6656{
6657 struct lpfc_vport *vport = phba->pport;
ed957684 6658 IOCB_t *icmd = &elsiocb->iocb;
ed957684 6659 dma_addr_t paddr;
92d7f7b0
JS
6660 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
6661 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
6662
d7c255b2 6663 elsiocb->context1 = NULL;
92d7f7b0
JS
6664 elsiocb->context2 = NULL;
6665 elsiocb->context3 = NULL;
ed957684 6666
92d7f7b0
JS
6667 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
6668 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
6669 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
6670 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
ed957684
JS
6671 phba->fc_stat.NoRcvBuf++;
6672 /* Not enough posted buffers; Try posting more buffers */
92d7f7b0 6673 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
495a714c 6674 lpfc_post_buffer(phba, pring, 0);
ed957684
JS
6675 return;
6676 }
6677
92d7f7b0
JS
6678 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
6679 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
6680 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6681 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
6682 vport = phba->pport;
6fb120a7
JS
6683 else
6684 vport = lpfc_find_vport_by_vpid(phba,
6d368e53 6685 icmd->unsli3.rcvsli3.vpi);
92d7f7b0 6686 }
6d368e53 6687
7f5f3d0d
JS
6688 /* If there are no BDEs associated
6689 * with this IOCB, there is nothing to do.
6690 */
ed957684
JS
6691 if (icmd->ulpBdeCount == 0)
6692 return;
6693
7f5f3d0d
JS
6694 /* type of ELS cmd is first 32bit word
6695 * in packet
6696 */
ed957684 6697 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
92d7f7b0 6698 elsiocb->context2 = bdeBuf1;
ed957684
JS
6699 } else {
6700 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
6701 icmd->un.cont64[0].addrLow);
92d7f7b0
JS
6702 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
6703 paddr);
ed957684
JS
6704 }
6705
92d7f7b0
JS
6706 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
6707 /*
6708 * The different unsolicited event handlers would tell us
6709 * if they are done with "mp" by setting context2 to NULL.
6710 */
dea3101e 6711 if (elsiocb->context2) {
92d7f7b0
JS
6712 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
6713 elsiocb->context2 = NULL;
dea3101e 6714 }
ed957684
JS
6715
6716 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
92d7f7b0 6717 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
ed957684 6718 icmd->ulpBdeCount == 2) {
92d7f7b0
JS
6719 elsiocb->context2 = bdeBuf2;
6720 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
ed957684
JS
6721 /* free mp if we are done with it */
6722 if (elsiocb->context2) {
92d7f7b0
JS
6723 lpfc_in_buf_free(phba, elsiocb->context2);
6724 elsiocb->context2 = NULL;
6725 }
6726 }
6727}
6728
e59058c4 6729/**
3621a710 6730 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
e59058c4
JS
6731 * @phba: pointer to lpfc hba data structure.
6732 * @vport: pointer to a virtual N_Port data structure.
6733 *
6734 * This routine issues a Port Login (PLOGI) to the Name Server with
6735 * State Change Request (SCR) for a @vport. This routine will create an
6736 * ndlp for the Name Server associated to the @vport if such node does
6737 * not already exist. The PLOGI to Name Server is issued by invoking the
6738 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
6739 * (FDMI) is configured for the @vport, an FDMI node will be created and
6740 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
6741 **/
92d7f7b0
JS
6742void
6743lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
6744{
6745 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
92494144
JS
6746 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6747
6748 /*
6749 * If the lpfc_delay_discovery parameter is set, the clean address
6750 * bit is cleared, and the fc fabric parameters have changed, delay FC NPort
6751 * discovery.
6752 */
6753 spin_lock_irq(shost->host_lock);
6754 if (vport->fc_flag & FC_DISC_DELAYED) {
6755 spin_unlock_irq(shost->host_lock);
6756 mod_timer(&vport->delayed_disc_tmo,
6757 jiffies + HZ * phba->fc_ratov);
6758 return;
6759 }
6760 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
6761
6762 ndlp = lpfc_findnode_did(vport, NameServer_DID);
6763 if (!ndlp) {
6764 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
6765 if (!ndlp) {
76a95d75 6766 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
92d7f7b0
JS
6767 lpfc_disc_start(vport);
6768 return;
6769 }
6770 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
6771 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6772 "0251 NameServer login: no memory\n");
92d7f7b0
JS
6773 return;
6774 }
6775 lpfc_nlp_init(vport, ndlp, NameServer_DID);
e47c9093
JS
6776 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
6777 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
6778 if (!ndlp) {
76a95d75 6779 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
e47c9093
JS
6780 lpfc_disc_start(vport);
6781 return;
6782 }
6783 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6784 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6785 "0348 NameServer login: node freed\n");
6786 return;
6787 }
92d7f7b0 6788 }
58da1ffb 6789 ndlp->nlp_type |= NLP_FABRIC;
92d7f7b0
JS
6790
6791 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6792
6793 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
6794 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
6795 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6796 "0252 Cannot issue NameServer login\n");
92d7f7b0
JS
6797 return;
6798 }
6799
3de2a653 6800 if (vport->cfg_fdmi_on) {
63e801ce
JS
6801 /* If this is the first time, allocate an ndlp and initialize
6802 * it. Otherwise, make sure the node is enabled and then do the
6803 * login.
6804 */
6805 ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID);
6806 if (!ndlp_fdmi) {
6807 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
6808 GFP_KERNEL);
6809 if (ndlp_fdmi) {
6810 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
6811 ndlp_fdmi->nlp_type |= NLP_FABRIC;
6812 } else
6813 return;
6814 }
6815 if (!NLP_CHK_NODE_ACT(ndlp_fdmi))
6816 ndlp_fdmi = lpfc_enable_node(vport,
6817 ndlp_fdmi,
6818 NLP_STE_NPR_NODE);
6819
92d7f7b0 6820 if (ndlp_fdmi) {
58da1ffb 6821 lpfc_nlp_set_state(vport, ndlp_fdmi,
63e801ce
JS
6822 NLP_STE_PLOGI_ISSUE);
6823 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0);
92d7f7b0
JS
6824 }
6825 }
92d7f7b0
JS
6826}
6827
e59058c4 6828/**
3621a710 6829 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
e59058c4
JS
6830 * @phba: pointer to lpfc hba data structure.
6831 * @pmb: pointer to the driver internal queue element for mailbox command.
6832 *
6833 * This routine is the completion callback function to register new vport
6834 * mailbox command. If the new vport mailbox command completes successfully,
6835 * the fabric registration login shall be performed on physical port (the
6836 * new vport created is actually a physical port, with VPI 0) or the port
6837 * login to Name Server for State Change Request (SCR) will be performed
6838 * on virtual port (real virtual port, with VPI greater than 0).
6839 **/
92d7f7b0
JS
6840static void
6841lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6842{
6843 struct lpfc_vport *vport = pmb->vport;
6844 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6845 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
04c68496 6846 MAILBOX_t *mb = &pmb->u.mb;
695a814e 6847 int rc;
92d7f7b0 6848
09372820 6849 spin_lock_irq(shost->host_lock);
92d7f7b0 6850 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
09372820 6851 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
6852
6853 if (mb->mbxStatus) {
e8b62011 6854 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
38b92ef8
JS
6855 "0915 Register VPI failed : Status: x%x"
6856 " upd bit: x%x \n", mb->mbxStatus,
6857 mb->un.varRegVpi.upd);
6858 if (phba->sli_rev == LPFC_SLI_REV4 &&
6859 mb->un.varRegVpi.upd)
6860 goto mbox_err_exit ;
92d7f7b0
JS
6861
6862 switch (mb->mbxStatus) {
6863 case 0x11: /* unsupported feature */
6864 case 0x9603: /* max_vpi exceeded */
7f5f3d0d 6865 case 0x9602: /* Link event since CLEAR_LA */
92d7f7b0
JS
6866 /* giving up on vport registration */
6867 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6868 spin_lock_irq(shost->host_lock);
6869 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6870 spin_unlock_irq(shost->host_lock);
6871 lpfc_can_disctmo(vport);
6872 break;
695a814e
JS
6873 /* If reg_vpi fails with invalid VPI status, re-init VPI */
6874 case 0x20:
6875 spin_lock_irq(shost->host_lock);
6876 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6877 spin_unlock_irq(shost->host_lock);
6878 lpfc_init_vpi(phba, pmb, vport->vpi);
6879 pmb->vport = vport;
6880 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
6881 rc = lpfc_sli_issue_mbox(phba, pmb,
6882 MBX_NOWAIT);
6883 if (rc == MBX_NOT_FINISHED) {
6884 lpfc_printf_vlog(vport,
6885 KERN_ERR, LOG_MBOX,
6886 "2732 Failed to issue INIT_VPI"
6887 " mailbox command\n");
6888 } else {
6889 lpfc_nlp_put(ndlp);
6890 return;
6891 }
6892
92d7f7b0
JS
6893 default:
6894 /* Try to recover from this error */
5af5eee7
JS
6895 if (phba->sli_rev == LPFC_SLI_REV4)
6896 lpfc_sli4_unreg_all_rpis(vport);
92d7f7b0 6897 lpfc_mbx_unreg_vpi(vport);
09372820 6898 spin_lock_irq(shost->host_lock);
92d7f7b0 6899 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
09372820 6900 spin_unlock_irq(shost->host_lock);
4b40c59e
JS
6901 if (vport->port_type == LPFC_PHYSICAL_PORT
6902 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
76a95d75 6903 lpfc_issue_init_vfi(vport);
7f5f3d0d
JS
6904 else
6905 lpfc_initial_fdisc(vport);
92d7f7b0
JS
6906 break;
6907 }
92d7f7b0 6908 } else {
695a814e 6909 spin_lock_irq(shost->host_lock);
1987807d 6910 vport->vpi_state |= LPFC_VPI_REGISTERED;
695a814e
JS
6911 spin_unlock_irq(shost->host_lock);
6912 if (vport == phba->pport) {
6fb120a7
JS
6913 if (phba->sli_rev < LPFC_SLI_REV4)
6914 lpfc_issue_fabric_reglogin(vport);
695a814e 6915 else {
fc2b989b
JS
6916 /*
6917 * If the physical port is instantiated using
6918 * FDISC, do not start vport discovery.
6919 */
6920 if (vport->port_state != LPFC_FDISC)
6921 lpfc_start_fdiscs(phba);
695a814e
JS
6922 lpfc_do_scr_ns_plogi(phba, vport);
6923 }
6924 } else
92d7f7b0
JS
6925 lpfc_do_scr_ns_plogi(phba, vport);
6926 }
38b92ef8 6927mbox_err_exit:
fa4066b6
JS
6928 /* Now, we decrement the ndlp reference count held for this
6929 * callback function
6930 */
6931 lpfc_nlp_put(ndlp);
6932
92d7f7b0
JS
6933 mempool_free(pmb, phba->mbox_mem_pool);
6934 return;
6935}
6936
e59058c4 6937/**
3621a710 6938 * lpfc_register_new_vport - Register a new vport with a HBA
e59058c4
JS
6939 * @phba: pointer to lpfc hba data structure.
6940 * @vport: pointer to a host virtual N_Port data structure.
6941 * @ndlp: pointer to a node-list data structure.
6942 *
6943 * This routine registers the @vport as a new virtual port with a HBA.
6944 * It is done through a registering vpi mailbox command.
6945 **/
695a814e 6946void
92d7f7b0
JS
6947lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
6948 struct lpfc_nodelist *ndlp)
6949{
09372820 6950 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
92d7f7b0
JS
6951 LPFC_MBOXQ_t *mbox;
6952
6953 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6954 if (mbox) {
6fb120a7 6955 lpfc_reg_vpi(vport, mbox);
92d7f7b0
JS
6956 mbox->vport = vport;
6957 mbox->context2 = lpfc_nlp_get(ndlp);
6958 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
0b727fea 6959 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
92d7f7b0 6960 == MBX_NOT_FINISHED) {
fa4066b6
JS
6961 /* mailbox command was not successful, decrement ndlp
6962 * reference count for this command
6963 */
6964 lpfc_nlp_put(ndlp);
92d7f7b0 6965 mempool_free(mbox, phba->mbox_mem_pool);
92d7f7b0 6966
e8b62011
JS
6967 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6968 "0253 Register VPI: Can't send mbox\n");
fa4066b6 6969 goto mbox_err_exit;
92d7f7b0
JS
6970 }
6971 } else {
e8b62011
JS
6972 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6973 "0254 Register VPI: no memory\n");
fa4066b6 6974 goto mbox_err_exit;
92d7f7b0 6975 }
fa4066b6
JS
6976 return;
6977
6978mbox_err_exit:
6979 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6980 spin_lock_irq(shost->host_lock);
6981 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
6982 spin_unlock_irq(shost->host_lock);
6983 return;
92d7f7b0
JS
6984}
6985
695a814e 6986/**
0c9ab6f5 6987 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
695a814e
JS
6988 * @phba: pointer to lpfc hba data structure.
6989 *
0c9ab6f5 6990 * This routine cancels the retry delay timers for all the vports.
695a814e
JS
6991 **/
6992void
0c9ab6f5 6993lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
695a814e
JS
6994{
6995 struct lpfc_vport **vports;
6996 struct lpfc_nodelist *ndlp;
695a814e 6997 uint32_t link_state;
0c9ab6f5 6998 int i;
695a814e
JS
6999
7000 /* Treat this failure as linkdown for all vports */
7001 link_state = phba->link_state;
7002 lpfc_linkdown(phba);
7003 phba->link_state = link_state;
7004
7005 vports = lpfc_create_vport_work_array(phba);
7006
7007 if (vports) {
7008 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
7009 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
7010 if (ndlp)
7011 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
7012 lpfc_els_flush_cmd(vports[i]);
7013 }
7014 lpfc_destroy_vport_work_array(phba, vports);
7015 }
0c9ab6f5
JS
7016}
7017
7018/**
7019 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
7020 * @phba: pointer to lpfc hba data structure.
7021 *
7022 * This routine abort all pending discovery commands and
7023 * start a timer to retry FLOGI for the physical port
7024 * discovery.
7025 **/
7026void
7027lpfc_retry_pport_discovery(struct lpfc_hba *phba)
7028{
7029 struct lpfc_nodelist *ndlp;
7030 struct Scsi_Host *shost;
7031
7032 /* Cancel the retry delay timers for all vports */
7033 lpfc_cancel_all_vport_retry_delay_timer(phba);
695a814e
JS
7034
7035 /* If the fabric requires FLOGI, then re-instantiate the physical login */
7036 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
7037 if (!ndlp)
7038 return;
7039
695a814e
JS
7040 shost = lpfc_shost_from_vport(phba->pport);
7041 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
7042 spin_lock_irq(shost->host_lock);
7043 ndlp->nlp_flag |= NLP_DELAY_TMO;
7044 spin_unlock_irq(shost->host_lock);
7045 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
7046 phba->pport->port_state = LPFC_FLOGI;
7047 return;
7048}
7049
7050/**
7051 * lpfc_fabric_login_reqd - Check if FLOGI required.
7052 * @phba: pointer to lpfc hba data structure.
7053 * @cmdiocb: pointer to FDISC command iocb.
7054 * @rspiocb: pointer to FDISC response iocb.
7055 *
7056 * This routine checks if a FLOGI is required for FDISC
7057 * to succeed.
7058 **/
7059static int
7060lpfc_fabric_login_reqd(struct lpfc_hba *phba,
7061 struct lpfc_iocbq *cmdiocb,
7062 struct lpfc_iocbq *rspiocb)
7063{
7064
7065 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
7066 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
7067 return 0;
7068 else
7069 return 1;
7070}
7071
e59058c4 7072/**
3621a710 7073 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
e59058c4
JS
7074 * @phba: pointer to lpfc hba data structure.
7075 * @cmdiocb: pointer to lpfc command iocb data structure.
7076 * @rspiocb: pointer to lpfc response iocb data structure.
7077 *
7078 * This routine is the completion callback function to a Fabric Discover
7079 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
7080 * single threaded, each FDISC completion callback function will reset
7081 * the discovery timer for all vports such that the timers will not get
7082 * unnecessary timeout. The function checks the FDISC IOCB status. If error
7083 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the
7084 * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID
7085 * assigned to the vport has been changed with the completion of the FDISC
7086 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
7087 * are unregistered from the HBA, and then the lpfc_register_new_vport()
7088 * routine is invoked to register new vport with the HBA. Otherwise, the
7089 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
7090 * Server for State Change Request (SCR).
7091 **/
92d7f7b0
JS
7092static void
7093lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7094 struct lpfc_iocbq *rspiocb)
7095{
7096 struct lpfc_vport *vport = cmdiocb->vport;
7097 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7098 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
7099 struct lpfc_nodelist *np;
7100 struct lpfc_nodelist *next_np;
7101 IOCB_t *irsp = &rspiocb->iocb;
7102 struct lpfc_iocbq *piocb;
92494144
JS
7103 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
7104 struct serv_parm *sp;
7105 uint8_t fabric_param_changed;
92d7f7b0 7106
e8b62011
JS
7107 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7108 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
7109 irsp->ulpStatus, irsp->un.ulpWord[4],
7110 vport->fc_prevDID);
92d7f7b0
JS
7111 /* Since all FDISCs are being single threaded, we
7112 * must reset the discovery timer for ALL vports
7113 * waiting to send FDISC when one completes.
7114 */
7115 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
7116 lpfc_set_disctmo(piocb->vport);
7117 }
7118
858c9f6c
JS
7119 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7120 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
7121 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
7122
92d7f7b0 7123 if (irsp->ulpStatus) {
695a814e
JS
7124
7125 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
7126 lpfc_retry_pport_discovery(phba);
7127 goto out;
7128 }
7129
92d7f7b0
JS
7130 /* Check for retry */
7131 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
7132 goto out;
92d7f7b0 7133 /* FDISC failed */
e8b62011 7134 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
d7c255b2 7135 "0126 FDISC failed. (%d/%d)\n",
e8b62011 7136 irsp->ulpStatus, irsp->un.ulpWord[4]);
d7c255b2
JS
7137 goto fdisc_failed;
7138 }
d7c255b2 7139 spin_lock_irq(shost->host_lock);
695a814e 7140 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
4b40c59e 7141 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
d7c255b2 7142 vport->fc_flag |= FC_FABRIC;
76a95d75 7143 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
d7c255b2
JS
7144 vport->fc_flag |= FC_PUBLIC_LOOP;
7145 spin_unlock_irq(shost->host_lock);
92d7f7b0 7146
d7c255b2
JS
7147 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
7148 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
92494144
JS
7149 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
7150 sp = prsp->virt + sizeof(uint32_t);
7151 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
7152 memcpy(&vport->fabric_portname, &sp->portName,
7153 sizeof(struct lpfc_name));
7154 memcpy(&vport->fabric_nodename, &sp->nodeName,
7155 sizeof(struct lpfc_name));
7156 if (fabric_param_changed &&
d7c255b2
JS
7157 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
7158 /* If our NportID changed, we need to ensure all
7159 * remaining NPORTs get unreg_login'ed so we can
7160 * issue unreg_vpi.
7161 */
7162 list_for_each_entry_safe(np, next_np,
7163 &vport->fc_nodes, nlp_listp) {
7164 if (!NLP_CHK_NODE_ACT(ndlp) ||
7165 (np->nlp_state != NLP_STE_NPR_NODE) ||
7166 !(np->nlp_flag & NLP_NPR_ADISC))
7167 continue;
09372820 7168 spin_lock_irq(shost->host_lock);
d7c255b2 7169 np->nlp_flag &= ~NLP_NPR_ADISC;
09372820 7170 spin_unlock_irq(shost->host_lock);
d7c255b2 7171 lpfc_unreg_rpi(vport, np);
92d7f7b0 7172 }
78730cfe 7173 lpfc_cleanup_pending_mbox(vport);
5af5eee7
JS
7174
7175 if (phba->sli_rev == LPFC_SLI_REV4)
7176 lpfc_sli4_unreg_all_rpis(vport);
7177
d7c255b2
JS
7178 lpfc_mbx_unreg_vpi(vport);
7179 spin_lock_irq(shost->host_lock);
7180 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
0f65ff68
JS
7181 if (phba->sli_rev == LPFC_SLI_REV4)
7182 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
4b40c59e
JS
7183 else
7184 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
d7c255b2 7185 spin_unlock_irq(shost->host_lock);
38b92ef8
JS
7186 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
7187 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
7188 /*
7189 * Driver needs to re-reg VPI in order for f/w
7190 * to update the MAC address.
7191 */
7192 lpfc_register_new_vport(phba, vport, ndlp);
5ac6b303 7193 goto out;
92d7f7b0
JS
7194 }
7195
ecfd03c6
JS
7196 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
7197 lpfc_issue_init_vpi(vport);
7198 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
d7c255b2
JS
7199 lpfc_register_new_vport(phba, vport, ndlp);
7200 else
7201 lpfc_do_scr_ns_plogi(phba, vport);
7202 goto out;
7203fdisc_failed:
7204 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7205 /* Cancel discovery timer */
7206 lpfc_can_disctmo(vport);
7207 lpfc_nlp_put(ndlp);
92d7f7b0
JS
7208out:
7209 lpfc_els_free_iocb(phba, cmdiocb);
7210}
7211
e59058c4 7212/**
3621a710 7213 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
e59058c4
JS
7214 * @vport: pointer to a virtual N_Port data structure.
7215 * @ndlp: pointer to a node-list data structure.
7216 * @retry: number of retries to the command IOCB.
7217 *
7218 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
7219 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
7220 * routine to issue the IOCB, which makes sure only one outstanding fabric
7221 * IOCB will be sent off HBA at any given time.
7222 *
7223 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7224 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7225 * will be stored into the context1 field of the IOCB for the completion
7226 * callback function to the FDISC ELS command.
7227 *
7228 * Return code
7229 * 0 - Successfully issued fdisc iocb command
7230 * 1 - Failed to issue fdisc iocb command
7231 **/
a6ababd2 7232static int
92d7f7b0
JS
7233lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7234 uint8_t retry)
7235{
7236 struct lpfc_hba *phba = vport->phba;
7237 IOCB_t *icmd;
7238 struct lpfc_iocbq *elsiocb;
7239 struct serv_parm *sp;
7240 uint8_t *pcmd;
7241 uint16_t cmdsize;
7242 int did = ndlp->nlp_DID;
7243 int rc;
92d7f7b0 7244
5ffc266e 7245 vport->port_state = LPFC_FDISC;
92d7f7b0
JS
7246 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
7247 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
7248 ELS_CMD_FDISC);
7249 if (!elsiocb) {
92d7f7b0 7250 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
7251 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7252 "0255 Issue FDISC: no IOCB\n");
92d7f7b0
JS
7253 return 1;
7254 }
7255
7256 icmd = &elsiocb->iocb;
7257 icmd->un.elsreq64.myID = 0;
7258 icmd->un.elsreq64.fl = 1;
7259
4042629e
JS
7260 if ((phba->sli_rev == LPFC_SLI_REV4) &&
7261 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7262 LPFC_SLI_INTF_IF_TYPE_0)) {
f1126688
JS
7263 /* FDISC needs to be 1 for WQE VPI */
7264 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
7265 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
7266 /* Set the ulpContext to the vpi */
6d368e53 7267 elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
f1126688
JS
7268 } else {
7269 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
7270 icmd->ulpCt_h = 1;
7271 icmd->ulpCt_l = 0;
7272 }
92d7f7b0
JS
7273
7274 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7275 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
7276 pcmd += sizeof(uint32_t); /* CSP Word 1 */
7277 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
7278 sp = (struct serv_parm *) pcmd;
7279 /* Setup CSPs accordingly for Fabric */
7280 sp->cmn.e_d_tov = 0;
7281 sp->cmn.w2.r_a_tov = 0;
7282 sp->cls1.classValid = 0;
7283 sp->cls2.seqDelivery = 1;
7284 sp->cls3.seqDelivery = 1;
7285
7286 pcmd += sizeof(uint32_t); /* CSP Word 2 */
7287 pcmd += sizeof(uint32_t); /* CSP Word 3 */
7288 pcmd += sizeof(uint32_t); /* CSP Word 4 */
7289 pcmd += sizeof(uint32_t); /* Port Name */
7290 memcpy(pcmd, &vport->fc_portname, 8);
7291 pcmd += sizeof(uint32_t); /* Node Name */
7292 pcmd += sizeof(uint32_t); /* Node Name */
7293 memcpy(pcmd, &vport->fc_nodename, 8);
7294
7295 lpfc_set_disctmo(vport);
7296
7297 phba->fc_stat.elsXmitFDISC++;
7298 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
7299
858c9f6c
JS
7300 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7301 "Issue FDISC: did:x%x",
7302 did, 0, 0);
7303
92d7f7b0
JS
7304 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
7305 if (rc == IOCB_ERROR) {
7306 lpfc_els_free_iocb(phba, elsiocb);
92d7f7b0 7307 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
7308 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7309 "0256 Issue FDISC: Cannot send IOCB\n");
92d7f7b0
JS
7310 return 1;
7311 }
7312 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
92d7f7b0
JS
7313 return 0;
7314}
7315
e59058c4 7316/**
3621a710 7317 * lpfc_cmpl_els_npiv_logo - Completion callback for an NPIV vport LOGO
e59058c4
JS
7318 * @phba: pointer to lpfc hba data structure.
7319 * @cmdiocb: pointer to lpfc command iocb data structure.
7320 * @rspiocb: pointer to lpfc response iocb data structure.
7321 *
7322 * This routine is the completion callback function for a LOGO ELS command
7323 * issued off a vport. It frees the command IOCB and then decrements the
7324 * reference count held on the ndlp for this completion, indicating that
7325 * the reference to the ndlp is no longer needed. Note that the
7326 * lpfc_els_free_iocb() routine drops the ndlp reference held for this
7327 * callback function, and the additional explicit lpfc_nlp_put() in this
7328 * routine triggers the actual release of the ndlp.
7329 **/
92d7f7b0
JS
7330static void
7331lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7332 struct lpfc_iocbq *rspiocb)
7333{
7334 struct lpfc_vport *vport = cmdiocb->vport;
858c9f6c 7335 IOCB_t *irsp;
e47c9093 7336 struct lpfc_nodelist *ndlp;
9589b062 7337 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
858c9f6c 7338
9589b062 7339 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
858c9f6c
JS
7340 irsp = &rspiocb->iocb;
7341 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7342 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
7343 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
92d7f7b0
JS
7344
7345 lpfc_els_free_iocb(phba, cmdiocb);
7346 vport->unreg_vpi_cmpl = VPORT_ERROR;
e47c9093
JS
7347
7348 /* Trigger the release of the ndlp after logo */
7349 lpfc_nlp_put(ndlp);
9589b062
JS
7350
7351 /* NPIV LOGO completes to NPort <nlp_DID> */
7352 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7353 "2928 NPIV LOGO completes to NPort x%x "
7354 "Data: x%x x%x x%x x%x\n",
7355 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
7356 irsp->ulpTimeout, vport->num_disc_nodes);
7357
7358 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
7359 spin_lock_irq(shost->host_lock);
7360 vport->fc_flag &= ~FC_FABRIC;
7361 spin_unlock_irq(shost->host_lock);
7362 }
92d7f7b0
JS
7363}
7364
e59058c4 7365/**
3621a710 7366 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
e59058c4
JS
7367 * @vport: pointer to a virtual N_Port data structure.
7368 * @ndlp: pointer to a node-list data structure.
7369 *
7370 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
7371 *
7372 * Note that the lpfc_prep_els_iocb() routine increments the reference count
7373 * of the ndlp by 1 to hold it, and stores the ndlp reference in the
7374 * context1 field of the IOCB for use by the completion callback
7375 * function of the LOGO ELS command.
7376 *
7377 * Return codes
7378 * 0 - Successfully issued logo off the @vport
7379 * 1 - Failed to issue logo off the @vport
7380 **/
92d7f7b0
JS
7381int
7382lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
7383{
7384 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7385 struct lpfc_hba *phba = vport->phba;
92d7f7b0
JS
7386 IOCB_t *icmd;
7387 struct lpfc_iocbq *elsiocb;
7388 uint8_t *pcmd;
7389 uint16_t cmdsize;
7390
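/*
 * LOGO payload: the ELS command word, the N_Port ID being logged out,
 * and the vport's 8-byte port name; this matches the cmdsize computed
 * below (two 32-bit words plus one lpfc_name).
 */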
7391 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
7392 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
7393 ELS_CMD_LOGO);
7394 if (!elsiocb)
7395 return 1;
7396
7397 icmd = &elsiocb->iocb;
7398 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7399 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
7400 pcmd += sizeof(uint32_t);
7401
7402 /* Fill in LOGO payload */
7403 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
7404 pcmd += sizeof(uint32_t);
7405 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
7406
858c9f6c
JS
7407 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7408 "Issue LOGO npiv did:x%x flg:x%x",
7409 ndlp->nlp_DID, ndlp->nlp_flag, 0);
7410
92d7f7b0
JS
7411 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
7412 spin_lock_irq(shost->host_lock);
7413 ndlp->nlp_flag |= NLP_LOGO_SND;
7414 spin_unlock_irq(shost->host_lock);
3772a991
JS
7415 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
7416 IOCB_ERROR) {
92d7f7b0
JS
7417 spin_lock_irq(shost->host_lock);
7418 ndlp->nlp_flag &= ~NLP_LOGO_SND;
7419 spin_unlock_irq(shost->host_lock);
7420 lpfc_els_free_iocb(phba, elsiocb);
7421 return 1;
7422 }
7423 return 0;
7424}
7425
e59058c4 7426/**
3621a710 7427 * lpfc_fabric_block_timeout - Handler function for the fabric block timer
e59058c4
JS
7428 * @ptr: holder for the timer function associated data.
7429 *
7430 * This routine is invoked by the fabric iocb block timer when it
7431 * expires. It posts the fabric iocb block timeout event by setting the
7432 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
7433 * lpfc_worker_wake_up() to wake up the worker thread. The worker thread
7434 * then invokes lpfc_unblock_fabric_iocbs() in response to the posted
7435 * WORKER_FABRIC_BLOCK_TMO event.
7436 **/
92d7f7b0
JS
7437void
7438lpfc_fabric_block_timeout(unsigned long ptr)
7439{
7440 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
7441 unsigned long iflags;
7442 uint32_t tmo_posted;
5e9d9b82 7443
92d7f7b0
JS
7444 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
7445 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
7446 if (!tmo_posted)
7447 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
7448 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
7449
5e9d9b82
JS
7450 if (!tmo_posted)
7451 lpfc_worker_wake_up(phba);
7452 return;
92d7f7b0
JS
7453}
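/*
 * Illustrative sketch, not part of this file: the fabric block timer is
 * assumed to be set up during adapter initialization roughly as follows,
 * so that this handler receives the phba pointer through @ptr:
 *
 *	init_timer(&phba->fabric_block_timer);
 *	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
 *	phba->fabric_block_timer.data = (unsigned long) phba;
 */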
7454
e59058c4 7455/**
3621a710 7456 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
e59058c4
JS
7457 * @phba: pointer to lpfc hba data structure.
7458 *
7459 * This routine issues one fabric iocb from the driver internal list to
7460 * the HBA. It first checks whether it is ready to issue a fabric iocb to
7461 * the HBA (i.e., no fabric iocb is currently outstanding). If so, it
7462 * removes one pending fabric iocb from the driver internal list and invokes
7463 * the lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
7464 **/
92d7f7b0
JS
7465static void
7466lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
7467{
7468 struct lpfc_iocbq *iocb;
7469 unsigned long iflags;
7470 int ret;
92d7f7b0
JS
7471 IOCB_t *cmd;
7472
7473repeat:
7474 iocb = NULL;
7475 spin_lock_irqsave(&phba->hbalock, iflags);
7f5f3d0d 7476 /* Post any pending iocb to the SLI layer */
92d7f7b0
JS
7477 if (atomic_read(&phba->fabric_iocb_count) == 0) {
7478 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
7479 list);
7480 if (iocb)
7f5f3d0d 7481 /* Increment fabric iocb count to hold the position */
92d7f7b0
JS
7482 atomic_inc(&phba->fabric_iocb_count);
7483 }
7484 spin_unlock_irqrestore(&phba->hbalock, iflags);
7485 if (iocb) {
7486 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
7487 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
7488 iocb->iocb_flag |= LPFC_IO_FABRIC;
7489
858c9f6c
JS
7490 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
7491 "Fabric sched1: ste:x%x",
7492 iocb->vport->port_state, 0, 0);
7493
3772a991 7494 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
92d7f7b0
JS
7495
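/*
 * If the SLI layer rejects the iocb, restore its original completion
 * handler, complete it locally with LOCAL_REJECT/SLI_ABORTED so the
 * caller's callback still runs, drop the count taken above, and try
 * the next pending fabric iocb.
 */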
7496 if (ret == IOCB_ERROR) {
7497 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
7498 iocb->fabric_iocb_cmpl = NULL;
7499 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
7500 cmd = &iocb->iocb;
7501 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
7502 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
7503 iocb->iocb_cmpl(phba, iocb, iocb);
7504
7505 atomic_dec(&phba->fabric_iocb_count);
7506 goto repeat;
7507 }
7508 }
7509
7510 return;
7511}
7512
e59058c4 7513/**
3621a710 7514 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
e59058c4
JS
7515 * @phba: pointer to lpfc hba data structure.
7516 *
7517 * This routine unblocks the issuing of fabric iocb commands. It clears
7518 * the fabric iocb block bit and then invokes the routine
7519 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocbs
7520 * from the driver internal fabric iocb list.
7521 **/
92d7f7b0
JS
7522void
7523lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
7524{
7525 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7526
7527 lpfc_resume_fabric_iocbs(phba);
7528 return;
7529}
7530
e59058c4 7531/**
3621a710 7532 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
e59058c4
JS
7533 * @phba: pointer to lpfc hba data structure.
7534 *
7535 * This routine blocks the issuing of fabric iocbs for a specified amount of
7536 * time (currently 100 ms). This is done by setting the fabric iocb block bit
7537 * and setting up a 100 ms timeout timer. While the block bit is set, no more
7538 * fabric iocbs will be issued to the HBA.
7539 **/
92d7f7b0
JS
7540static void
7541lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
7542{
7543 int blocked;
7544
7545 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7f5f3d0d 7546 /* Start a timer to unblock fabric iocbs after 100ms */
92d7f7b0
JS
7547 if (!blocked)
7548 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10);
7549
7550 return;
7551}
7552
e59058c4 7553/**
3621a710 7554 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
e59058c4
JS
7555 * @phba: pointer to lpfc hba data structure.
7556 * @cmdiocb: pointer to lpfc command iocb data structure.
7557 * @rspiocb: pointer to lpfc response iocb data structure.
7558 *
7559 * This routine is the callback function installed as the fabric iocb's
7560 * completion handler (iocb->iocb_cmpl). The original iocb's callback
7561 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
7562 * function first restores and invokes the original iocb's callback function
7563 * and then, if issuing is not blocked, invokes lpfc_resume_fabric_iocbs() to
7564 * issue the next fabric-bound iocb from the driver internal list onto the wire.
7565 **/
92d7f7b0
JS
7566static void
7567lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7568 struct lpfc_iocbq *rspiocb)
7569{
7570 struct ls_rjt stat;
7571
7572 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
7573 BUG();
7574
7575 switch (rspiocb->iocb.ulpStatus) {
7576 case IOSTAT_NPORT_RJT:
7577 case IOSTAT_FABRIC_RJT:
7578 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
7579 lpfc_block_fabric_iocbs(phba);
ed957684 7580 }
92d7f7b0
JS
7581 break;
7582
7583 case IOSTAT_NPORT_BSY:
7584 case IOSTAT_FABRIC_BSY:
7585 lpfc_block_fabric_iocbs(phba);
7586 break;
7587
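/*
 * For an LS_RJT, decode the reason code from word 4 of the response;
 * "unable to perform command" or "logical busy" from the fabric also
 * triggers the temporary block.
 */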
7588 case IOSTAT_LS_RJT:
7589 stat.un.lsRjtError =
7590 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
7591 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
7592 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
7593 lpfc_block_fabric_iocbs(phba);
7594 break;
7595 }
7596
7597 if (atomic_read(&phba->fabric_iocb_count) == 0)
7598 BUG();
7599
7600 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
7601 cmdiocb->fabric_iocb_cmpl = NULL;
7602 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
7603 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
7604
7605 atomic_dec(&phba->fabric_iocb_count);
7606 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
7f5f3d0d
JS
7607 /* Post any pending iocbs to HBA */
7608 lpfc_resume_fabric_iocbs(phba);
92d7f7b0
JS
7609 }
7610}
7611
e59058c4 7612/**
3621a710 7613 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
e59058c4
JS
7614 * @phba: pointer to lpfc hba data structure.
7615 * @iocb: pointer to lpfc command iocb data structure.
7616 *
7617 * This routine is used as the top-level API for issuing a fabric iocb command
7618 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this
7619 * function makes sure that only one fabric-bound iocb is outstanding at
7620 * any given time. As such, it first checks whether there
7621 * is already an outstanding fabric iocb on the wire. If so, it puts the
7622 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
7623 * issued later. Otherwise, it issues the iocb on the wire and updates the
7624 * fabric iocb count to indicate that there is one fabric iocb on the wire.
7625 *
7626 * Note that this implementation can potentially send fabric IOCBs out of
7627 * order: the "ready" boolean does not include the condition that the
7628 * internal fabric IOCB list is empty. As such, a fabric IOCB issued
7629 * directly by this routine may jump ahead of fabric IOCBs already queued
7630 * on the internal list.
7631 *
7632 * Return code
7633 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
7634 * IOCB_ERROR - failed to issue fabric iocb
7635 **/
a6ababd2 7636static int
92d7f7b0
JS
7637lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
7638{
7639 unsigned long iflags;
92d7f7b0
JS
7640 int ready;
7641 int ret;
7642
7643 if (atomic_read(&phba->fabric_iocb_count) > 1)
7644 BUG();
7645
7646 spin_lock_irqsave(&phba->hbalock, iflags);
7647 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
7648 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7649
7f5f3d0d
JS
7650 if (ready)
7651 /* Increment fabric iocb count to hold the position */
7652 atomic_inc(&phba->fabric_iocb_count);
92d7f7b0
JS
7653 spin_unlock_irqrestore(&phba->hbalock, iflags);
7654 if (ready) {
7655 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
7656 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
7657 iocb->iocb_flag |= LPFC_IO_FABRIC;
7658
858c9f6c
JS
7659 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
7660 "Fabric sched2: ste:x%x",
7661 iocb->vport->port_state, 0, 0);
7662
3772a991 7663 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
92d7f7b0
JS
7664
7665 if (ret == IOCB_ERROR) {
7666 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
7667 iocb->fabric_iocb_cmpl = NULL;
7668 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
7669 atomic_dec(&phba->fabric_iocb_count);
7670 }
7671 } else {
7672 spin_lock_irqsave(&phba->hbalock, iflags);
7673 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
7674 spin_unlock_irqrestore(&phba->hbalock, iflags);
7675 ret = IOCB_SUCCESS;
7676 }
7677 return ret;
7678}
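/*
 * Sketch only, addressing the ordering caveat noted in the comment above
 * (this is not the driver's actual logic): strict FIFO ordering could be
 * kept by issuing directly only when nothing is already queued, e.g.
 *
 *	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
 *		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags) &&
 *		list_empty(&phba->fabric_iocb_list);
 *
 * so that queued entries always drain through lpfc_resume_fabric_iocbs()
 * in submission order.
 */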
7679
e59058c4 7680/**
3621a710 7681 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
e59058c4
JS
7682 * @vport: pointer to a virtual N_Port data structure.
7683 *
7684 * This routine aborts all the IOCBs associated with a @vport from the
7685 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
7686 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
7687 * list, removes each IOCB associated with the @vport off the list, sets the
7688 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
7689 * associated with the IOCB.
7690 **/
a6ababd2 7691static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
92d7f7b0
JS
7692{
7693 LIST_HEAD(completions);
7694 struct lpfc_hba *phba = vport->phba;
7695 struct lpfc_iocbq *tmp_iocb, *piocb;
92d7f7b0
JS
7696
7697 spin_lock_irq(&phba->hbalock);
7698 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
7699 list) {
7700
7701 if (piocb->vport != vport)
7702 continue;
7703
7704 list_move_tail(&piocb->list, &completions);
7705 }
7706 spin_unlock_irq(&phba->hbalock);
7707
a257bf90
JS
7708 /* Cancel all the IOCBs from the completions list */
7709 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7710 IOERR_SLI_ABORTED);
92d7f7b0
JS
7711}
7712
e59058c4 7713/**
3621a710 7714 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
e59058c4
JS
7715 * @ndlp: pointer to a node-list data structure.
7716 *
7717 * This routine aborts all the IOCBs associated with an @ndlp from the
7718 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
7719 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
7720 * list, removes each IOCB associated with the @ndlp off the list, sets the
7721 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
7722 * associated with the IOCB.
7723 **/
92d7f7b0
JS
7724void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
7725{
7726 LIST_HEAD(completions);
a257bf90 7727 struct lpfc_hba *phba = ndlp->phba;
92d7f7b0
JS
7728 struct lpfc_iocbq *tmp_iocb, *piocb;
7729 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
92d7f7b0
JS
7730
7731 spin_lock_irq(&phba->hbalock);
7732 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
7733 list) {
7734 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
7735
7736 list_move_tail(&piocb->list, &completions);
ed957684 7737 }
dea3101e 7738 }
92d7f7b0
JS
7739 spin_unlock_irq(&phba->hbalock);
7740
a257bf90
JS
7741 /* Cancel all the IOCBs from the completions list */
7742 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7743 IOERR_SLI_ABORTED);
92d7f7b0
JS
7744}
7745
e59058c4 7746/**
3621a710 7747 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
e59058c4
JS
7748 * @phba: pointer to lpfc hba data structure.
7749 *
7750 * This routine aborts all the IOCBs currently on the driver internal
7751 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
7752 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
7753 * list, sets the status field of each IOCB to
7754 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
7755 * the IOCB.
7756 **/
92d7f7b0
JS
7757void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
7758{
7759 LIST_HEAD(completions);
92d7f7b0
JS
7760
7761 spin_lock_irq(&phba->hbalock);
7762 list_splice_init(&phba->fabric_iocb_list, &completions);
7763 spin_unlock_irq(&phba->hbalock);
7764
a257bf90
JS
7765 /* Cancel all the IOCBs from the completions list */
7766 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7767 IOERR_SLI_ABORTED);
dea3101e 7768}
6fb120a7 7769
1151e3ec
JS
7770/**
7771 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
7772 * @vport: pointer to lpfc vport data structure.
7773 *
7774 * This routine is invoked by the vport cleanup for deletions and the cleanup
7775 * for an ndlp on removal.
7776 **/
7777void
7778lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
7779{
7780 struct lpfc_hba *phba = vport->phba;
7781 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7782 unsigned long iflag = 0;
7783
7784 spin_lock_irqsave(&phba->hbalock, iflag);
7785 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
7786 list_for_each_entry_safe(sglq_entry, sglq_next,
7787 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
7788 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
7789 sglq_entry->ndlp = NULL;
7790 }
7791 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7792 spin_unlock_irqrestore(&phba->hbalock, iflag);
7793 return;
7794}
7795
6fb120a7
JS
7796/**
7797 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
7798 * @phba: pointer to lpfc hba data structure.
7799 * @axri: pointer to the els xri abort wcqe structure.
7800 *
7801 * This routine is invoked by the worker thread to process a SLI4 slow-path
7802 * ELS aborted xri.
7803 **/
7804void
7805lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7806 struct sli4_wcqe_xri_aborted *axri)
7807{
7808 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
19ca7609 7809 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
7851fe2c 7810 uint16_t lxri = 0;
19ca7609 7811
6fb120a7
JS
7812 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7813 unsigned long iflag = 0;
19ca7609 7814 struct lpfc_nodelist *ndlp;
589a52d6 7815 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6fb120a7 7816
0f65ff68
JS
7817 spin_lock_irqsave(&phba->hbalock, iflag);
7818 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
6fb120a7
JS
7819 list_for_each_entry_safe(sglq_entry, sglq_next,
7820 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
7821 if (sglq_entry->sli4_xritag == xri) {
7822 list_del(&sglq_entry->list);
19ca7609
JS
7823 ndlp = sglq_entry->ndlp;
7824 sglq_entry->ndlp = NULL;
6fb120a7
JS
7825 list_add_tail(&sglq_entry->list,
7826 &phba->sli4_hba.lpfc_sgl_list);
0f65ff68
JS
7827 sglq_entry->state = SGL_FREED;
7828 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6fb120a7 7829 spin_unlock_irqrestore(&phba->hbalock, iflag);
19ca7609 7830 lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
589a52d6
JS
7831
7832 /* Check if TXQ queue needs to be serviced */
7833 if (pring->txq_cnt)
7834 lpfc_worker_wake_up(phba);
6fb120a7
JS
7835 return;
7836 }
7837 }
0f65ff68 7838 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
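/*
 * The XRI was not on the aborted-ELS SGL list. If it maps to an SGL
 * that is still active, mark that SGL as XRI-aborted so it is handled
 * properly when it is eventually released.
 */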
7851fe2c
JS
7839 lxri = lpfc_sli4_xri_inrange(phba, xri);
7840 if (lxri == NO_XRI) {
7841 spin_unlock_irqrestore(&phba->hbalock, iflag);
7842 return;
7843 }
7844 sglq_entry = __lpfc_get_active_sglq(phba, lxri);
0f65ff68
JS
7845 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
7846 spin_unlock_irqrestore(&phba->hbalock, iflag);
7847 return;
7848 }
7849 sglq_entry->state = SGL_XRI_ABORTED;
7850 spin_unlock_irqrestore(&phba->hbalock, iflag);
7851 return;
6fb120a7 7852}