/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
			struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there are any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is LPFC_VPORT_READY, the request for checking host link
 * attention events will be ignored and a return code shall indicate no host
 * link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}
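
/*
 * Illustrative sketch (not part of the driver build): ELS completion
 * handlers in this file typically call lpfc_els_chk_latt() first and bail
 * out when a link attention event interrupted discovery, as
 * lpfc_cmpl_els_flogi() does further below. The handler name here is
 * hypothetical.
 */
#if 0
static void
example_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;

	/* Discovery was aborted by a link attention event: drop the node
	 * reference taken in lpfc_prep_els_iocb() and free the IOCB.
	 */
	if (lpfc_els_chk_latt(vport)) {
		lpfc_nlp_put(ndlp);
		goto out;
	}

	/* ... normal command-specific completion handling ... */
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}
#endif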

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used for allocating a lpfc-IOCB data structure from
 * the driver lpfc-IOCB free-list and preparing the IOCB with the parameters
 * passed into the routine for the discovery state machine to issue an
 * Extended Link Service (ELS) command. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines; the ELS command-specific fields will be set up later by the
 * individual discovery machine routines after calling this routine to
 * allocate and prepare a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs) and allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
	icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
	icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.elsreq64.remoteID = did;	/* DID */
	if (expectRsp) {
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		icmd->un.elsreq64.myID = vport->fc_myDID;

		/* For ELS_REQUEST64_CR, use the VPI by default */
		icmd->ulpContext = phba->vpi_ids[vport->vpi];
		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	/* prevent preparing iocb with NULL ndlp reference */
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1)
		goto els_iocb_free_pbuf_exit;
	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state: x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}
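
/*
 * Illustrative sketch (not built): the usual consumer pattern for
 * lpfc_prep_els_iocb(), as seen in lpfc_issue_els_flogi() and the other
 * issue routines in this file - allocate the IOCB, fill the command payload
 * through context2, set the completion handler, then send it. The function
 * name and the choice of ELS_CMD_FLOGI here are only for illustration.
 */
#if 0
static int
example_issue_els(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		  uint8_t retry)
{
	uint16_t cmdsize = sizeof(uint32_t) + sizeof(struct serv_parm);
	struct lpfc_iocbq *elsiocb;
	uint8_t *pcmd;

	/* expectRsp=1: BDEs for both command and response payloads */
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);
	if (!elsiocb)
		return 1;

	/* Command payload lives in context2 */
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));

	/* Completion callback sees the ndlp through context1 */
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	if (lpfc_issue_fabric_iocb(vport->phba, elsiocb) == IOCB_ERROR) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		return 1;
	}
	return 0;
}
#endif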

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO - failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->context2 = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 6;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
	mp = (struct lpfc_dmabuf *) mbox->context1;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0249 Cannot issue Register Fabric login: Err %d\n", err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for FCoE only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
static int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		rc = -ENODEV;
		goto fail;
	}

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf) {
		rc = -ENOMEM;
		goto fail;
	}
	dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
	if (!dmabuf->virt) {
		rc = -ENOMEM;
		goto fail_free_dmabuf;
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail_free_coherent;
	}
	vport->port_state = LPFC_FABRIC_CFG_LINK;
	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->context1 = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail_free_mbox;
	}
	return 0;

fail_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
fail_free_coherent:
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
fail_free_dmabuf:
	kfree(dmabuf);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handlers.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
 * Fabric node nodename has changed in the completion service parameters;
 * otherwise it returns 0. It also sets a flag in the vport data structure
 * to delay N_Port discovery after the FLOGI/FDISC completion if the Clean
 * Address bit in the FLOGI/FDISC response is cleared and the FCID, Fabric
 * portname, or Fabric node nodename has changed in the completion service
 * parameters.
 *
 * Return code
 *   0 - FCID and Fabric Nodename and Fabric portname are not changed.
 *   1 - FCID or Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
		struct serv_parm *sp)
{
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
		memcmp(&vport->fabric_portname, &sp->portName,
			sizeof(struct lpfc_name)) ||
		memcmp(&vport->fabric_nodename, &sp->nodeName,
			sizeof(struct lpfc_name)))
		fabric_param_changed = 1;

	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || lpfc_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}
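
/*
 * Illustrative sketch (not built): the Word 1 Bit 31 overloading described
 * above, as it appears elsewhere in this file - the bit is written as the
 * "multiple N_Port request" flag when the FLOGI is built (see
 * lpfc_issue_els_flogi()) and read back as the Clean Address bit by the
 * FLOGI/FDISC completion path through lpfc_check_clean_addr_bit(). The
 * function name below is hypothetical.
 */
#if 0
static void
example_word1_bit31_usage(struct lpfc_vport *vport, struct serv_parm *req_sp,
			  struct serv_parm *rsp_sp)
{
	uint8_t fabric_param_changed;

	/* Outgoing FLOGI: the bit asks the fabric for multiple N_Port
	 * (NPIV) support, as lpfc_issue_els_flogi() does.
	 */
	req_sp->cmn.request_multiple_Nport = 1;

	/* Incoming LS_ACC: the same bit is the Clean Address bit; the
	 * completion path feeds the response service parameters into
	 * lpfc_check_clean_addr_bit(), which may set FC_DISC_DELAYED.
	 */
	fabric_param_changed = lpfc_check_clean_addr_bit(vport, rsp_sp);
	(void)fabric_param_changed;	/* consumed by the real callers */
}
#endif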


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID for
 * the @vport against the previously assigned N_Port ID. If it is different
 * from the previously assigned Destination ID (DID), the lpfc_unreg_rpi()
 * routine is invoked on all the remaining nodes with the @vport to
 * unregister the Remote Port Indicators (RPIs). Finally,
 * lpfc_issue_fabric_reglogin() is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
		} else {
			/* Because we asked f/w for NPIV it still expects us
			   to call reg_vnpid at least for the physical host */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
		}
	}

	if (fabric_param_changed &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					&vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(np))
				continue;
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
				   !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(shost->host_lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			/*
			 * If VPI is unreged, driver needs to do INIT_VPI
			 * before re-registering
			 */
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
			/*
			 * Driver needs to re-reg VPI in order for f/w
			 * to update the MAC address.
			 */
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
			lpfc_register_new_vport(phba, vport, ndlp);
			return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else
			lpfc_issue_reg_vfi(vport);
	}
	return 0;
}
/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));
	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set ours to LocalID; the other
		 * side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp)
				goto fail;
			lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	} else
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* Start discovery - this should just do CLEAR_LA */
	lpfc_disc_start(vport);
	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported an error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * a retry has been made (either immediately or delayed, with lpfc_els_retry()
 * returning 1), the command IOCB will be released and the function returns.
 * If the retry attempt has been given up (possibly after reaching the maximum
 * number of retries), one additional decrement of the ndlp reference shall be
 * invoked before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of the ndlp reference count). If no error is reported
 * in the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"FLOGI cmpl: status:x%x/x%x state:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout);

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0) {
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		}
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID))) {
			if (vport->fc_flag & FC_VFI_REGISTERED)
				lpfc_sli4_unreg_all_rpis(vport);
			lpfc_issue_reg_vfi(vport);
			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully "
			 "Data: x%x x%x x%x x%x\n",
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR,
				LOG_FIP | LOG_ELS,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);
			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						LOG_ELS,
						"2769 FLOGI to FCF (x%x) "
						"completed successfully\n",
						phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
	}

flogifail:
	lpfc_nlp_put(ndlp);

	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
			(irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli_ring *pring;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo;
	int rc;

	pring = &phba->sli.ring[LPFC_ELS_RING];

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cls1.classValid = 0;
	sp->cls2.seqDelivery = 1;
	sp->cls3.seqDelivery = 1;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
	} else {
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue FLOGI: opt:x%x",
		phba->sli3_options, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = &phba->sli.ring[LPFC_ELS_RING];

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
		    icmd->un.elsreq64.bdl.ulpIoTag32) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
			    (ndlp->nlp_DID == Fabric_DID))
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
 * is then invoked with the @vport and the ndlp to perform the FDISC for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	int sentplogi;

	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		sentplogi = lpfc_els_disc_plogi(vport);

	return;
}
1314
e59058c4 1315/**
3621a710 1316 * lpfc_plogi_confirm_nport - Confirm pologi wwpn matches stored ndlp
e59058c4
JS
1317 * @phba: pointer to lpfc hba data structure.
1318 * @prsp: pointer to response IOCB payload.
1319 * @ndlp: pointer to a node-list data structure.
1320 *
1321 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1322 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt.
1323 * The following cases are considered N_Port confirmed:
1324 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1325 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1326 * it does not have WWPN assigned either. If the WWPN is confirmed, the
1327 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1328 * 1) if there is a node on vport list other than the @ndlp with the same
1329 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1330 * on that node to release the RPI associated with the node; 2) if there is
1331 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1332 * into, a new node shall be allocated (or activated). In either case, the
1333 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1334 * be released and the new_ndlp shall be put on to the vport node list and
1335 * its pointer returned as the confirmed node.
1336 *
1337 * Note that before the @ndlp got "released", the keepDID from not-matching
1338 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1339 * of the @ndlp. This is because the release of @ndlp is actually to put it
1340 * into an inactive state on the vport node list and the vport node list
1341 * management algorithm does not allow two node with a same DID.
1342 *
1343 * Return code
1344 * pointer to the PLOGI N_Port @ndlp
1345 **/
488d1469 1346static struct lpfc_nodelist *
92d7f7b0 1347lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
488d1469
JS
1348 struct lpfc_nodelist *ndlp)
1349{
2e0fef85 1350 struct lpfc_vport *vport = ndlp->vport;
488d1469 1351 struct lpfc_nodelist *new_ndlp;
0ff10d46
JS
1352 struct lpfc_rport_data *rdata;
1353 struct fc_rport *rport;
488d1469 1354 struct serv_parm *sp;
92d7f7b0 1355 uint8_t name[sizeof(struct lpfc_name)];
58da1ffb 1356 uint32_t rc, keepDID = 0;
38b92ef8
JS
1357 int put_node;
1358 int put_rport;
19ca7609 1359 struct lpfc_node_rrqs rrq;
488d1469 1360
2fb9bd8b
JS
1361 /* Fabric nodes can have the same WWPN so we don't bother searching
1362 * by WWPN. Just return the ndlp that was given to us.
1363 */
1364 if (ndlp->nlp_type & NLP_FABRIC)
1365 return ndlp;
1366
92d7f7b0 1367 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
685f0bf7 1368 memset(name, 0, sizeof(struct lpfc_name));
488d1469 1369
685f0bf7 1370 /* Now we find out if the NPort we are logging into, matches the WWPN
488d1469
JS
1371 * we have for that ndlp. If not, we have some work to do.
1372 */
2e0fef85 1373 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
488d1469 1374
e47c9093 1375 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
488d1469 1376 return ndlp;
19ca7609 1377 memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
488d1469
JS
1378
1379 if (!new_ndlp) {
2e0fef85
JS
1380 rc = memcmp(&ndlp->nlp_portname, name,
1381 sizeof(struct lpfc_name));
92795650
JS
1382 if (!rc)
1383 return ndlp;
488d1469
JS
1384 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
1385 if (!new_ndlp)
1386 return ndlp;
2e0fef85 1387 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
e47c9093 1388 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
58da1ffb
JS
1389 rc = memcmp(&ndlp->nlp_portname, name,
1390 sizeof(struct lpfc_name));
1391 if (!rc)
1392 return ndlp;
e47c9093
JS
1393 new_ndlp = lpfc_enable_node(vport, new_ndlp,
1394 NLP_STE_UNUSED_NODE);
1395 if (!new_ndlp)
1396 return ndlp;
58da1ffb 1397 keepDID = new_ndlp->nlp_DID;
19ca7609
JS
1398 if (phba->sli_rev == LPFC_SLI_REV4)
1399 memcpy(&rrq.xri_bitmap,
1400 &new_ndlp->active_rrqs.xri_bitmap,
1401 sizeof(new_ndlp->active_rrqs.xri_bitmap));
1402 } else {
58da1ffb 1403 keepDID = new_ndlp->nlp_DID;
19ca7609
JS
1404 if (phba->sli_rev == LPFC_SLI_REV4)
1405 memcpy(&rrq.xri_bitmap,
1406 &new_ndlp->active_rrqs.xri_bitmap,
1407 sizeof(new_ndlp->active_rrqs.xri_bitmap));
1408 }
488d1469 1409
2e0fef85 1410 lpfc_unreg_rpi(vport, new_ndlp);
488d1469 1411 new_ndlp->nlp_DID = ndlp->nlp_DID;
92795650 1412 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
19ca7609
JS
1413 if (phba->sli_rev == LPFC_SLI_REV4)
1414 memcpy(new_ndlp->active_rrqs.xri_bitmap,
1415 &ndlp->active_rrqs.xri_bitmap,
1416 sizeof(ndlp->active_rrqs.xri_bitmap));
0ff10d46
JS
1417
1418 if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
1419 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1420 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1421
e47c9093 1422 /* Set state will put new_ndlp on to node list if not already done */
2e0fef85 1423 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
488d1469 1424
2e0fef85 1425 /* Move this back to NPR state */
87af33fe
JS
1426 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1427 /* The new_ndlp is replacing ndlp totally, so we need
1428 * to put ndlp on UNUSED list and try to free it.
1429 */
0ff10d46
JS
1430
1431 /* Fix up the rport accordingly */
1432 rport = ndlp->rport;
1433 if (rport) {
1434 rdata = rport->dd_data;
1435 if (rdata->pnode == ndlp) {
1436 lpfc_nlp_put(ndlp);
1437 ndlp->rport = NULL;
1438 rdata->pnode = lpfc_nlp_get(new_ndlp);
1439 new_ndlp->rport = rport;
1440 }
1441 new_ndlp->nlp_type = ndlp->nlp_type;
1442 }
58da1ffb
JS
1443 /* We shall actually free the ndlp with both nlp_DID and
1444 * nlp_portname fields equals 0 to avoid any ndlp on the
1445 * nodelist never to be used.
1446 */
1447 if (ndlp->nlp_DID == 0) {
1448 spin_lock_irq(&phba->ndlp_lock);
1449 NLP_SET_FREE_REQ(ndlp);
1450 spin_unlock_irq(&phba->ndlp_lock);
1451 }
0ff10d46 1452
58da1ffb
JS
1453 /* Two ndlps cannot have the same did on the nodelist */
1454 ndlp->nlp_DID = keepDID;
19ca7609
JS
1455 if (phba->sli_rev == LPFC_SLI_REV4)
1456 memcpy(&ndlp->active_rrqs.xri_bitmap,
1457 &rrq.xri_bitmap,
1458 sizeof(ndlp->active_rrqs.xri_bitmap));
2e0fef85 1459 lpfc_drop_node(vport, ndlp);
87af33fe 1460 }
92795650 1461 else {
2e0fef85 1462 lpfc_unreg_rpi(vport, ndlp);
58da1ffb
JS
1463 /* Two ndlps cannot have the same did */
1464 ndlp->nlp_DID = keepDID;
19ca7609
JS
1465 if (phba->sli_rev == LPFC_SLI_REV4)
1466 memcpy(&ndlp->active_rrqs.xri_bitmap,
1467 &rrq.xri_bitmap,
1468 sizeof(ndlp->active_rrqs.xri_bitmap));
2e0fef85 1469 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
38b92ef8
JS
1470 /* Since we are swapping the ndlp passed in with the new one
1471 * and the did has already been swapped, copy over the
1472 * state and names.
1473 */
1474 memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
1475 sizeof(struct lpfc_name));
1476 memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
1477 sizeof(struct lpfc_name));
1478 new_ndlp->nlp_state = ndlp->nlp_state;
1479 /* Fix up the rport accordingly */
1480 rport = ndlp->rport;
1481 if (rport) {
1482 rdata = rport->dd_data;
1483 put_node = rdata->pnode != NULL;
1484 put_rport = ndlp->rport != NULL;
1485 rdata->pnode = NULL;
1486 ndlp->rport = NULL;
1487 if (put_node)
1488 lpfc_nlp_put(ndlp);
1489 if (put_rport)
1490 put_device(&rport->dev);
1491 }
92795650 1492 }
488d1469
JS
1493 return new_ndlp;
1494}
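
/*
 * Illustrative sketch only, not part of the driver build: it shows the
 * WWPN-match decision that lpfc_plogi_confirm_nport() above performs.
 * The 8-byte world wide port name carried in the PLOGI service parameters
 * is compared with the name already cached for the ndlp; a mismatch means
 * the login really belongs to a different node entry, which is then used
 * (or allocated) and has its identity swapped with the original ndlp.
 * Types and names below are simplified stand-ins, not lpfc symbols.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sketch_wwpn { uint8_t b[8]; };     /* stand-in for struct lpfc_name */

struct sketch_node {
	uint32_t           did;           /* N_Port ID                     */
	struct sketch_wwpn portname;      /* WWPN cached for this node     */
};

/* Keep @ndlp when the WWPN in the PLOGI response matches; otherwise use
 * @found_by_wwpn, the result of a lookup by WWPN (may be NULL, in which
 * case the caller would allocate a fresh node). */
static struct sketch_node *confirm_nport(struct sketch_node *ndlp,
					 const struct sketch_wwpn *sp,
					 struct sketch_node *found_by_wwpn)
{
	if (!memcmp(&ndlp->portname, sp, sizeof(*sp)))
		return ndlp;
	return found_by_wwpn;
}

int main(void)
{
	struct sketch_node n = {
		.did = 0x010200,
		.portname = { { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x01, 0x02, 0x03 } },
	};
	struct sketch_wwpn rsp = { { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x01, 0x02, 0x03 } };

	printf("WWPN match keeps the original ndlp: %s\n",
	       confirm_nport(&n, &rsp, NULL) == &n ? "yes" : "no");
	return 0;
}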
1495
e59058c4 1496/**
3621a710 1497 * lpfc_end_rscn - Check and handle more rscn for a vport
e59058c4
JS
1498 * @vport: pointer to a host virtual N_Port data structure.
1499 *
1500 * This routine checks whether more Registration State Change
1501 * Notifications (RSCNs) came in while the discovery state machine was in
1502 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1503 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
 1504 * FC_RSCN_MODE bit will be cleared for the @vport to mark the end of
 1505 * RSCN handling.
1506 **/
87af33fe
JS
1507void
1508lpfc_end_rscn(struct lpfc_vport *vport)
1509{
1510 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1511
1512 if (vport->fc_flag & FC_RSCN_MODE) {
1513 /*
1514 * Check to see if more RSCNs came in while we were
1515 * processing this one.
1516 */
1517 if (vport->fc_rscn_id_cnt ||
1518 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1519 lpfc_els_handle_rscn(vport);
1520 else {
1521 spin_lock_irq(shost->host_lock);
1522 vport->fc_flag &= ~FC_RSCN_MODE;
1523 spin_unlock_irq(shost->host_lock);
1524 }
1525 }
1526}
1527
19ca7609
JS
1528/**
 1529 * lpfc_cmpl_els_rrq - Completion handler for els RRQs.
1530 * @phba: pointer to lpfc hba data structure.
1531 * @cmdiocb: pointer to lpfc command iocb data structure.
1532 * @rspiocb: pointer to lpfc response iocb data structure.
1533 *
1534 * This routine will call the clear rrq function to free the rrq and
1535 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1536 * exist then the clear_rrq is still called because the rrq needs to
1537 * be freed.
1538 **/
1539
1540static void
1541lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1542 struct lpfc_iocbq *rspiocb)
1543{
1544 struct lpfc_vport *vport = cmdiocb->vport;
1545 IOCB_t *irsp;
1546 struct lpfc_nodelist *ndlp;
1547 struct lpfc_node_rrq *rrq;
1548
1549 /* we pass cmdiocb to state machine which needs rspiocb as well */
1550 rrq = cmdiocb->context_un.rrq;
1551 cmdiocb->context_un.rsp_iocb = rspiocb;
1552
1553 irsp = &rspiocb->iocb;
1554 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1555 "RRQ cmpl: status:x%x/x%x did:x%x",
1556 irsp->ulpStatus, irsp->un.ulpWord[4],
1557 irsp->un.elsreq64.remoteID);
1558
1559 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1560 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
1561 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1562 "2882 RRQ completes to NPort x%x "
1563 "with no ndlp. Data: x%x x%x x%x\n",
1564 irsp->un.elsreq64.remoteID,
1565 irsp->ulpStatus, irsp->un.ulpWord[4],
1566 irsp->ulpIoTag);
1567 goto out;
1568 }
1569
1570 /* rrq completes to NPort <nlp_DID> */
1571 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1572 "2880 RRQ completes to NPort x%x "
1573 "Data: x%x x%x x%x x%x x%x\n",
1574 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1575 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1576
1577 if (irsp->ulpStatus) {
1578 /* Check for retry */
 1579 /* RRQ failed. Don't print the vport to vport rjts */
1580 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1581 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1582 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1583 (phba)->pport->cfg_log_verbose & LOG_ELS)
1584 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1585 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
1586 ndlp->nlp_DID, irsp->ulpStatus,
1587 irsp->un.ulpWord[4]);
1588 }
1589out:
1590 if (rrq)
1591 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1592 lpfc_els_free_iocb(phba, cmdiocb);
1593 return;
1594}
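
/*
 * Illustrative sketch only, not driver code: lpfc_cmpl_els_rrq() above frees
 * the RRQ on every completion path -- even when the node lookup fails, the
 * code jumps to the common "out:" label so the RRQ is still released.  The
 * toy completion below mirrors that shape; all names are made up.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_node { int did; };
struct toy_rrq  { int xritag; struct toy_node *ndlp; };

/* Stand-in for a node lookup that may fail by the time the RRQ completes. */
static struct toy_node *find_node(struct toy_node *n, int still_present)
{
	return still_present ? n : NULL;
}

static void rrq_complete(struct toy_rrq *rrq, int node_still_present)
{
	struct toy_node *ndlp = find_node(rrq->ndlp, node_still_present);

	if (!ndlp) {
		printf("RRQ completes with no ndlp, xri %d\n", rrq->xritag);
		goto out;
	}
	printf("RRQ completes to node %x, xri %d\n", ndlp->did, rrq->xritag);
out:
	free(rrq);                        /* the RRQ is freed on every path */
}

int main(void)
{
	struct toy_node n = { .did = 0x010200 };
	struct toy_rrq *a = malloc(sizeof(*a));
	struct toy_rrq *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	a->xritag = 17; a->ndlp = &n;
	b->xritag = 18; b->ndlp = &n;
	rrq_complete(a, 1);               /* node still present             */
	rrq_complete(b, 0);               /* node gone, RRQ freed anyway    */
	return 0;
}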
e59058c4 1595/**
3621a710 1596 * lpfc_cmpl_els_plogi - Completion callback function for plogi
e59058c4
JS
1597 * @phba: pointer to lpfc hba data structure.
1598 * @cmdiocb: pointer to lpfc command iocb data structure.
1599 * @rspiocb: pointer to lpfc response iocb data structure.
1600 *
1601 * This routine is the completion callback function for issuing the Port
1602 * Login (PLOGI) command. For PLOGI completion, there must be an active
1603 * ndlp on the vport node list that matches the remote node ID from the
25985edc 1604 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply
e59058c4
JS
1605 * ignored and command IOCB released. The PLOGI response IOCB status is
 1606 * checked for error conditions. If an error status is reported, PLOGI
1607 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1608 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1609 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
1610 * (DSM) is set for this PLOGI completion. Finally, it checks whether
1611 * there are additional N_Port nodes with the vport that need to perform
 1612 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1613 * PLOGIs.
1614 **/
dea3101e 1615static void
2e0fef85
JS
1616lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1617 struct lpfc_iocbq *rspiocb)
dea3101e 1618{
2e0fef85
JS
1619 struct lpfc_vport *vport = cmdiocb->vport;
1620 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 1621 IOCB_t *irsp;
dea3101e 1622 struct lpfc_nodelist *ndlp;
92795650 1623 struct lpfc_dmabuf *prsp;
dea3101e
JB
1624 int disc, rc, did, type;
1625
dea3101e
JB
1626 /* we pass cmdiocb to state machine which needs rspiocb as well */
1627 cmdiocb->context_un.rsp_iocb = rspiocb;
1628
1629 irsp = &rspiocb->iocb;
858c9f6c
JS
1630 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1631 "PLOGI cmpl: status:x%x/x%x did:x%x",
1632 irsp->ulpStatus, irsp->un.ulpWord[4],
1633 irsp->un.elsreq64.remoteID);
1634
2e0fef85 1635 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
e47c9093 1636 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
e8b62011
JS
1637 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1638 "0136 PLOGI completes to NPort x%x "
1639 "with no ndlp. Data: x%x x%x x%x\n",
1640 irsp->un.elsreq64.remoteID,
1641 irsp->ulpStatus, irsp->un.ulpWord[4],
1642 irsp->ulpIoTag);
488d1469 1643 goto out;
ed957684 1644 }
dea3101e
JB
1645
1646 /* Since ndlp can be freed in the disc state machine, note if this node
1647 * is being used during discovery.
1648 */
2e0fef85 1649 spin_lock_irq(shost->host_lock);
dea3101e 1650 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
488d1469 1651 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2e0fef85 1652 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1653 rc = 0;
1654
1655 /* PLOGI completes to NPort <nlp_DID> */
e8b62011
JS
1656 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1657 "0102 PLOGI completes to NPort x%x "
1658 "Data: x%x x%x x%x x%x x%x\n",
1659 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1660 irsp->ulpTimeout, disc, vport->num_disc_nodes);
dea3101e 1661 /* Check to see if link went down during discovery */
2e0fef85
JS
1662 if (lpfc_els_chk_latt(vport)) {
1663 spin_lock_irq(shost->host_lock);
dea3101e 1664 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 1665 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1666 goto out;
1667 }
1668
1669 /* ndlp could be freed in DSM, save these values now */
1670 type = ndlp->nlp_type;
1671 did = ndlp->nlp_DID;
1672
1673 if (irsp->ulpStatus) {
1674 /* Check for retry */
1675 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1676 /* ELS command is being retried */
1677 if (disc) {
2e0fef85 1678 spin_lock_irq(shost->host_lock);
dea3101e 1679 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 1680 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1681 }
1682 goto out;
1683 }
2a9bf3d0
JS
 1684 /* PLOGI failed. Don't print the vport to vport rjts */
1685 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1686 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1687 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1688 (phba)->pport->cfg_log_verbose & LOG_ELS)
1689 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
e40a02c1
JS
1690 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1691 ndlp->nlp_DID, irsp->ulpStatus,
1692 irsp->un.ulpWord[4]);
dea3101e 1693 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 1694 if (lpfc_error_lost_link(irsp))
c9f8735b 1695 rc = NLP_STE_FREED_NODE;
e47c9093 1696 else
2e0fef85 1697 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1698 NLP_EVT_CMPL_PLOGI);
dea3101e
JB
1699 } else {
1700 /* Good status, call state machine */
92795650 1701 prsp = list_entry(((struct lpfc_dmabuf *)
92d7f7b0
JS
1702 cmdiocb->context2)->list.next,
1703 struct lpfc_dmabuf, list);
1704 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
2e0fef85 1705 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1706 NLP_EVT_CMPL_PLOGI);
dea3101e
JB
1707 }
1708
2e0fef85 1709 if (disc && vport->num_disc_nodes) {
dea3101e 1710 /* Check to see if there are more PLOGIs to be sent */
2e0fef85 1711 lpfc_more_plogi(vport);
dea3101e 1712
2e0fef85
JS
1713 if (vport->num_disc_nodes == 0) {
1714 spin_lock_irq(shost->host_lock);
1715 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1716 spin_unlock_irq(shost->host_lock);
dea3101e 1717
2e0fef85 1718 lpfc_can_disctmo(vport);
87af33fe 1719 lpfc_end_rscn(vport);
dea3101e
JB
1720 }
1721 }
1722
1723out:
1724 lpfc_els_free_iocb(phba, cmdiocb);
1725 return;
1726}
1727
e59058c4 1728/**
3621a710 1729 * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport
e59058c4
JS
1730 * @vport: pointer to a host virtual N_Port data structure.
1731 * @did: destination port identifier.
1732 * @retry: number of retries to the command IOCB.
1733 *
1734 * This routine issues a Port Login (PLOGI) command to a remote N_Port
1735 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
1736 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
 1737 * This routine constructs the proper fields of the PLOGI IOCB and invokes
1738 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
1739 *
1740 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1741 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1742 * will be stored into the context1 field of the IOCB for the completion
1743 * callback function to the PLOGI ELS command.
1744 *
1745 * Return code
1746 * 0 - Successfully issued a plogi for @vport
1747 * 1 - failed to issue a plogi for @vport
1748 **/
dea3101e 1749int
2e0fef85 1750lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
dea3101e 1751{
2e0fef85 1752 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
1753 struct serv_parm *sp;
1754 IOCB_t *icmd;
98c9ea5c 1755 struct lpfc_nodelist *ndlp;
dea3101e 1756 struct lpfc_iocbq *elsiocb;
dea3101e
JB
1757 struct lpfc_sli *psli;
1758 uint8_t *pcmd;
1759 uint16_t cmdsize;
92d7f7b0 1760 int ret;
dea3101e
JB
1761
1762 psli = &phba->sli;
dea3101e 1763
98c9ea5c 1764 ndlp = lpfc_findnode_did(vport, did);
e47c9093
JS
1765 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1766 ndlp = NULL;
98c9ea5c 1767
e47c9093 1768 /* If ndlp is not NULL, we will bump the reference count on it */
92d7f7b0 1769 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
98c9ea5c 1770 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
2e0fef85 1771 ELS_CMD_PLOGI);
c9f8735b
JW
1772 if (!elsiocb)
1773 return 1;
dea3101e
JB
1774
1775 icmd = &elsiocb->iocb;
1776 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1777
1778 /* For PLOGI request, remainder of payload is service parameters */
1779 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
92d7f7b0
JS
1780 pcmd += sizeof(uint32_t);
1781 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
dea3101e
JB
1782 sp = (struct serv_parm *) pcmd;
1783
5ac6b303
JS
1784 /*
 1785 * If we are an N_Port connected to a Fabric, fix up the parameters so
 1786 * logins to devices on remote loops work.
1787 */
1788 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
1789 sp->cmn.altBbCredit = 1;
1790
dea3101e
JB
1791 if (sp->cmn.fcphLow < FC_PH_4_3)
1792 sp->cmn.fcphLow = FC_PH_4_3;
1793
1794 if (sp->cmn.fcphHigh < FC_PH3)
1795 sp->cmn.fcphHigh = FC_PH3;
1796
858c9f6c
JS
1797 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1798 "Issue PLOGI: did:x%x",
1799 did, 0, 0);
1800
dea3101e
JB
1801 phba->fc_stat.elsXmitPLOGI++;
1802 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
3772a991 1803 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
92d7f7b0
JS
1804
1805 if (ret == IOCB_ERROR) {
dea3101e 1806 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 1807 return 1;
dea3101e 1808 }
c9f8735b 1809 return 0;
dea3101e
JB
1810}
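
/*
 * Illustrative sketch only, not driver code: the PLOGI payload built by
 * lpfc_issue_els_plogi() above is a 32-bit ELS command word followed by a
 * copy of the local service parameters, with minimum FC-PH version fields
 * enforced.  The command word value, the trimmed parameter block and the
 * minimum level below are placeholders, not the lpfc_hw.h definitions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_CMD_PLOGI	0x03000000u	/* placeholder command word  */
#define SKETCH_MIN_FCPH		0x09		/* placeholder minimum level */

struct sketch_sparm {				/* trimmed service parameters */
	uint8_t fcphLow;
	uint8_t fcphHigh;
	uint8_t other[14];
};

static size_t build_plogi_payload(uint8_t *pcmd,
				  const struct sketch_sparm *local)
{
	uint32_t cmd = SKETCH_CMD_PLOGI;
	struct sketch_sparm *sp;

	memcpy(pcmd, &cmd, sizeof(cmd));		   /* word 0: command */
	memcpy(pcmd + sizeof(cmd), local, sizeof(*local)); /* then sparams   */

	sp = (struct sketch_sparm *)(pcmd + sizeof(cmd));
	if (sp->fcphLow < SKETCH_MIN_FCPH)		   /* enforce minimum */
		sp->fcphLow = SKETCH_MIN_FCPH;

	return sizeof(cmd) + sizeof(*local);
}

int main(void)
{
	uint8_t buf[64];
	struct sketch_sparm local = { .fcphLow = 0x08, .fcphHigh = 0x20 };

	printf("PLOGI payload is %zu bytes\n", build_plogi_payload(buf, &local));
	return 0;
}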
1811
e59058c4 1812/**
3621a710 1813 * lpfc_cmpl_els_prli - Completion callback function for prli
e59058c4
JS
1814 * @phba: pointer to lpfc hba data structure.
1815 * @cmdiocb: pointer to lpfc command iocb data structure.
1816 * @rspiocb: pointer to lpfc response iocb data structure.
1817 *
1818 * This routine is the completion callback function for a Process Login
1819 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
1820 * status. If there is error status reported, PRLI retry shall be attempted
1821 * by invoking the lpfc_els_retry() routine. Otherwise, the state
1822 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
1823 * ndlp to mark the PRLI completion.
1824 **/
dea3101e 1825static void
2e0fef85
JS
1826lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1827 struct lpfc_iocbq *rspiocb)
dea3101e 1828{
2e0fef85
JS
1829 struct lpfc_vport *vport = cmdiocb->vport;
1830 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e
JB
1831 IOCB_t *irsp;
1832 struct lpfc_sli *psli;
1833 struct lpfc_nodelist *ndlp;
1834
1835 psli = &phba->sli;
1836 /* we pass cmdiocb to state machine which needs rspiocb as well */
1837 cmdiocb->context_un.rsp_iocb = rspiocb;
1838
1839 irsp = &(rspiocb->iocb);
1840 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2e0fef85 1841 spin_lock_irq(shost->host_lock);
dea3101e 1842 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2e0fef85 1843 spin_unlock_irq(shost->host_lock);
dea3101e 1844
858c9f6c
JS
1845 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1846 "PRLI cmpl: status:x%x/x%x did:x%x",
1847 irsp->ulpStatus, irsp->un.ulpWord[4],
1848 ndlp->nlp_DID);
dea3101e 1849 /* PRLI completes to NPort <nlp_DID> */
e8b62011
JS
1850 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1851 "0103 PRLI completes to NPort x%x "
1852 "Data: x%x x%x x%x x%x\n",
1853 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1854 irsp->ulpTimeout, vport->num_disc_nodes);
dea3101e 1855
2e0fef85 1856 vport->fc_prli_sent--;
dea3101e 1857 /* Check to see if link went down during discovery */
2e0fef85 1858 if (lpfc_els_chk_latt(vport))
dea3101e
JB
1859 goto out;
1860
1861 if (irsp->ulpStatus) {
1862 /* Check for retry */
1863 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1864 /* ELS command is being retried */
1865 goto out;
1866 }
1867 /* PRLI failed */
e40a02c1
JS
1868 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1869 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
1870 ndlp->nlp_DID, irsp->ulpStatus,
1871 irsp->un.ulpWord[4]);
dea3101e 1872 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 1873 if (lpfc_error_lost_link(irsp))
dea3101e 1874 goto out;
e47c9093 1875 else
2e0fef85 1876 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1877 NLP_EVT_CMPL_PRLI);
e47c9093 1878 } else
dea3101e 1879 /* Good status, call state machine */
2e0fef85 1880 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1881 NLP_EVT_CMPL_PRLI);
dea3101e
JB
1882out:
1883 lpfc_els_free_iocb(phba, cmdiocb);
1884 return;
1885}
1886
e59058c4 1887/**
3621a710 1888 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
e59058c4
JS
1889 * @vport: pointer to a host virtual N_Port data structure.
1890 * @ndlp: pointer to a node-list data structure.
1891 * @retry: number of retries to the command IOCB.
1892 *
1893 * This routine issues a Process Login (PRLI) ELS command for the
1894 * @vport. The PRLI service parameters are set up in the payload of the
1895 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
1896 * is put to the IOCB completion callback func field before invoking the
1897 * routine lpfc_sli_issue_iocb() to send out PRLI command.
1898 *
1899 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1900 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1901 * will be stored into the context1 field of the IOCB for the completion
1902 * callback function to the PRLI ELS command.
1903 *
1904 * Return code
1905 * 0 - successfully issued prli iocb command for @vport
1906 * 1 - failed to issue prli iocb command for @vport
1907 **/
dea3101e 1908int
2e0fef85 1909lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e
JB
1910 uint8_t retry)
1911{
2e0fef85
JS
1912 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1913 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
1914 PRLI *npr;
1915 IOCB_t *icmd;
1916 struct lpfc_iocbq *elsiocb;
dea3101e
JB
1917 uint8_t *pcmd;
1918 uint16_t cmdsize;
1919
92d7f7b0 1920 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2e0fef85
JS
1921 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1922 ndlp->nlp_DID, ELS_CMD_PRLI);
488d1469 1923 if (!elsiocb)
c9f8735b 1924 return 1;
dea3101e
JB
1925
1926 icmd = &elsiocb->iocb;
1927 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1928
1929 /* For PRLI request, remainder of payload is service parameters */
92d7f7b0 1930 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
dea3101e 1931 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
92d7f7b0 1932 pcmd += sizeof(uint32_t);
dea3101e
JB
1933
1934 /* For PRLI, remainder of payload is PRLI parameter page */
1935 npr = (PRLI *) pcmd;
1936 /*
1937 * If our firmware version is 3.20 or later,
1938 * set the following bits for FC-TAPE support.
1939 */
1940 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
1941 npr->ConfmComplAllowed = 1;
1942 npr->Retry = 1;
1943 npr->TaskRetryIdReq = 1;
1944 }
1945 npr->estabImagePair = 1;
1946 npr->readXferRdyDis = 1;
1947
1948 /* For FCP support */
1949 npr->prliType = PRLI_FCP_TYPE;
1950 npr->initiatorFunc = 1;
1951
858c9f6c
JS
1952 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1953 "Issue PRLI: did:x%x",
1954 ndlp->nlp_DID, 0, 0);
1955
dea3101e
JB
1956 phba->fc_stat.elsXmitPRLI++;
1957 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
2e0fef85 1958 spin_lock_irq(shost->host_lock);
dea3101e 1959 ndlp->nlp_flag |= NLP_PRLI_SND;
2e0fef85 1960 spin_unlock_irq(shost->host_lock);
3772a991
JS
1961 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1962 IOCB_ERROR) {
2e0fef85 1963 spin_lock_irq(shost->host_lock);
dea3101e 1964 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2e0fef85 1965 spin_unlock_irq(shost->host_lock);
dea3101e 1966 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 1967 return 1;
dea3101e 1968 }
2e0fef85 1969 vport->fc_prli_sent++;
c9f8735b 1970 return 0;
dea3101e
JB
1971}
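
/*
 * Illustrative sketch only, not driver code: lpfc_issue_els_prli() above
 * zeroes the PRLI parameter page and then sets individual capability bits,
 * gating the FC-TAPE related bits on the firmware feature level.  The
 * bitfield layout below is a simplified stand-in for the real PRLI page.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sketch_prli_page {
	unsigned int ConfmComplAllowed:1;	/* FC-TAPE bits */
	unsigned int Retry:1;
	unsigned int TaskRetryIdReq:1;
	unsigned int estabImagePair:1;
	unsigned int readXferRdyDis:1;
	unsigned int initiatorFunc:1;
	uint8_t      prliType;			/* service type page */
};

static void fill_prli_page(struct sketch_prli_page *npr, int fw_has_fctape)
{
	memset(npr, 0, sizeof(*npr));
	if (fw_has_fctape) {			/* e.g. feature level >= 3.20 */
		npr->ConfmComplAllowed = 1;
		npr->Retry = 1;
		npr->TaskRetryIdReq = 1;
	}
	npr->estabImagePair = 1;
	npr->readXferRdyDis = 1;
	npr->prliType = 0x08;			/* FCP type code (placeholder) */
	npr->initiatorFunc = 1;
}

int main(void)
{
	struct sketch_prli_page p;

	fill_prli_page(&p, 1);
	printf("Retry=%u initiatorFunc=%u\n", p.Retry, p.initiatorFunc);
	return 0;
}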
1972
90160e01 1973/**
3621a710 1974 * lpfc_rscn_disc - Perform rscn discovery for a vport
90160e01
JS
1975 * @vport: pointer to a host virtual N_Port data structure.
1976 *
1977 * This routine performs Registration State Change Notification (RSCN)
1978 * discovery for a @vport. If the @vport's node port recovery count is not
1979 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
 1980 * the nodes that need recovery. If no PLOGIs were needed through
 1981 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
 1982 * invoked to check for and handle any additional RSCNs that came in
 1983 * while the current ones were being processed.
1984 **/
1985static void
1986lpfc_rscn_disc(struct lpfc_vport *vport)
1987{
1988 lpfc_can_disctmo(vport);
1989
1990 /* RSCN discovery */
1991 /* go thru NPR nodes and issue ELS PLOGIs */
1992 if (vport->fc_npr_cnt)
1993 if (lpfc_els_disc_plogi(vport))
1994 return;
1995
1996 lpfc_end_rscn(vport);
1997}
1998
1999/**
3621a710 2000 * lpfc_adisc_done - Complete the adisc phase of discovery
90160e01
JS
2001 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2002 *
2003 * This function is called when the final ADISC is completed during discovery.
2004 * This function handles clearing link attention or issuing reg_vpi depending
2005 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2006 * discovery.
2007 * This function is called with no locks held.
2008 **/
2009static void
2010lpfc_adisc_done(struct lpfc_vport *vport)
2011{
2012 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2013 struct lpfc_hba *phba = vport->phba;
2014
2015 /*
2016 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2017 * and continue discovery.
2018 */
2019 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
6fb120a7
JS
2020 !(vport->fc_flag & FC_RSCN_MODE) &&
2021 (phba->sli_rev < LPFC_SLI_REV4)) {
90160e01
JS
2022 lpfc_issue_reg_vpi(phba, vport);
2023 return;
2024 }
2025 /*
2026 * For SLI2, we need to set port_state to READY
2027 * and continue discovery.
2028 */
2029 if (vport->port_state < LPFC_VPORT_READY) {
2030 /* If we get here, there is nothing to ADISC */
2031 if (vport->port_type == LPFC_PHYSICAL_PORT)
2032 lpfc_issue_clear_la(phba, vport);
2033 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2034 vport->num_disc_nodes = 0;
2035 /* go thru NPR list, issue ELS PLOGIs */
2036 if (vport->fc_npr_cnt)
2037 lpfc_els_disc_plogi(vport);
2038 if (!vport->num_disc_nodes) {
2039 spin_lock_irq(shost->host_lock);
2040 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2041 spin_unlock_irq(shost->host_lock);
2042 lpfc_can_disctmo(vport);
2043 lpfc_end_rscn(vport);
2044 }
2045 }
2046 vport->port_state = LPFC_VPORT_READY;
2047 } else
2048 lpfc_rscn_disc(vport);
2049}
2050
e59058c4 2051/**
3621a710 2052 * lpfc_more_adisc - Issue more adisc as needed
e59058c4
JS
2053 * @vport: pointer to a host virtual N_Port data structure.
2054 *
 2055 * This routine determines whether there are more ndlps on a @vport's
 2056 * node list that need to have Address Discover (ADISC) issued. If so, it will
2057 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2058 * remaining nodes which need to have ADISC sent.
2059 **/
0ff10d46 2060void
2e0fef85 2061lpfc_more_adisc(struct lpfc_vport *vport)
dea3101e
JB
2062{
2063 int sentadisc;
2064
2e0fef85
JS
2065 if (vport->num_disc_nodes)
2066 vport->num_disc_nodes--;
dea3101e 2067 /* Continue discovery with <num_disc_nodes> ADISCs to go */
e8b62011
JS
2068 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2069 "0210 Continue discovery with %d ADISCs to go "
2070 "Data: x%x x%x x%x\n",
2071 vport->num_disc_nodes, vport->fc_adisc_cnt,
2072 vport->fc_flag, vport->port_state);
dea3101e 2073 /* Check to see if there are more ADISCs to be sent */
2e0fef85
JS
2074 if (vport->fc_flag & FC_NLP_MORE) {
2075 lpfc_set_disctmo(vport);
2076 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2077 sentadisc = lpfc_els_disc_adisc(vport);
dea3101e 2078 }
90160e01
JS
2079 if (!vport->num_disc_nodes)
2080 lpfc_adisc_done(vport);
dea3101e
JB
2081 return;
2082}
2083
e59058c4 2084/**
3621a710 2085 * lpfc_cmpl_els_adisc - Completion callback function for adisc
e59058c4
JS
2086 * @phba: pointer to lpfc hba data structure.
2087 * @cmdiocb: pointer to lpfc command iocb data structure.
2088 * @rspiocb: pointer to lpfc response iocb data structure.
2089 *
2090 * This routine is the completion function for issuing the Address Discover
2091 * (ADISC) command. It first checks to see whether link went down during
 2092 * the discovery process. If so, the node will be marked for node port
 2093 * recovery so that the link attention handler can issue a discovery IOCB,
 2094 * and the routine exits. Otherwise, the response status is checked. If an error was reported
2095 * in the response status, the ADISC command shall be retried by invoking
2096 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
2097 * the response status, the state machine is invoked to set transition
2098 * with respect to NLP_EVT_CMPL_ADISC event.
2099 **/
dea3101e 2100static void
2e0fef85
JS
2101lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2102 struct lpfc_iocbq *rspiocb)
dea3101e 2103{
2e0fef85
JS
2104 struct lpfc_vport *vport = cmdiocb->vport;
2105 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 2106 IOCB_t *irsp;
dea3101e 2107 struct lpfc_nodelist *ndlp;
2e0fef85 2108 int disc;
dea3101e
JB
2109
2110 /* we pass cmdiocb to state machine which needs rspiocb as well */
2111 cmdiocb->context_un.rsp_iocb = rspiocb;
2112
2113 irsp = &(rspiocb->iocb);
2114 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
dea3101e 2115
858c9f6c
JS
2116 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2117 "ADISC cmpl: status:x%x/x%x did:x%x",
2118 irsp->ulpStatus, irsp->un.ulpWord[4],
2119 ndlp->nlp_DID);
2120
dea3101e
JB
2121 /* Since ndlp can be freed in the disc state machine, note if this node
2122 * is being used during discovery.
2123 */
2e0fef85 2124 spin_lock_irq(shost->host_lock);
dea3101e 2125 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
c9f8735b 2126 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2e0fef85 2127 spin_unlock_irq(shost->host_lock);
dea3101e 2128 /* ADISC completes to NPort <nlp_DID> */
e8b62011
JS
2129 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2130 "0104 ADISC completes to NPort x%x "
2131 "Data: x%x x%x x%x x%x x%x\n",
2132 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2133 irsp->ulpTimeout, disc, vport->num_disc_nodes);
dea3101e 2134 /* Check to see if link went down during discovery */
2e0fef85
JS
2135 if (lpfc_els_chk_latt(vport)) {
2136 spin_lock_irq(shost->host_lock);
dea3101e 2137 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 2138 spin_unlock_irq(shost->host_lock);
dea3101e
JB
2139 goto out;
2140 }
2141
2142 if (irsp->ulpStatus) {
2143 /* Check for retry */
2144 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2145 /* ELS command is being retried */
2146 if (disc) {
2e0fef85 2147 spin_lock_irq(shost->host_lock);
dea3101e 2148 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85
JS
2149 spin_unlock_irq(shost->host_lock);
2150 lpfc_set_disctmo(vport);
dea3101e
JB
2151 }
2152 goto out;
2153 }
2154 /* ADISC failed */
e40a02c1
JS
2155 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2156 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2157 ndlp->nlp_DID, irsp->ulpStatus,
2158 irsp->un.ulpWord[4]);
dea3101e 2159 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 2160 if (!lpfc_error_lost_link(irsp))
2e0fef85 2161 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
858c9f6c 2162 NLP_EVT_CMPL_ADISC);
e47c9093 2163 } else
dea3101e 2164 /* Good status, call state machine */
2e0fef85 2165 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
dea3101e 2166 NLP_EVT_CMPL_ADISC);
dea3101e 2167
90160e01
JS
2168 /* Check to see if there are more ADISCs to be sent */
2169 if (disc && vport->num_disc_nodes)
2e0fef85 2170 lpfc_more_adisc(vport);
dea3101e
JB
2171out:
2172 lpfc_els_free_iocb(phba, cmdiocb);
2173 return;
2174}
2175
e59058c4 2176/**
3621a710 2177 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
e59058c4
JS
2178 * @vport: pointer to a virtual N_Port data structure.
2179 * @ndlp: pointer to a node-list data structure.
2180 * @retry: number of retries to the command IOCB.
2181 *
2182 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2183 * @vport. It prepares the payload of the ADISC ELS command, updates the
 2184 * flags and state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2185 * to issue the ADISC ELS command.
2186 *
2187 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2188 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2189 * will be stored into the context1 field of the IOCB for the completion
2190 * callback function to the ADISC ELS command.
2191 *
2192 * Return code
2193 * 0 - successfully issued adisc
2194 * 1 - failed to issue adisc
2195 **/
dea3101e 2196int
2e0fef85 2197lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e
JB
2198 uint8_t retry)
2199{
2e0fef85
JS
2200 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2201 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
2202 ADISC *ap;
2203 IOCB_t *icmd;
2204 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2205 uint8_t *pcmd;
2206 uint16_t cmdsize;
2207
92d7f7b0 2208 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2e0fef85
JS
2209 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2210 ndlp->nlp_DID, ELS_CMD_ADISC);
488d1469 2211 if (!elsiocb)
c9f8735b 2212 return 1;
dea3101e
JB
2213
2214 icmd = &elsiocb->iocb;
2215 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2216
2217 /* For ADISC request, remainder of payload is service parameters */
2218 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
92d7f7b0 2219 pcmd += sizeof(uint32_t);
dea3101e
JB
2220
2221 /* Fill in ADISC payload */
2222 ap = (ADISC *) pcmd;
2223 ap->hardAL_PA = phba->fc_pref_ALPA;
92d7f7b0
JS
2224 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2225 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 2226 ap->DID = be32_to_cpu(vport->fc_myDID);
dea3101e 2227
858c9f6c
JS
2228 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2229 "Issue ADISC: did:x%x",
2230 ndlp->nlp_DID, 0, 0);
2231
dea3101e
JB
2232 phba->fc_stat.elsXmitADISC++;
2233 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2e0fef85 2234 spin_lock_irq(shost->host_lock);
dea3101e 2235 ndlp->nlp_flag |= NLP_ADISC_SND;
2e0fef85 2236 spin_unlock_irq(shost->host_lock);
3772a991
JS
2237 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2238 IOCB_ERROR) {
2e0fef85 2239 spin_lock_irq(shost->host_lock);
dea3101e 2240 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2e0fef85 2241 spin_unlock_irq(shost->host_lock);
dea3101e 2242 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2243 return 1;
dea3101e 2244 }
c9f8735b 2245 return 0;
dea3101e
JB
2246}
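
/*
 * Illustrative sketch only, not driver code: the ADISC payload filled in by
 * lpfc_issue_els_adisc() above carries the hard ALPA, the local port and
 * node names, and the local N_Port ID stored in big-endian byte order.  In
 * userspace the same byte-order conversion can be expressed with htonl();
 * the struct below is a trimmed stand-in for the ADISC wire format.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct sketch_adisc {
	uint8_t  portname[8];
	uint8_t  nodename[8];
	uint32_t did_be;		/* N_Port ID, big-endian on the wire */
};

static void fill_adisc_did(struct sketch_adisc *ap, uint32_t my_did)
{
	ap->did_be = htonl(my_did);	/* host order -> big-endian          */
}

int main(void)
{
	struct sketch_adisc ap = { { 0 }, { 0 }, 0 };
	const uint8_t *b = (const uint8_t *)&ap.did_be;

	fill_adisc_did(&ap, 0x010200);
	printf("wire DID bytes: %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;
}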
2247
e59058c4 2248/**
3621a710 2249 * lpfc_cmpl_els_logo - Completion callback function for logo
e59058c4
JS
2250 * @phba: pointer to lpfc hba data structure.
2251 * @cmdiocb: pointer to lpfc command iocb data structure.
2252 * @rspiocb: pointer to lpfc response iocb data structure.
2253 *
2254 * This routine is the completion function for issuing the ELS Logout (LOGO)
2255 * command. If no error status was reported from the LOGO response, the
2256 * state machine of the associated ndlp shall be invoked for transition with
2257 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2258 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2259 **/
dea3101e 2260static void
2e0fef85
JS
2261lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2262 struct lpfc_iocbq *rspiocb)
dea3101e 2263{
2e0fef85
JS
2264 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2265 struct lpfc_vport *vport = ndlp->vport;
2266 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e
JB
2267 IOCB_t *irsp;
2268 struct lpfc_sli *psli;
92494144 2269 struct lpfcMboxq *mbox;
dea3101e
JB
2270
2271 psli = &phba->sli;
2272 /* we pass cmdiocb to state machine which needs rspiocb as well */
2273 cmdiocb->context_un.rsp_iocb = rspiocb;
2274
2275 irsp = &(rspiocb->iocb);
2e0fef85 2276 spin_lock_irq(shost->host_lock);
dea3101e 2277 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2e0fef85 2278 spin_unlock_irq(shost->host_lock);
dea3101e 2279
858c9f6c
JS
2280 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2281 "LOGO cmpl: status:x%x/x%x did:x%x",
2282 irsp->ulpStatus, irsp->un.ulpWord[4],
2283 ndlp->nlp_DID);
dea3101e 2284 /* LOGO completes to NPort <nlp_DID> */
e8b62011
JS
2285 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2286 "0105 LOGO completes to NPort x%x "
2287 "Data: x%x x%x x%x x%x\n",
2288 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2289 irsp->ulpTimeout, vport->num_disc_nodes);
dea3101e 2290 /* Check to see if link went down during discovery */
2e0fef85 2291 if (lpfc_els_chk_latt(vport))
dea3101e
JB
2292 goto out;
2293
92d7f7b0
JS
2294 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2295 /* NLP_EVT_DEVICE_RM should unregister the RPI
2296 * which should abort all outstanding IOs.
2297 */
2298 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2299 NLP_EVT_DEVICE_RM);
2300 goto out;
2301 }
2302
dea3101e
JB
2303 if (irsp->ulpStatus) {
2304 /* Check for retry */
2e0fef85 2305 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
dea3101e
JB
2306 /* ELS command is being retried */
2307 goto out;
dea3101e 2308 /* LOGO failed */
e40a02c1
JS
2309 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2310 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2311 ndlp->nlp_DID, irsp->ulpStatus,
2312 irsp->un.ulpWord[4]);
dea3101e 2313 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
858c9f6c 2314 if (lpfc_error_lost_link(irsp))
dea3101e 2315 goto out;
858c9f6c 2316 else
2e0fef85 2317 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 2318 NLP_EVT_CMPL_LOGO);
e47c9093 2319 } else
5024ab17
JW
2320 /* Good status, call state machine.
2321 * This will unregister the rpi if needed.
2322 */
2e0fef85 2323 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 2324 NLP_EVT_CMPL_LOGO);
dea3101e
JB
2325out:
2326 lpfc_els_free_iocb(phba, cmdiocb);
92494144
JS
2327 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
2328 if ((vport->fc_flag & FC_PT2PT) &&
2329 !(vport->fc_flag & FC_PT2PT_PLOGI)) {
2330 phba->pport->fc_myDID = 0;
2331 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2332 if (mbox) {
2333 lpfc_config_link(phba, mbox);
2334 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2335 mbox->vport = vport;
2336 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
2337 MBX_NOT_FINISHED) {
2338 mempool_free(mbox, phba->mbox_mem_pool);
2339 }
2340 }
2341 }
dea3101e
JB
2342 return;
2343}
2344
e59058c4 2345/**
3621a710 2346 * lpfc_issue_els_logo - Issue a logo to a node on a vport
e59058c4
JS
2347 * @vport: pointer to a virtual N_Port data structure.
2348 * @ndlp: pointer to a node-list data structure.
2349 * @retry: number of retries to the command IOCB.
2350 *
2351 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2352 * to a remote node, referred by an @ndlp on a @vport. It constructs the
2353 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2354 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2355 *
2356 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2357 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2358 * will be stored into the context1 field of the IOCB for the completion
2359 * callback function to the LOGO ELS command.
2360 *
2361 * Return code
2362 * 0 - successfully issued logo
2363 * 1 - failed to issue logo
2364 **/
dea3101e 2365int
2e0fef85 2366lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e
JB
2367 uint8_t retry)
2368{
2e0fef85
JS
2369 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2370 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
2371 IOCB_t *icmd;
2372 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2373 uint8_t *pcmd;
2374 uint16_t cmdsize;
92d7f7b0 2375 int rc;
dea3101e 2376
98c9ea5c
JS
2377 spin_lock_irq(shost->host_lock);
2378 if (ndlp->nlp_flag & NLP_LOGO_SND) {
2379 spin_unlock_irq(shost->host_lock);
2380 return 0;
2381 }
2382 spin_unlock_irq(shost->host_lock);
2383
92d7f7b0 2384 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2e0fef85
JS
2385 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2386 ndlp->nlp_DID, ELS_CMD_LOGO);
488d1469 2387 if (!elsiocb)
c9f8735b 2388 return 1;
dea3101e
JB
2389
2390 icmd = &elsiocb->iocb;
2391 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2392 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
92d7f7b0 2393 pcmd += sizeof(uint32_t);
dea3101e
JB
2394
2395 /* Fill in LOGO payload */
2e0fef85 2396 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
92d7f7b0
JS
2397 pcmd += sizeof(uint32_t);
2398 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
dea3101e 2399
858c9f6c
JS
2400 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2401 "Issue LOGO: did:x%x",
2402 ndlp->nlp_DID, 0, 0);
2403
dea3101e
JB
2404 phba->fc_stat.elsXmitLOGO++;
2405 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2e0fef85 2406 spin_lock_irq(shost->host_lock);
dea3101e 2407 ndlp->nlp_flag |= NLP_LOGO_SND;
2e0fef85 2408 spin_unlock_irq(shost->host_lock);
3772a991 2409 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
92d7f7b0
JS
2410
2411 if (rc == IOCB_ERROR) {
2e0fef85 2412 spin_lock_irq(shost->host_lock);
dea3101e 2413 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2e0fef85 2414 spin_unlock_irq(shost->host_lock);
dea3101e 2415 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2416 return 1;
dea3101e 2417 }
c9f8735b 2418 return 0;
dea3101e
JB
2419}
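
/*
 * Illustrative sketch only, not driver code: lpfc_issue_els_logo() above
 * refuses to send a second LOGO while one is outstanding by testing the
 * NLP_LOGO_SND flag under the host lock, setting it before the send, and
 * clearing it again if the send fails.  The mutex-protected flag below
 * mirrors that pattern in plain userspace C; every name is made up.
 */
#include <pthread.h>
#include <stdio.h>

#define SKETCH_LOGO_SND	0x1

struct sketch_logo_node {
	pthread_mutex_t lock;
	unsigned int    flags;
};

static int sketch_issue_logo(struct sketch_logo_node *n, int (*send)(void))
{
	pthread_mutex_lock(&n->lock);
	if (n->flags & SKETCH_LOGO_SND) {	/* a LOGO is already in flight */
		pthread_mutex_unlock(&n->lock);
		return 0;
	}
	n->flags |= SKETCH_LOGO_SND;
	pthread_mutex_unlock(&n->lock);

	if (send() != 0) {			/* undo the flag on send failure */
		pthread_mutex_lock(&n->lock);
		n->flags &= ~SKETCH_LOGO_SND;
		pthread_mutex_unlock(&n->lock);
		return 1;
	}
	return 0;
}

static int sketch_send_ok(void) { return 0; }

int main(void)
{
	struct sketch_logo_node n = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("first rc=%d, second (skipped) rc=%d\n",
	       sketch_issue_logo(&n, sketch_send_ok),
	       sketch_issue_logo(&n, sketch_send_ok));
	return 0;
}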
2420
e59058c4 2421/**
3621a710 2422 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
e59058c4
JS
2423 * @phba: pointer to lpfc hba data structure.
2424 * @cmdiocb: pointer to lpfc command iocb data structure.
2425 * @rspiocb: pointer to lpfc response iocb data structure.
2426 *
2427 * This routine is a generic completion callback function for ELS commands.
2428 * Specifically, it is the callback function which does not need to perform
2429 * any command specific operations. It is currently used by the ELS command
2430 * issuing routines for the ELS State Change Request (SCR),
2431 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2432 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
2433 * certain debug loggings, this callback function simply invokes the
2434 * lpfc_els_chk_latt() routine to check whether link went down during the
2435 * discovery process.
2436 **/
dea3101e 2437static void
2e0fef85
JS
2438lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2439 struct lpfc_iocbq *rspiocb)
dea3101e 2440{
2e0fef85 2441 struct lpfc_vport *vport = cmdiocb->vport;
dea3101e
JB
2442 IOCB_t *irsp;
2443
2444 irsp = &rspiocb->iocb;
2445
858c9f6c
JS
2446 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2447 "ELS cmd cmpl: status:x%x/x%x did:x%x",
2448 irsp->ulpStatus, irsp->un.ulpWord[4],
2449 irsp->un.elsreq64.remoteID);
dea3101e 2450 /* ELS cmd tag <ulpIoTag> completes */
e8b62011
JS
2451 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2452 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2453 irsp->ulpIoTag, irsp->ulpStatus,
2454 irsp->un.ulpWord[4], irsp->ulpTimeout);
dea3101e 2455 /* Check to see if link went down during discovery */
2e0fef85 2456 lpfc_els_chk_latt(vport);
dea3101e
JB
2457 lpfc_els_free_iocb(phba, cmdiocb);
2458 return;
2459}
2460
e59058c4 2461/**
3621a710 2462 * lpfc_issue_els_scr - Issue a scr to a node on a vport
e59058c4
JS
2463 * @vport: pointer to a host virtual N_Port data structure.
2464 * @nportid: N_Port identifier to the remote node.
2465 * @retry: number of retries to the command IOCB.
2466 *
2467 * This routine issues a State Change Request (SCR) to a fabric node
2468 * on a @vport. The remote node @nportid is passed into the function. It
 2469 * first searches the @vport node list to find the matching ndlp. If no such
2470 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2471 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2472 * routine is invoked to send the SCR IOCB.
2473 *
2474 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2475 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2476 * will be stored into the context1 field of the IOCB for the completion
2477 * callback function to the SCR ELS command.
2478 *
2479 * Return code
2480 * 0 - Successfully issued scr command
2481 * 1 - Failed to issue scr command
2482 **/
dea3101e 2483int
2e0fef85 2484lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
dea3101e 2485{
2e0fef85 2486 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
2487 IOCB_t *icmd;
2488 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2489 struct lpfc_sli *psli;
2490 uint8_t *pcmd;
2491 uint16_t cmdsize;
2492 struct lpfc_nodelist *ndlp;
2493
2494 psli = &phba->sli;
92d7f7b0 2495 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
dea3101e 2496
e47c9093
JS
2497 ndlp = lpfc_findnode_did(vport, nportid);
2498 if (!ndlp) {
2499 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2500 if (!ndlp)
2501 return 1;
2502 lpfc_nlp_init(vport, ndlp, nportid);
2503 lpfc_enqueue_node(vport, ndlp);
2504 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2505 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2506 if (!ndlp)
2507 return 1;
2508 }
2e0fef85
JS
2509
2510 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2511 ndlp->nlp_DID, ELS_CMD_SCR);
dea3101e 2512
488d1469 2513 if (!elsiocb) {
fa4066b6
JS
2514 /* This will trigger the release of the node just
2515 * allocated
2516 */
329f9bc7 2517 lpfc_nlp_put(ndlp);
c9f8735b 2518 return 1;
dea3101e
JB
2519 }
2520
2521 icmd = &elsiocb->iocb;
2522 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2523
2524 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
92d7f7b0 2525 pcmd += sizeof(uint32_t);
dea3101e
JB
2526
2527 /* For SCR, remainder of payload is SCR parameter page */
92d7f7b0 2528 memset(pcmd, 0, sizeof(SCR));
dea3101e
JB
2529 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
2530
858c9f6c
JS
2531 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2532 "Issue SCR: did:x%x",
2533 ndlp->nlp_DID, 0, 0);
2534
dea3101e
JB
2535 phba->fc_stat.elsXmitSCR++;
2536 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3772a991
JS
2537 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2538 IOCB_ERROR) {
fa4066b6
JS
2539 /* The additional lpfc_nlp_put will cause the following
 2540 * lpfc_els_free_iocb routine to trigger the release of
2541 * the node.
2542 */
329f9bc7 2543 lpfc_nlp_put(ndlp);
dea3101e 2544 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2545 return 1;
dea3101e 2546 }
fa4066b6
JS
2547 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2548 * trigger the release of node.
2549 */
329f9bc7 2550 lpfc_nlp_put(ndlp);
c9f8735b 2551 return 0;
dea3101e
JB
2552}
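
/*
 * Illustrative sketch only, not driver code: the SCR path above relies on
 * node reference counting -- a reference is taken for the in-flight IOCB
 * and the issuing path drops its own reference with lpfc_nlp_put() so the
 * node is released once the completion callback runs.  The toy counter
 * below shows the same get-before-async / put-after pattern; it is not the
 * lpfc kref implementation.
 */
#include <stdio.h>

struct toy_refnode {
	int refs;
	int freed;
};

static struct toy_refnode *toy_get(struct toy_refnode *n)
{
	n->refs++;
	return n;
}

static void toy_put(struct toy_refnode *n)
{
	if (--n->refs == 0)
		n->freed = 1;			/* would kfree() in the driver */
}

static void toy_async_complete(struct toy_refnode *n)
{
	toy_put(n);				/* drop the in-flight reference */
}

int main(void)
{
	struct toy_refnode n = { .refs = 1 };	/* caller's initial reference   */

	toy_get(&n);				/* reference for the async I/O  */
	toy_put(&n);				/* issuing path drops its own   */
	toy_async_complete(&n);			/* I/O completes later          */
	printf("refs=%d freed=%d\n", n.refs, n.freed);
	return 0;
}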
2553
e59058c4 2554/**
3621a710 2555 * lpfc_issue_els_farpr - Issue a farpr to a node on a vport
e59058c4
JS
2556 * @vport: pointer to a host virtual N_Port data structure.
2557 * @nportid: N_Port identifier to the remote node.
2558 * @retry: number of retries to the command IOCB.
2559 *
2560 * This routine issues a Fibre Channel Address Resolution Response
2561 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
 2562 * is passed into the function. It first searches the @vport node list to find
2563 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
2564 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
2565 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
2566 *
2567 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2568 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2569 * will be stored into the context1 field of the IOCB for the completion
 2570 * callback function to the FARPR ELS command.
2571 *
2572 * Return code
2573 * 0 - Successfully issued farpr command
2574 * 1 - Failed to issue farpr command
2575 **/
dea3101e 2576static int
2e0fef85 2577lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
dea3101e 2578{
2e0fef85 2579 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
2580 IOCB_t *icmd;
2581 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2582 struct lpfc_sli *psli;
2583 FARP *fp;
2584 uint8_t *pcmd;
2585 uint32_t *lp;
2586 uint16_t cmdsize;
2587 struct lpfc_nodelist *ondlp;
2588 struct lpfc_nodelist *ndlp;
2589
2590 psli = &phba->sli;
92d7f7b0 2591 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
dea3101e 2592
e47c9093
JS
2593 ndlp = lpfc_findnode_did(vport, nportid);
2594 if (!ndlp) {
2595 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2596 if (!ndlp)
2597 return 1;
2598 lpfc_nlp_init(vport, ndlp, nportid);
2599 lpfc_enqueue_node(vport, ndlp);
2600 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2601 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2602 if (!ndlp)
2603 return 1;
2604 }
2e0fef85
JS
2605
2606 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2607 ndlp->nlp_DID, ELS_CMD_RNID);
488d1469 2608 if (!elsiocb) {
fa4066b6
JS
2609 /* This will trigger the release of the node just
2610 * allocated
2611 */
329f9bc7 2612 lpfc_nlp_put(ndlp);
c9f8735b 2613 return 1;
dea3101e
JB
2614 }
2615
2616 icmd = &elsiocb->iocb;
2617 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2618
2619 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
92d7f7b0 2620 pcmd += sizeof(uint32_t);
dea3101e
JB
2621
2622 /* Fill in FARPR payload */
2623 fp = (FARP *) (pcmd);
92d7f7b0 2624 memset(fp, 0, sizeof(FARP));
dea3101e
JB
2625 lp = (uint32_t *) pcmd;
2626 *lp++ = be32_to_cpu(nportid);
2e0fef85 2627 *lp++ = be32_to_cpu(vport->fc_myDID);
dea3101e
JB
2628 fp->Rflags = 0;
2629 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
2630
92d7f7b0
JS
2631 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
2632 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 2633 ondlp = lpfc_findnode_did(vport, nportid);
e47c9093 2634 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
dea3101e 2635 memcpy(&fp->OportName, &ondlp->nlp_portname,
92d7f7b0 2636 sizeof(struct lpfc_name));
dea3101e 2637 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
92d7f7b0 2638 sizeof(struct lpfc_name));
dea3101e
JB
2639 }
2640
858c9f6c
JS
2641 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2642 "Issue FARPR: did:x%x",
2643 ndlp->nlp_DID, 0, 0);
2644
dea3101e
JB
2645 phba->fc_stat.elsXmitFARPR++;
2646 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3772a991
JS
2647 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2648 IOCB_ERROR) {
fa4066b6
JS
2649 /* The additional lpfc_nlp_put will cause the following
2650 * lpfc_els_free_iocb routine to trigger the release of
2651 * the node.
2652 */
329f9bc7 2653 lpfc_nlp_put(ndlp);
dea3101e 2654 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2655 return 1;
dea3101e 2656 }
fa4066b6
JS
2657 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2658 * trigger the release of the node.
2659 */
329f9bc7 2660 lpfc_nlp_put(ndlp);
c9f8735b 2661 return 0;
dea3101e
JB
2662}
2663
e59058c4 2664/**
3621a710 2665 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
e59058c4
JS
2666 * @vport: pointer to a host virtual N_Port data structure.
2667 * @nlp: pointer to a node-list data structure.
2668 *
2669 * This routine cancels the timer with a delayed IOCB-command retry for
2670 * a @vport's @ndlp. It stops the timer for the delayed function retrial and
 2671 * removes the ELS retry event if one is present. In addition, if the
2672 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
2673 * commands are sent for the @vport's nodes that require issuing discovery
2674 * ADISC.
2675 **/
fdcebe28 2676void
2e0fef85 2677lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
fdcebe28 2678{
2e0fef85 2679 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
e47c9093 2680 struct lpfc_work_evt *evtp;
2e0fef85 2681
0d2b6b83
JS
2682 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
2683 return;
2e0fef85 2684 spin_lock_irq(shost->host_lock);
fdcebe28 2685 nlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 2686 spin_unlock_irq(shost->host_lock);
fdcebe28
JS
2687 del_timer_sync(&nlp->nlp_delayfunc);
2688 nlp->nlp_last_elscmd = 0;
e47c9093 2689 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
fdcebe28 2690 list_del_init(&nlp->els_retry_evt.evt_listp);
e47c9093
JS
2691 /* Decrement nlp reference count held for the delayed retry */
2692 evtp = &nlp->els_retry_evt;
2693 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
2694 }
fdcebe28 2695 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
2e0fef85 2696 spin_lock_irq(shost->host_lock);
fdcebe28 2697 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2e0fef85
JS
2698 spin_unlock_irq(shost->host_lock);
2699 if (vport->num_disc_nodes) {
0d2b6b83
JS
2700 if (vport->port_state < LPFC_VPORT_READY) {
2701 /* Check if there are more ADISCs to be sent */
2702 lpfc_more_adisc(vport);
0d2b6b83
JS
2703 } else {
2704 /* Check if there are more PLOGIs to be sent */
2705 lpfc_more_plogi(vport);
90160e01
JS
2706 if (vport->num_disc_nodes == 0) {
2707 spin_lock_irq(shost->host_lock);
2708 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2709 spin_unlock_irq(shost->host_lock);
2710 lpfc_can_disctmo(vport);
2711 lpfc_end_rscn(vport);
2712 }
fdcebe28
JS
2713 }
2714 }
2715 }
2716 return;
2717}
2718
e59058c4 2719/**
3621a710 2720 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
e59058c4
JS
2721 * @ptr: holder for the pointer to the timer function associated data (ndlp).
2722 *
2723 * This routine is invoked by the ndlp delayed-function timer to check
2724 * whether there is any pending ELS retry event(s) with the node. If not, it
2725 * simply returns. Otherwise, if there is at least one ELS delayed event, it
2726 * adds the delayed events to the HBA work list and invokes the
2727 * lpfc_worker_wake_up() routine to wake up worker thread to process the
2728 * event. Note that lpfc_nlp_get() is called before posting the event to
2729 * the work list to hold reference count of ndlp so that it guarantees the
2730 * reference to ndlp will still be available when the worker thread gets
2731 * to the event associated with the ndlp.
2732 **/
dea3101e
JB
2733void
2734lpfc_els_retry_delay(unsigned long ptr)
2735{
2e0fef85
JS
2736 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2737 struct lpfc_vport *vport = ndlp->vport;
2e0fef85 2738 struct lpfc_hba *phba = vport->phba;
92d7f7b0 2739 unsigned long flags;
2e0fef85 2740 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
dea3101e 2741
92d7f7b0 2742 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 2743 if (!list_empty(&evtp->evt_listp)) {
92d7f7b0 2744 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e
JB
2745 return;
2746 }
2747
fa4066b6
JS
2748 /* We need to hold the node by incrementing the reference
2749 * count until the queued work is done
2750 */
2751 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
5e9d9b82
JS
2752 if (evtp->evt_arg1) {
2753 evtp->evt = LPFC_EVT_ELS_RETRY;
2754 list_add_tail(&evtp->evt_listp, &phba->work_list);
92d7f7b0 2755 lpfc_worker_wake_up(phba);
5e9d9b82 2756 }
92d7f7b0 2757 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e
JB
2758 return;
2759}
2760
e59058c4 2761/**
3621a710 2762 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
e59058c4
JS
2763 * @ndlp: pointer to a node-list data structure.
2764 *
2765 * This routine is the worker-thread handler for processing the @ndlp delayed
2766 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
2767 * the last ELS command from the associated ndlp and invokes the proper ELS
2768 * function according to the delayed ELS command to retry the command.
2769 **/
dea3101e
JB
2770void
2771lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
2772{
2e0fef85
JS
2773 struct lpfc_vport *vport = ndlp->vport;
2774 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2775 uint32_t cmd, did, retry;
dea3101e 2776
2e0fef85 2777 spin_lock_irq(shost->host_lock);
5024ab17
JW
2778 did = ndlp->nlp_DID;
2779 cmd = ndlp->nlp_last_elscmd;
2780 ndlp->nlp_last_elscmd = 0;
dea3101e
JB
2781
2782 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2e0fef85 2783 spin_unlock_irq(shost->host_lock);
dea3101e
JB
2784 return;
2785 }
2786
2787 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 2788 spin_unlock_irq(shost->host_lock);
1a169689
JS
2789 /*
2790 * If a discovery event readded nlp_delayfunc after timer
2791 * firing and before processing the timer, cancel the
2792 * nlp_delayfunc.
2793 */
2794 del_timer_sync(&ndlp->nlp_delayfunc);
dea3101e 2795 retry = ndlp->nlp_retry;
4d9ab994 2796 ndlp->nlp_retry = 0;
dea3101e
JB
2797
2798 switch (cmd) {
2799 case ELS_CMD_FLOGI:
2e0fef85 2800 lpfc_issue_els_flogi(vport, ndlp, retry);
dea3101e
JB
2801 break;
2802 case ELS_CMD_PLOGI:
2e0fef85 2803 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
5024ab17 2804 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2805 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6ad42535 2806 }
dea3101e
JB
2807 break;
2808 case ELS_CMD_ADISC:
2e0fef85 2809 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
5024ab17 2810 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2811 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
6ad42535 2812 }
dea3101e
JB
2813 break;
2814 case ELS_CMD_PRLI:
2e0fef85 2815 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
5024ab17 2816 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2817 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
6ad42535 2818 }
dea3101e
JB
2819 break;
2820 case ELS_CMD_LOGO:
2e0fef85 2821 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
5024ab17 2822 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2823 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6ad42535 2824 }
dea3101e 2825 break;
92d7f7b0 2826 case ELS_CMD_FDISC:
fedd3b7b
JS
2827 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
2828 lpfc_issue_els_fdisc(vport, ndlp, retry);
92d7f7b0 2829 break;
dea3101e
JB
2830 }
2831 return;
2832}
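
/*
 * Illustrative sketch only, not driver code: the delayed-retry handler
 * above saves which ELS command timed out (nlp_last_elscmd) and replays it
 * with a switch once the delay expires.  The toy dispatcher below shows the
 * same save-and-replay idea with made-up command codes and callbacks.
 */
#include <stdio.h>

enum toy_els_cmd { TOY_NONE = 0, TOY_PLOGI, TOY_ADISC, TOY_PRLI, TOY_LOGO };

struct toy_disc_node {
	enum toy_els_cmd last_cmd;	/* command to retry after the delay */
	int              retries;
};

static void toy_retry_plogi(struct toy_disc_node *n)
{
	printf("re-issuing PLOGI, retry %d\n", n->retries);
}

static void toy_retry_adisc(struct toy_disc_node *n)
{
	printf("re-issuing ADISC, retry %d\n", n->retries);
}

static void toy_retry_handler(struct toy_disc_node *n)
{
	enum toy_els_cmd cmd = n->last_cmd;

	n->last_cmd = TOY_NONE;		/* consume the pending retry */
	switch (cmd) {
	case TOY_PLOGI:
		toy_retry_plogi(n);
		break;
	case TOY_ADISC:
		toy_retry_adisc(n);
		break;
	default:
		break;			/* PRLI/LOGO/FDISC omitted here */
	}
}

int main(void)
{
	struct toy_disc_node n = { .last_cmd = TOY_PLOGI, .retries = 2 };

	toy_retry_handler(&n);
	return 0;
}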
2833
e59058c4 2834/**
3621a710 2835 * lpfc_els_retry - Make retry decision on an els command iocb
e59058c4
JS
2836 * @phba: pointer to lpfc hba data structure.
2837 * @cmdiocb: pointer to lpfc command iocb data structure.
2838 * @rspiocb: pointer to lpfc response iocb data structure.
2839 *
2840 * This routine makes a retry decision on an ELS command IOCB, which has
2841 * failed. The following ELS IOCBs use this function for retrying the command
 2842 * when a previously issued command responded with error status: FLOGI, PLOGI,
2843 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
2844 * returned error status, it makes the decision whether a retry shall be
2845 * issued for the command, and whether a retry shall be made immediately or
2846 * delayed. In the former case, the corresponding ELS command issuing-function
 2847 * is called to retry the command. In the latter case, the ELS command shall
 2848 * be posted to the ndlp delayed event and the delayed function timer set on
 2849 * the ndlp for the delayed command issuing.
2850 *
2851 * Return code
2852 * 0 - No retry of els command is made
2853 * 1 - Immediate or delayed retry of els command is made
2854 **/
dea3101e 2855static int
2e0fef85
JS
2856lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2857 struct lpfc_iocbq *rspiocb)
dea3101e 2858{
2e0fef85
JS
2859 struct lpfc_vport *vport = cmdiocb->vport;
2860 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2861 IOCB_t *irsp = &rspiocb->iocb;
2862 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2863 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
dea3101e
JB
2864 uint32_t *elscmd;
2865 struct ls_rjt stat;
2e0fef85 2866 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
98c9ea5c 2867 int logerr = 0;
2e0fef85 2868 uint32_t cmd = 0;
488d1469 2869 uint32_t did;
dea3101e 2870
488d1469 2871
dea3101e
JB
2872 /* Note: context2 may be 0 for internal driver abort
 2873 * of a delayed ELS command.
2874 */
2875
2876 if (pcmd && pcmd->virt) {
2877 elscmd = (uint32_t *) (pcmd->virt);
2878 cmd = *elscmd++;
2879 }
2880
e47c9093 2881 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
488d1469
JS
2882 did = ndlp->nlp_DID;
2883 else {
2884 /* We should only hit this case for retrying PLOGI */
2885 did = irsp->un.elsreq64.remoteID;
2e0fef85 2886 ndlp = lpfc_findnode_did(vport, did);
e47c9093
JS
2887 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
2888 && (cmd != ELS_CMD_PLOGI))
488d1469
JS
2889 return 1;
2890 }
2891
858c9f6c
JS
2892 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2893 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
2894 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
2895
dea3101e
JB
2896 switch (irsp->ulpStatus) {
2897 case IOSTAT_FCP_RSP_ERROR:
1151e3ec 2898 break;
dea3101e 2899 case IOSTAT_REMOTE_STOP:
1151e3ec
JS
2900 if (phba->sli_rev == LPFC_SLI_REV4) {
2901 /* This IO was aborted by the target, we don't
2902 * know the rxid and because we did not send the
2903 * ABTS we cannot generate an RRQ.
2904 */
2905 lpfc_set_rrq_active(phba, ndlp,
2906 cmdiocb->sli4_xritag, 0, 0);
2907 }
dea3101e 2908 break;
dea3101e
JB
2909 case IOSTAT_LOCAL_REJECT:
2910 switch ((irsp->un.ulpWord[4] & 0xff)) {
2911 case IOERR_LOOP_OPEN_FAILURE:
eaf15d5b
JS
2912 if (cmd == ELS_CMD_FLOGI) {
2913 if (PCI_DEVICE_ID_HORNET ==
2914 phba->pcidev->device) {
76a95d75 2915 phba->fc_topology = LPFC_TOPOLOGY_LOOP;
eaf15d5b
JS
2916 phba->pport->fc_myDID = 0;
2917 phba->alpa_map[0] = 0;
2918 phba->alpa_map[1] = 0;
2919 }
2920 }
2e0fef85 2921 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
92d7f7b0 2922 delay = 1000;
dea3101e
JB
2923 retry = 1;
2924 break;
2925
92d7f7b0 2926 case IOERR_ILLEGAL_COMMAND:
7f5f3d0d
JS
2927 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2928 "0124 Retry illegal cmd x%x "
2929 "retry:x%x delay:x%x\n",
2930 cmd, cmdiocb->retry, delay);
2931 retry = 1;
2932 /* All commands' retry policy */
2933 maxretry = 8;
2934 if (cmdiocb->retry > 2)
2935 delay = 1000;
92d7f7b0
JS
2936 break;
2937
dea3101e 2938 case IOERR_NO_RESOURCES:
98c9ea5c 2939 logerr = 1; /* HBA out of resources */
858c9f6c
JS
2940 retry = 1;
2941 if (cmdiocb->retry > 100)
2942 delay = 100;
2943 maxretry = 250;
2944 break;
2945
2946 case IOERR_ILLEGAL_FRAME:
92d7f7b0 2947 delay = 100;
dea3101e
JB
2948 retry = 1;
2949 break;
2950
858c9f6c 2951 case IOERR_SEQUENCE_TIMEOUT:
dea3101e
JB
2952 case IOERR_INVALID_RPI:
2953 retry = 1;
2954 break;
2955 }
2956 break;
2957
2958 case IOSTAT_NPORT_RJT:
2959 case IOSTAT_FABRIC_RJT:
2960 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
2961 retry = 1;
2962 break;
2963 }
2964 break;
2965
2966 case IOSTAT_NPORT_BSY:
2967 case IOSTAT_FABRIC_BSY:
98c9ea5c 2968 logerr = 1; /* Fabric / Remote NPort out of resources */
dea3101e
JB
2969 retry = 1;
2970 break;
2971
2972 case IOSTAT_LS_RJT:
2973 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
2974 /* Added for Vendor specific support
2975 * Just keep retrying for these Rsn / Exp codes
2976 */
2977 switch (stat.un.b.lsRjtRsnCode) {
2978 case LSRJT_UNABLE_TPC:
2979 if (stat.un.b.lsRjtRsnCodeExp ==
2980 LSEXP_CMD_IN_PROGRESS) {
2981 if (cmd == ELS_CMD_PLOGI) {
92d7f7b0 2982 delay = 1000;
dea3101e
JB
2983 maxretry = 48;
2984 }
2985 retry = 1;
2986 break;
2987 }
ffc95493
JS
2988 if (stat.un.b.lsRjtRsnCodeExp ==
2989 LSEXP_CANT_GIVE_DATA) {
2990 if (cmd == ELS_CMD_PLOGI) {
2991 delay = 1000;
2992 maxretry = 48;
2993 }
2994 retry = 1;
2995 break;
2996 }
dea3101e 2997 if (cmd == ELS_CMD_PLOGI) {
92d7f7b0 2998 delay = 1000;
dea3101e
JB
2999 maxretry = lpfc_max_els_tries + 1;
3000 retry = 1;
3001 break;
3002 }
92d7f7b0
JS
3003 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3004 (cmd == ELS_CMD_FDISC) &&
3005 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
e8b62011
JS
3006 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3007 "0125 FDISC Failed (x%x). "
3008 "Fabric out of resources\n",
3009 stat.un.lsRjtError);
92d7f7b0
JS
3010 lpfc_vport_set_state(vport,
3011 FC_VPORT_NO_FABRIC_RSCS);
3012 }
dea3101e
JB
3013 break;
3014
3015 case LSRJT_LOGICAL_BSY:
858c9f6c
JS
3016 if ((cmd == ELS_CMD_PLOGI) ||
3017 (cmd == ELS_CMD_PRLI)) {
92d7f7b0 3018 delay = 1000;
dea3101e 3019 maxretry = 48;
92d7f7b0 3020 } else if (cmd == ELS_CMD_FDISC) {
51ef4c26
JS
3021 /* FDISC retry policy */
3022 maxretry = 48;
3023 if (cmdiocb->retry >= 32)
3024 delay = 1000;
dea3101e
JB
3025 }
3026 retry = 1;
3027 break;
92d7f7b0
JS
3028
3029 case LSRJT_LOGICAL_ERR:
7f5f3d0d
JS
3030 /* There are some cases where switches return this
3031 * error when they are not ready and should be returning
3032 * Logical Busy. We should delay every time.
3033 */
3034 if (cmd == ELS_CMD_FDISC &&
3035 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
3036 maxretry = 3;
3037 delay = 1000;
3038 retry = 1;
3039 break;
3040 }
92d7f7b0
JS
3041 case LSRJT_PROTOCOL_ERR:
3042 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3043 (cmd == ELS_CMD_FDISC) &&
3044 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
3045 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
3046 ) {
e8b62011 3047 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
d7c255b2 3048 "0122 FDISC Failed (x%x). "
e8b62011
JS
3049 "Fabric Detected Bad WWN\n",
3050 stat.un.lsRjtError);
92d7f7b0
JS
3051 lpfc_vport_set_state(vport,
3052 FC_VPORT_FABRIC_REJ_WWN);
3053 }
3054 break;
dea3101e
JB
3055 }
3056 break;
3057
3058 case IOSTAT_INTERMED_RSP:
3059 case IOSTAT_BA_RJT:
3060 break;
3061
3062 default:
3063 break;
3064 }
3065
488d1469 3066 if (did == FDMI_DID)
dea3101e 3067 retry = 1;
dea3101e 3068
695a814e 3069 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
76a95d75 3070 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
1b32f6aa 3071 !lpfc_error_lost_link(irsp)) {
98c9ea5c
JS
3072 /* FLOGI retry policy */
3073 retry = 1;
6669f9bb
JS
3074 /* retry forever */
3075 maxretry = 0;
3076 if (cmdiocb->retry >= 100)
3077 delay = 5000;
3078 else if (cmdiocb->retry >= 32)
98c9ea5c
JS
3079 delay = 1000;
3080 }
3081
6669f9bb
JS
3082 cmdiocb->retry++;
3083 if (maxretry && (cmdiocb->retry >= maxretry)) {
dea3101e
JB
3084 phba->fc_stat.elsRetryExceeded++;
3085 retry = 0;
3086 }
3087
ed957684
JS
3088 if ((vport->load_flag & FC_UNLOADING) != 0)
3089 retry = 0;
3090
dea3101e 3091 if (retry) {
38b92ef8
JS
3092 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
3093 /* Stop retrying PLOGI and FDISC if in FCF discovery */
3094 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3095 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3096 "2849 Stop retry ELS command "
3097 "x%x to remote NPORT x%x, "
3098 "Data: x%x x%x\n", cmd, did,
3099 cmdiocb->retry, delay);
3100 return 0;
3101 }
3102 }
dea3101e
JB
3103
3104 /* Retry ELS command <elsCmd> to remote NPORT <did> */
e8b62011
JS
3105 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3106 "0107 Retry ELS command x%x to remote "
3107 "NPORT x%x Data: x%x x%x\n",
3108 cmd, did, cmdiocb->retry, delay);
dea3101e 3109
858c9f6c
JS
3110 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
3111 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
3112 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
3113 /* Don't reset timer for no resources */
3114
dea3101e 3115 /* If discovery / RSCN timer is running, reset it */
2e0fef85 3116 if (timer_pending(&vport->fc_disctmo) ||
92d7f7b0 3117 (vport->fc_flag & FC_RSCN_MODE))
2e0fef85 3118 lpfc_set_disctmo(vport);
dea3101e
JB
3119 }
3120
3121 phba->fc_stat.elsXmitRetry++;
58da1ffb 3122 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
dea3101e
JB
3123 phba->fc_stat.elsDelayRetry++;
3124 ndlp->nlp_retry = cmdiocb->retry;
3125
92d7f7b0
JS
3126 /* delay is specified in milliseconds */
3127 mod_timer(&ndlp->nlp_delayfunc,
3128 jiffies + msecs_to_jiffies(delay));
2e0fef85 3129 spin_lock_irq(shost->host_lock);
dea3101e 3130 ndlp->nlp_flag |= NLP_DELAY_TMO;
2e0fef85 3131 spin_unlock_irq(shost->host_lock);
dea3101e 3132
5024ab17 3133 ndlp->nlp_prev_state = ndlp->nlp_state;
858c9f6c
JS
3134 if (cmd == ELS_CMD_PRLI)
3135 lpfc_nlp_set_state(vport, ndlp,
3136 NLP_STE_REG_LOGIN_ISSUE);
3137 else
3138 lpfc_nlp_set_state(vport, ndlp,
3139 NLP_STE_NPR_NODE);
dea3101e
JB
3140 ndlp->nlp_last_elscmd = cmd;
3141
c9f8735b 3142 return 1;
dea3101e
JB
3143 }
3144 switch (cmd) {
3145 case ELS_CMD_FLOGI:
2e0fef85 3146 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
c9f8735b 3147 return 1;
92d7f7b0
JS
3148 case ELS_CMD_FDISC:
3149 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
3150 return 1;
dea3101e 3151 case ELS_CMD_PLOGI:
58da1ffb 3152 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
488d1469 3153 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 3154 lpfc_nlp_set_state(vport, ndlp,
de0c5b32 3155 NLP_STE_PLOGI_ISSUE);
488d1469 3156 }
2e0fef85 3157 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
c9f8735b 3158 return 1;
dea3101e 3159 case ELS_CMD_ADISC:
5024ab17 3160 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
3161 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3162 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
c9f8735b 3163 return 1;
dea3101e 3164 case ELS_CMD_PRLI:
5024ab17 3165 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
3166 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3167 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
c9f8735b 3168 return 1;
dea3101e 3169 case ELS_CMD_LOGO:
5024ab17 3170 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
3171 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3172 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
c9f8735b 3173 return 1;
dea3101e
JB
3174 }
3175 }
dea3101e 3176 /* No retry ELS command <elsCmd> to remote NPORT <did> */
98c9ea5c
JS
3177 if (logerr) {
3178 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3179 "0137 No retry ELS command x%x to remote "
3180 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
3181 cmd, did, irsp->ulpStatus,
3182 irsp->un.ulpWord[4]);
3183 }
3184 else {
3185 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
a58cbd52
JS
3186 "0108 No retry ELS command x%x to remote "
3187 "NPORT x%x Retried:%d Error:x%x/%x\n",
3188 cmd, did, cmdiocb->retry, irsp->ulpStatus,
3189 irsp->un.ulpWord[4]);
98c9ea5c 3190 }
c9f8735b 3191 return 0;
dea3101e
JB
3192}
3193
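/*
 * Usage sketch (illustrative only, not taken verbatim from the driver): the
 * completion handlers for FLOGI, PLOGI, PRLI, ADISC, LOGO and FDISC consult
 * lpfc_els_retry() before treating a failed command as final. Assuming a
 * handler with the usual (phba, cmdiocb, rspiocb) arguments and irsp pointing
 * at &rspiocb->iocb, the call pattern would look roughly like:
 *
 *	if (irsp->ulpStatus && lpfc_els_retry(phba, cmdiocb, rspiocb))
 *		goto out;
 *
 * A return of 1 means an immediate or delayed retry has been arranged and the
 * failure path should be skipped; a return of 0 means the command will not be
 * retried and the handler must finish its error processing.
 */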
e59058c4 3194/**
3621a710 3195 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
e59058c4
JS
3196 * @phba: pointer to lpfc hba data structure.
3197 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
3198 *
3199 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
3200 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
3201 * checks to see whether there is a lpfc DMA buffer associated with the
3202 * response of the command IOCB. If so, it will be released before releasing
3203 * the lpfc DMA buffer associated with the IOCB itself.
3204 *
3205 * Return code
3206 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
3207 **/
09372820 3208static int
87af33fe
JS
3209lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
3210{
3211 struct lpfc_dmabuf *buf_ptr;
3212
e59058c4 3213 /* Free the response before processing the command. */
87af33fe
JS
3214 if (!list_empty(&buf_ptr1->list)) {
3215 list_remove_head(&buf_ptr1->list, buf_ptr,
3216 struct lpfc_dmabuf,
3217 list);
3218 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3219 kfree(buf_ptr);
3220 }
3221 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
3222 kfree(buf_ptr1);
3223 return 0;
3224}
3225
e59058c4 3226/**
3621a710 3227 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
e59058c4
JS
3228 * @phba: pointer to lpfc hba data structure.
3229 * @buf_ptr: pointer to the lpfc dma buffer data structure.
3230 *
3231 * This routine releases the lpfc Direct Memory Access (DMA) buffer
3232 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
3233 * pool.
3234 *
3235 * Return code
3236 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
3237 **/
09372820 3238static int
87af33fe
JS
3239lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
3240{
3241 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3242 kfree(buf_ptr);
3243 return 0;
3244}
3245
e59058c4 3246/**
3621a710 3247 * lpfc_els_free_iocb - Free a command iocb and its associated resources
e59058c4
JS
3248 * @phba: pointer to lpfc hba data structure.
3249 * @elsiocb: pointer to lpfc els command iocb data structure.
3250 *
3251 * This routine frees a command IOCB and its associated resources. The
3252 * command IOCB data structure contains the reference to various associated
3253 * resources; these fields must be set to NULL if the associated reference is
3254 * not present:
3255 * context1 - reference to ndlp
3256 * context2 - reference to cmd
3257 * context2->next - reference to rsp
3258 * context3 - reference to bpl
3259 *
3260 * It first properly decrements the reference count held on ndlp for the
3261 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
3262 * set, it invokes the lpfc_els_free_data() routine to release the Direct
3263 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
3264 * adds the DMA buffer to the @phba data structure for the delayed release.
3265 * If reference to the Buffer Pointer List (BPL) is present, the
3266 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
3267 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
3268 * invoked to release the IOCB data structure back to @phba IOCBQ list.
3269 *
3270 * Return code
3271 * 0 - Success (currently, always return 0)
3272 **/
dea3101e 3273int
329f9bc7 3274lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
dea3101e
JB
3275{
3276 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
a8adb832
JS
3277 struct lpfc_nodelist *ndlp;
3278
3279 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
3280 if (ndlp) {
3281 if (ndlp->nlp_flag & NLP_DEFER_RM) {
3282 lpfc_nlp_put(ndlp);
dea3101e 3283
a8adb832
JS
3284 /* If the ndlp is not being used by another discovery
3285 * thread, free it.
3286 */
3287 if (!lpfc_nlp_not_used(ndlp)) {
3288 /* If ndlp is being used by another discovery
3289 * thread, just clear NLP_DEFER_RM
3290 */
3291 ndlp->nlp_flag &= ~NLP_DEFER_RM;
3292 }
3293 }
3294 else
3295 lpfc_nlp_put(ndlp);
329f9bc7
JS
3296 elsiocb->context1 = NULL;
3297 }
dea3101e
JB
3298 /* context2 = cmd, context2->next = rsp, context3 = bpl */
3299 if (elsiocb->context2) {
0ff10d46
JS
3300 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
3301 /* Firmware could still be in progress of DMAing
3302 * payload, so don't free data buffer till after
3303 * a hbeat.
3304 */
3305 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
3306 buf_ptr = elsiocb->context2;
3307 elsiocb->context2 = NULL;
3308 if (buf_ptr) {
3309 buf_ptr1 = NULL;
3310 spin_lock_irq(&phba->hbalock);
3311 if (!list_empty(&buf_ptr->list)) {
3312 list_remove_head(&buf_ptr->list,
3313 buf_ptr1, struct lpfc_dmabuf,
3314 list);
3315 INIT_LIST_HEAD(&buf_ptr1->list);
3316 list_add_tail(&buf_ptr1->list,
3317 &phba->elsbuf);
3318 phba->elsbuf_cnt++;
3319 }
3320 INIT_LIST_HEAD(&buf_ptr->list);
3321 list_add_tail(&buf_ptr->list, &phba->elsbuf);
3322 phba->elsbuf_cnt++;
3323 spin_unlock_irq(&phba->hbalock);
3324 }
3325 } else {
3326 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3327 lpfc_els_free_data(phba, buf_ptr1);
3328 }
dea3101e
JB
3329 }
3330
3331 if (elsiocb->context3) {
3332 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
87af33fe 3333 lpfc_els_free_bpl(phba, buf_ptr);
dea3101e 3334 }
604a3e30 3335 lpfc_sli_release_iocbq(phba, elsiocb);
dea3101e
JB
3336 return 0;
3337}
3338
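/*
 * Cleanup sketch, matching the pattern used by the response routines later in
 * this file: if lpfc_sli_issue_iocb() fails, the caller still owns the ELS
 * IOCB and must release it and its buffers through lpfc_els_free_iocb():
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 */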
e59058c4 3339/**
3621a710 3340 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
e59058c4
JS
3341 * @phba: pointer to lpfc hba data structure.
3342 * @cmdiocb: pointer to lpfc command iocb data structure.
3343 * @rspiocb: pointer to lpfc response iocb data structure.
3344 *
3345 * This routine is the completion callback function to the Logout (LOGO)
3346 * Accept (ACC) Response ELS command. This routine is invoked to indicate
3347 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
3348 * release the ndlp if it has the last reference remaining (reference count
3349 * is 1). If that succeeds (meaning the ndlp was released), it sets the IOCB context1
3350 * field to NULL to inform the following lpfc_els_free_iocb() routine no
3351 * ndlp reference count needs to be decremented. Otherwise, the ndlp
3352 * reference use-count shall be decremented by the lpfc_els_free_iocb()
3353 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
3354 * IOCB data structure.
3355 **/
dea3101e 3356static void
2e0fef85
JS
3357lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3358 struct lpfc_iocbq *rspiocb)
dea3101e 3359{
2e0fef85
JS
3360 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3361 struct lpfc_vport *vport = cmdiocb->vport;
858c9f6c
JS
3362 IOCB_t *irsp;
3363
3364 irsp = &rspiocb->iocb;
3365 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3366 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
3367 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
dea3101e 3368 /* ACC to LOGO completes to NPort <nlp_DID> */
e8b62011
JS
3369 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3370 "0109 ACC to LOGO completes to NPort x%x "
3371 "Data: x%x x%x x%x\n",
3372 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3373 ndlp->nlp_rpi);
87af33fe
JS
3374
3375 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
3376 /* NPort Recovery mode or node is just allocated */
3377 if (!lpfc_nlp_not_used(ndlp)) {
3378 /* If the ndlp is being used by another discovery
3379 * thread, just unregister the RPI.
3380 */
3381 lpfc_unreg_rpi(vport, ndlp);
fa4066b6
JS
3382 } else {
3383 /* Indicate the node has already been released; do
3384 * not reference it from within lpfc_els_free_iocb.
3385 */
3386 cmdiocb->context1 = NULL;
87af33fe 3387 }
dea3101e 3388 }
73d91e50
JS
3389
3390 /*
3391 * The driver received a LOGO from the rport and has ACK'd it.
3392 * At this point, the driver is done so release the IOCB and
3393 * remove the ndlp reference.
3394 */
dea3101e 3395 lpfc_els_free_iocb(phba, cmdiocb);
73d91e50 3396 lpfc_nlp_put(ndlp);
dea3101e
JB
3397 return;
3398}
3399
e59058c4 3400/**
3621a710 3401 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
e59058c4
JS
3402 * @phba: pointer to lpfc hba data structure.
3403 * @pmb: pointer to the driver internal queue element for mailbox command.
3404 *
3405 * This routine is the completion callback function for unregister default
3406 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
3407 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
3408 * decrements the ndlp reference count held for this completion callback
3409 * function. After that, it invokes the lpfc_nlp_not_used() to check
3410 * whether there is only one reference left on the ndlp. If so, it will
3411 * perform one more decrement and trigger the release of the ndlp.
3412 **/
858c9f6c
JS
3413void
3414lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3415{
3416 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3417 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3418
3419 pmb->context1 = NULL;
d439d286
JS
3420 pmb->context2 = NULL;
3421
858c9f6c
JS
3422 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3423 kfree(mp);
3424 mempool_free(pmb, phba->mbox_mem_pool);
58da1ffb 3425 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
a8adb832 3426 lpfc_nlp_put(ndlp);
a8adb832
JS
3427 /* This is the end of the default RPI cleanup logic for this
3428 * ndlp. If no other discovery threads are using this ndlp,
3429 * we should free all resources associated with it.
3430 */
3431 lpfc_nlp_not_used(ndlp);
3432 }
3772a991 3433
858c9f6c
JS
3434 return;
3435}
3436
e59058c4 3437/**
3621a710 3438 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
e59058c4
JS
3439 * @phba: pointer to lpfc hba data structure.
3440 * @cmdiocb: pointer to lpfc command iocb data structure.
3441 * @rspiocb: pointer to lpfc response iocb data structure.
3442 *
3443 * This routine is the completion callback function for ELS Response IOCB
3444 * command. In the normal case, this callback function just properly sets the
3445 * nlp_flag bitmap in the ndlp data structure. If the mbox command reference
3446 * field in the command IOCB is not NULL, the referred mailbox command will
3447 * be sent out, and then it invokes the lpfc_els_free_iocb() routine to release
3448 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
3449 * link down event occurred during the discovery, the lpfc_nlp_not_used()
3450 * routine shall be invoked trying to release the ndlp if no other threads
3451 * are currently referring to it.
3452 **/
dea3101e 3453static void
858c9f6c 3454lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
329f9bc7 3455 struct lpfc_iocbq *rspiocb)
dea3101e 3456{
2e0fef85
JS
3457 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3458 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3459 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
87af33fe
JS
3460 IOCB_t *irsp;
3461 uint8_t *pcmd;
dea3101e 3462 LPFC_MBOXQ_t *mbox = NULL;
2e0fef85 3463 struct lpfc_dmabuf *mp = NULL;
87af33fe 3464 uint32_t ls_rjt = 0;
dea3101e 3465
33ccf8d1
JS
3466 irsp = &rspiocb->iocb;
3467
dea3101e
JB
3468 if (cmdiocb->context_un.mbox)
3469 mbox = cmdiocb->context_un.mbox;
3470
fa4066b6
JS
3471 /* First determine if this is a LS_RJT cmpl. Note, this callback
3472 * function can have the cmdiocb->context1 (ndlp) field set to NULL.
3473 */
87af33fe 3474 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
58da1ffb
JS
3475 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3476 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
fa4066b6 3477 /* A LS_RJT associated with Default RPI cleanup has its own
3ad2f3fb 3478 * separate code path.
87af33fe
JS
3479 */
3480 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3481 ls_rjt = 1;
3482 }
3483
dea3101e 3484 /* Check to see if link went down during discovery */
58da1ffb 3485 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
dea3101e 3486 if (mbox) {
14691150
JS
3487 mp = (struct lpfc_dmabuf *) mbox->context1;
3488 if (mp) {
3489 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3490 kfree(mp);
3491 }
329f9bc7 3492 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e 3493 }
58da1ffb
JS
3494 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3495 (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
fa4066b6 3496 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3497 ndlp = NULL;
fa4066b6
JS
3498 /* Indicate the node has already been released;
3499 * do not reference it from within
3500 * the routine lpfc_els_free_iocb.
3501 */
3502 cmdiocb->context1 = NULL;
3503 }
dea3101e
JB
3504 goto out;
3505 }
3506
858c9f6c 3507 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
51ef4c26 3508 "ELS rsp cmpl: status:x%x/x%x did:x%x",
858c9f6c 3509 irsp->ulpStatus, irsp->un.ulpWord[4],
51ef4c26 3510 cmdiocb->iocb.un.elsreq64.remoteID);
dea3101e 3511 /* ELS response tag <ulpIoTag> completes */
e8b62011
JS
3512 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3513 "0110 ELS response tag x%x completes "
3514 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3515 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3516 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3517 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3518 ndlp->nlp_rpi);
dea3101e
JB
3519 if (mbox) {
3520 if ((rspiocb->iocb.ulpStatus == 0)
3521 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
2e0fef85 3522 lpfc_unreg_rpi(vport, ndlp);
e47c9093
JS
3523 /* Increment reference count to ndlp to hold the
3524 * reference to ndlp for the callback function.
3525 */
329f9bc7 3526 mbox->context2 = lpfc_nlp_get(ndlp);
2e0fef85 3527 mbox->vport = vport;
858c9f6c
JS
3528 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
3529 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3530 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3531 }
3532 else {
3533 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
3534 ndlp->nlp_prev_state = ndlp->nlp_state;
3535 lpfc_nlp_set_state(vport, ndlp,
2e0fef85 3536 NLP_STE_REG_LOGIN_ISSUE);
858c9f6c 3537 }
0b727fea 3538 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
e47c9093 3539 != MBX_NOT_FINISHED)
dea3101e 3540 goto out;
e47c9093
JS
3541 else
3542 /* Decrement the ndlp reference count we
3543 * set for this failed mailbox command.
3544 */
3545 lpfc_nlp_put(ndlp);
98c9ea5c
JS
3546
3547 /* ELS rsp: Cannot issue reg_login for <NPortid> */
3548 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3549 "0138 ELS rsp: Cannot issue reg_login for x%x "
3550 "Data: x%x x%x x%x\n",
3551 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3552 ndlp->nlp_rpi);
3553
fa4066b6 3554 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3555 ndlp = NULL;
fa4066b6
JS
3556 /* Indicate node has already been released;
3557 * do not reference it from within
3558 * the routine lpfc_els_free_iocb.
3559 */
3560 cmdiocb->context1 = NULL;
3561 }
dea3101e 3562 } else {
858c9f6c
JS
3563 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
3564 if (!lpfc_error_lost_link(irsp) &&
3565 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
fa4066b6 3566 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3567 ndlp = NULL;
fa4066b6
JS
3568 /* Indicate node has already been
3569 * released; do not reference
3570 * it from within the routine
3571 * lpfc_els_free_iocb.
3572 */
3573 cmdiocb->context1 = NULL;
3574 }
dea3101e
JB
3575 }
3576 }
14691150
JS
3577 mp = (struct lpfc_dmabuf *) mbox->context1;
3578 if (mp) {
3579 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3580 kfree(mp);
3581 }
3582 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e
JB
3583 }
3584out:
58da1ffb 3585 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2e0fef85 3586 spin_lock_irq(shost->host_lock);
858c9f6c 3587 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
2e0fef85 3588 spin_unlock_irq(shost->host_lock);
87af33fe
JS
3589
3590 /* If the node is not being used by another discovery thread,
3591 * and we are sending a reject, we are done with it.
3592 * Release driver reference count here and free associated
3593 * resources.
3594 */
3595 if (ls_rjt)
fa4066b6
JS
3596 if (lpfc_nlp_not_used(ndlp))
3597 /* Indicate node has already been released;
3598 * do not reference it from within
3599 * the routine lpfc_els_free_iocb.
3600 */
3601 cmdiocb->context1 = NULL;
dea3101e 3602 }
87af33fe 3603
dea3101e
JB
3604 lpfc_els_free_iocb(phba, cmdiocb);
3605 return;
3606}
3607
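/*
 * Summary of the reference handling above: when a REG_LOGIN (or immediate
 * unreg) mailbox is attached to the response IOCB, lpfc_cmpl_els_rsp() takes
 * an extra ndlp reference for the mailbox completion and gives it back if
 * lpfc_sli_issue_mbox() fails. In every error leg the mailbox DMA buffer is
 * freed, and where lpfc_nlp_not_used() drops the last ndlp reference the
 * cmdiocb->context1 field is cleared so that lpfc_els_free_iocb() does not
 * drop that reference a second time.
 */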
e59058c4 3608/**
3621a710 3609 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
e59058c4
JS
3610 * @vport: pointer to a host virtual N_Port data structure.
3611 * @flag: the els command code to be accepted.
3612 * @oldiocb: pointer to the original lpfc command iocb data structure.
3613 * @ndlp: pointer to a node-list data structure.
3614 * @mbox: pointer to the driver internal queue element for mailbox command.
3615 *
3616 * This routine prepares and issues an Accept (ACC) response IOCB
3617 * command. It uses the @flag to properly set up the IOCB field for the
3618 * specific ACC response command to be issued and invokes the
3619 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
3620 * @mbox pointer is passed in, it will be put into the context_un.mbox
3621 * field of the IOCB for the completion callback function to issue the
3622 * mailbox command to the HBA later when callback is invoked.
3623 *
3624 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3625 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3626 * will be stored into the context1 field of the IOCB for the completion
3627 * callback function to the corresponding response ELS IOCB command.
3628 *
3629 * Return code
3630 * 0 - Successfully issued acc response
3631 * 1 - Failed to issue acc response
3632 **/
dea3101e 3633int
2e0fef85
JS
3634lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3635 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
51ef4c26 3636 LPFC_MBOXQ_t *mbox)
dea3101e 3637{
2e0fef85
JS
3638 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3639 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
3640 IOCB_t *icmd;
3641 IOCB_t *oldcmd;
3642 struct lpfc_iocbq *elsiocb;
dea3101e
JB
3643 struct lpfc_sli *psli;
3644 uint8_t *pcmd;
3645 uint16_t cmdsize;
3646 int rc;
82d9a2a2 3647 ELS_PKT *els_pkt_ptr;
dea3101e
JB
3648
3649 psli = &phba->sli;
dea3101e
JB
3650 oldcmd = &oldiocb->iocb;
3651
3652 switch (flag) {
3653 case ELS_CMD_ACC:
92d7f7b0 3654 cmdsize = sizeof(uint32_t);
2e0fef85
JS
3655 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3656 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3657 if (!elsiocb) {
2e0fef85 3658 spin_lock_irq(shost->host_lock);
5024ab17 3659 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2e0fef85 3660 spin_unlock_irq(shost->host_lock);
c9f8735b 3661 return 1;
dea3101e 3662 }
2e0fef85 3663
dea3101e 3664 icmd = &elsiocb->iocb;
7851fe2c
JS
3665 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3666 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
dea3101e
JB
3667 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3668 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 3669 pcmd += sizeof(uint32_t);
858c9f6c
JS
3670
3671 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3672 "Issue ACC: did:x%x flg:x%x",
3673 ndlp->nlp_DID, ndlp->nlp_flag, 0);
dea3101e
JB
3674 break;
3675 case ELS_CMD_PLOGI:
92d7f7b0 3676 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
2e0fef85
JS
3677 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3678 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3679 if (!elsiocb)
c9f8735b 3680 return 1;
488d1469 3681
dea3101e 3682 icmd = &elsiocb->iocb;
7851fe2c
JS
3683 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3684 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
dea3101e
JB
3685 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3686
3687 if (mbox)
3688 elsiocb->context_un.mbox = mbox;
3689
3690 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0
JS
3691 pcmd += sizeof(uint32_t);
3692 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
858c9f6c
JS
3693
3694 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3695 "Issue ACC PLOGI: did:x%x flg:x%x",
3696 ndlp->nlp_DID, ndlp->nlp_flag, 0);
dea3101e 3697 break;
82d9a2a2 3698 case ELS_CMD_PRLO:
92d7f7b0 3699 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
2e0fef85 3700 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
82d9a2a2
JS
3701 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
3702 if (!elsiocb)
3703 return 1;
3704
3705 icmd = &elsiocb->iocb;
7851fe2c
JS
3706 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3707 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
82d9a2a2
JS
3708 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3709
3710 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
92d7f7b0 3711 sizeof(uint32_t) + sizeof(PRLO));
82d9a2a2
JS
3712 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
3713 els_pkt_ptr = (ELS_PKT *) pcmd;
3714 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
858c9f6c
JS
3715
3716 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3717 "Issue ACC PRLO: did:x%x flg:x%x",
3718 ndlp->nlp_DID, ndlp->nlp_flag, 0);
82d9a2a2 3719 break;
dea3101e 3720 default:
c9f8735b 3721 return 1;
dea3101e 3722 }
dea3101e 3723 /* Xmit ELS ACC response tag <ulpIoTag> */
e8b62011
JS
3724 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3725 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
3726 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
3727 elsiocb->iotag, elsiocb->iocb.ulpContext,
3728 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3729 ndlp->nlp_rpi);
dea3101e 3730 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2e0fef85 3731 spin_lock_irq(shost->host_lock);
c9f8735b 3732 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2e0fef85 3733 spin_unlock_irq(shost->host_lock);
dea3101e
JB
3734 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
3735 } else {
858c9f6c 3736 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e
JB
3737 }
3738
3739 phba->fc_stat.elsXmitACC++;
3772a991 3740 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
3741 if (rc == IOCB_ERROR) {
3742 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3743 return 1;
dea3101e 3744 }
c9f8735b 3745 return 0;
dea3101e
JB
3746}
3747
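/*
 * Call sketch (hypothetical caller; the real callers live in the node state
 * machine elsewhere in the driver): accepting an unsolicited PLOGI while
 * handing a prepared REG_LOGIN mailbox to the completion path could look like
 *
 *	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
 *
 * whereas a plain LS_ACC with no extra payload uses ELS_CMD_ACC and a NULL
 * mbox pointer.
 */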
e59058c4 3748/**
3621a710 3749 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
e59058c4
JS
3750 * @vport: pointer to a virtual N_Port data structure.
3751 * @rejectError: reject status (reason and explanation codes) placed in the LS_RJT payload.
3752 * @oldiocb: pointer to the original lpfc command iocb data structure.
3753 * @ndlp: pointer to a node-list data structure.
3754 * @mbox: pointer to the driver internal queue element for mailbox command.
3755 *
3756 * This routine prepares and issues a Reject (RJT) response IOCB
3757 * command. If a @mbox pointer is passed in, it will be put into the
3758 * context_un.mbox field of the IOCB for the completion callback function
3759 * to issue to the HBA later.
3760 *
3761 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3762 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3763 * will be stored into the context1 field of the IOCB for the completion
3764 * callback function to the reject response ELS IOCB command.
3765 *
3766 * Return code
3767 * 0 - Successfully issued reject response
3768 * 1 - Failed to issue reject response
3769 **/
dea3101e 3770int
2e0fef85 3771lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
858c9f6c
JS
3772 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3773 LPFC_MBOXQ_t *mbox)
dea3101e 3774{
2e0fef85 3775 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
3776 IOCB_t *icmd;
3777 IOCB_t *oldcmd;
3778 struct lpfc_iocbq *elsiocb;
dea3101e
JB
3779 struct lpfc_sli *psli;
3780 uint8_t *pcmd;
3781 uint16_t cmdsize;
3782 int rc;
3783
3784 psli = &phba->sli;
92d7f7b0 3785 cmdsize = 2 * sizeof(uint32_t);
2e0fef85
JS
3786 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3787 ndlp->nlp_DID, ELS_CMD_LS_RJT);
488d1469 3788 if (!elsiocb)
c9f8735b 3789 return 1;
dea3101e
JB
3790
3791 icmd = &elsiocb->iocb;
3792 oldcmd = &oldiocb->iocb;
7851fe2c
JS
3793 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3794 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
dea3101e
JB
3795 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3796
3797 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
92d7f7b0 3798 pcmd += sizeof(uint32_t);
dea3101e
JB
3799 *((uint32_t *) (pcmd)) = rejectError;
3800
51ef4c26 3801 if (mbox)
858c9f6c 3802 elsiocb->context_un.mbox = mbox;
858c9f6c 3803
dea3101e 3804 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
e8b62011
JS
3805 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3806 "0129 Xmit ELS RJT x%x response tag x%x "
3807 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
3808 "rpi x%x\n",
3809 rejectError, elsiocb->iotag,
3810 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
3811 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
858c9f6c
JS
3812 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3813 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
3814 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
3815
dea3101e 3816 phba->fc_stat.elsXmitLSRJT++;
858c9f6c 3817 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3772a991 3818 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
51ef4c26 3819
dea3101e
JB
3820 if (rc == IOCB_ERROR) {
3821 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3822 return 1;
dea3101e 3823 }
c9f8735b 3824 return 0;
dea3101e
JB
3825}
3826
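/*
 * Call sketch (hypothetical caller): the reject status is normally built from
 * an ls_rjt union so the reason and explanation codes land in the correct
 * bytes of the LS_RJT payload word. LSEXP_NOTHING_MORE is assumed here as the
 * explanation code; substitute whichever code applies:
 *
 *	struct ls_rjt stat;
 *
 *	memset(&stat, 0, sizeof(stat));
 *	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 *	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
 *	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 */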
e59058c4 3827/**
3621a710 3828 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
e59058c4
JS
3829 * @vport: pointer to a virtual N_Port data structure.
3830 * @oldiocb: pointer to the original lpfc command iocb data structure.
3831 * @ndlp: pointer to a node-list data structure.
3832 *
3833 * This routine prepares and issues an Accept (ACC) response to Address
3834 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
3835 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3836 *
3837 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3838 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3839 * will be stored into the context1 field of the IOCB for the completion
3840 * callback function to the ADISC Accept response ELS IOCB command.
3841 *
3842 * Return code
3843 * 0 - Successfully issued acc adisc response
3844 * 1 - Failed to issue adisc acc response
3845 **/
dea3101e 3846int
2e0fef85
JS
3847lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3848 struct lpfc_nodelist *ndlp)
dea3101e 3849{
2e0fef85 3850 struct lpfc_hba *phba = vport->phba;
dea3101e 3851 ADISC *ap;
2e0fef85 3852 IOCB_t *icmd, *oldcmd;
dea3101e 3853 struct lpfc_iocbq *elsiocb;
dea3101e
JB
3854 uint8_t *pcmd;
3855 uint16_t cmdsize;
3856 int rc;
3857
92d7f7b0 3858 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
2e0fef85
JS
3859 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3860 ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3861 if (!elsiocb)
c9f8735b 3862 return 1;
dea3101e 3863
5b8bd0c9
JS
3864 icmd = &elsiocb->iocb;
3865 oldcmd = &oldiocb->iocb;
7851fe2c
JS
3866 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3867 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5b8bd0c9 3868
dea3101e 3869 /* Xmit ADISC ACC response tag <ulpIoTag> */
e8b62011
JS
3870 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3871 "0130 Xmit ADISC ACC response iotag x%x xri: "
3872 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
3873 elsiocb->iotag, elsiocb->iocb.ulpContext,
3874 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3875 ndlp->nlp_rpi);
dea3101e
JB
3876 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3877
3878 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 3879 pcmd += sizeof(uint32_t);
dea3101e
JB
3880
3881 ap = (ADISC *) (pcmd);
3882 ap->hardAL_PA = phba->fc_pref_ALPA;
92d7f7b0
JS
3883 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
3884 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 3885 ap->DID = be32_to_cpu(vport->fc_myDID);
dea3101e 3886
858c9f6c
JS
3887 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3888 "Issue ACC ADISC: did:x%x flg:x%x",
3889 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3890
dea3101e 3891 phba->fc_stat.elsXmitACC++;
858c9f6c 3892 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3772a991 3893 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
3894 if (rc == IOCB_ERROR) {
3895 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3896 return 1;
dea3101e 3897 }
c9f8735b 3898 return 0;
dea3101e
JB
3899}
3900
e59058c4 3901/**
3621a710 3902 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
e59058c4
JS
3903 * @vport: pointer to a virtual N_Port data structure.
3904 * @oldiocb: pointer to the original lpfc command iocb data structure.
3905 * @ndlp: pointer to a node-list data structure.
3906 *
3907 * This routine prepares and issues an Accept (ACC) response to Process
3908 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
3909 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3910 *
3911 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3912 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3913 * will be stored into the context1 field of the IOCB for the completion
3914 * callback function to the PRLI Accept response ELS IOCB command.
3915 *
3916 * Return code
3917 * 0 - Successfully issued acc prli response
3918 * 1 - Failed to issue acc prli response
3919 **/
dea3101e 3920int
2e0fef85 3921lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
5b8bd0c9 3922 struct lpfc_nodelist *ndlp)
dea3101e 3923{
2e0fef85 3924 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
3925 PRLI *npr;
3926 lpfc_vpd_t *vpd;
3927 IOCB_t *icmd;
3928 IOCB_t *oldcmd;
3929 struct lpfc_iocbq *elsiocb;
dea3101e
JB
3930 struct lpfc_sli *psli;
3931 uint8_t *pcmd;
3932 uint16_t cmdsize;
3933 int rc;
3934
3935 psli = &phba->sli;
dea3101e 3936
92d7f7b0 3937 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
2e0fef85 3938 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
92d7f7b0 3939 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
c9f8735b
JW
3940 if (!elsiocb)
3941 return 1;
dea3101e 3942
5b8bd0c9
JS
3943 icmd = &elsiocb->iocb;
3944 oldcmd = &oldiocb->iocb;
7851fe2c
JS
3945 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3946 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3947
dea3101e 3948 /* Xmit PRLI ACC response tag <ulpIoTag> */
e8b62011
JS
3949 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3950 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
3951 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
3952 elsiocb->iotag, elsiocb->iocb.ulpContext,
3953 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3954 ndlp->nlp_rpi);
dea3101e
JB
3955 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3956
3957 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
92d7f7b0 3958 pcmd += sizeof(uint32_t);
dea3101e
JB
3959
3960 /* For PRLI, remainder of payload is PRLI parameter page */
92d7f7b0 3961 memset(pcmd, 0, sizeof(PRLI));
dea3101e
JB
3962
3963 npr = (PRLI *) pcmd;
3964 vpd = &phba->vpd;
3965 /*
0d2b6b83
JS
3966 * If the remote port is a target and our firmware version is 3.20 or
3967 * later, set the following bits for FC-TAPE support.
dea3101e 3968 */
0d2b6b83
JS
3969 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
3970 (vpd->rev.feaLevelHigh >= 0x02)) {
dea3101e
JB
3971 npr->ConfmComplAllowed = 1;
3972 npr->Retry = 1;
3973 npr->TaskRetryIdReq = 1;
3974 }
3975
3976 npr->acceptRspCode = PRLI_REQ_EXECUTED;
3977 npr->estabImagePair = 1;
3978 npr->readXferRdyDis = 1;
3979 npr->ConfmComplAllowed = 1;
3980
3981 npr->prliType = PRLI_FCP_TYPE;
3982 npr->initiatorFunc = 1;
3983
858c9f6c
JS
3984 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3985 "Issue ACC PRLI: did:x%x flg:x%x",
3986 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3987
dea3101e 3988 phba->fc_stat.elsXmitACC++;
858c9f6c 3989 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e 3990
3772a991 3991 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
3992 if (rc == IOCB_ERROR) {
3993 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3994 return 1;
dea3101e 3995 }
c9f8735b 3996 return 0;
dea3101e
JB
3997}
3998
e59058c4 3999/**
3621a710 4000 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
e59058c4
JS
4001 * @vport: pointer to a virtual N_Port data structure.
4002 * @format: rnid command format.
4003 * @oldiocb: pointer to the original lpfc command iocb data structure.
4004 * @ndlp: pointer to a node-list data structure.
4005 *
4006 * This routine issues a Request Node Identification Data (RNID) Accept
4007 * (ACC) response. It constructs the RNID ACC response command according to
4008 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
4009 * issue the response. Note that this command does not need to hold the ndlp
4010 * reference count for the callback. So, the ndlp reference count taken by
4011 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
4012 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
4013 * there is no ndlp reference available.
4014 *
4015 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4016 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4017 * will be stored into the context1 field of the IOCB for the completion
4018 * callback function. However, for the RNID Accept Response ELS command,
4019 * this is undone later by this routine after the IOCB is allocated.
4020 *
4021 * Return code
4022 * 0 - Successfully issued acc rnid response
4023 * 1 - Failed to issue acc rnid response
4024 **/
dea3101e 4025static int
2e0fef85 4026lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
329f9bc7 4027 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
dea3101e 4028{
2e0fef85 4029 struct lpfc_hba *phba = vport->phba;
dea3101e 4030 RNID *rn;
2e0fef85 4031 IOCB_t *icmd, *oldcmd;
dea3101e 4032 struct lpfc_iocbq *elsiocb;
dea3101e
JB
4033 struct lpfc_sli *psli;
4034 uint8_t *pcmd;
4035 uint16_t cmdsize;
4036 int rc;
4037
4038 psli = &phba->sli;
92d7f7b0
JS
4039 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
4040 + (2 * sizeof(struct lpfc_name));
dea3101e 4041 if (format)
92d7f7b0 4042 cmdsize += sizeof(RNID_TOP_DISC);
dea3101e 4043
2e0fef85
JS
4044 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4045 ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 4046 if (!elsiocb)
c9f8735b 4047 return 1;
dea3101e 4048
5b8bd0c9
JS
4049 icmd = &elsiocb->iocb;
4050 oldcmd = &oldiocb->iocb;
7851fe2c
JS
4051 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4052 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4053
dea3101e 4054 /* Xmit RNID ACC response tag <ulpIoTag> */
e8b62011
JS
4055 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4056 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
4057 elsiocb->iotag, elsiocb->iocb.ulpContext);
dea3101e 4058 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
dea3101e 4059 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 4060 pcmd += sizeof(uint32_t);
dea3101e 4061
92d7f7b0 4062 memset(pcmd, 0, sizeof(RNID));
dea3101e
JB
4063 rn = (RNID *) (pcmd);
4064 rn->Format = format;
92d7f7b0
JS
4065 rn->CommonLen = (2 * sizeof(struct lpfc_name));
4066 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4067 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
dea3101e
JB
4068 switch (format) {
4069 case 0:
4070 rn->SpecificLen = 0;
4071 break;
4072 case RNID_TOPOLOGY_DISC:
92d7f7b0 4073 rn->SpecificLen = sizeof(RNID_TOP_DISC);
dea3101e 4074 memcpy(&rn->un.topologyDisc.portName,
92d7f7b0 4075 &vport->fc_portname, sizeof(struct lpfc_name));
dea3101e
JB
4076 rn->un.topologyDisc.unitType = RNID_HBA;
4077 rn->un.topologyDisc.physPort = 0;
4078 rn->un.topologyDisc.attachedNodes = 0;
4079 break;
4080 default:
4081 rn->CommonLen = 0;
4082 rn->SpecificLen = 0;
4083 break;
4084 }
4085
858c9f6c
JS
4086 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4087 "Issue ACC RNID: did:x%x flg:x%x",
4088 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4089
dea3101e 4090 phba->fc_stat.elsXmitACC++;
858c9f6c 4091 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e 4092
3772a991 4093 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
4094 if (rc == IOCB_ERROR) {
4095 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 4096 return 1;
dea3101e 4097 }
c9f8735b 4098 return 0;
dea3101e
JB
4099}
4100
19ca7609
JS
4101/**
4102 * lpfc_els_clear_rrq - Clear the rq that this rrq describes.
4103 * @vport: pointer to a virtual N_Port data structure.
4104 * @iocb: pointer to the lpfc command iocb data structure.
4105 * @ndlp: pointer to a node-list data structure.
4106 *
4107 * Return - none
4108 **/
4109static void
4110lpfc_els_clear_rrq(struct lpfc_vport *vport,
4111 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
4112{
4113 struct lpfc_hba *phba = vport->phba;
4114 uint8_t *pcmd;
4115 struct RRQ *rrq;
4116 uint16_t rxid;
1151e3ec 4117 uint16_t xri;
19ca7609
JS
4118 struct lpfc_node_rrq *prrq;
4119
4120
4121 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
4122 pcmd += sizeof(uint32_t);
4123 rrq = (struct RRQ *)pcmd;
1151e3ec 4124 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
9589b062 4125 rxid = bf_get(rrq_rxid, rrq);
19ca7609
JS
4126
4127 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4128 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
4129 " x%x x%x\n",
1151e3ec 4130 be32_to_cpu(bf_get(rrq_did, rrq)),
9589b062 4131 bf_get(rrq_oxid, rrq),
19ca7609
JS
4132 rxid,
4133 iocb->iotag, iocb->iocb.ulpContext);
4134
4135 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4136 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
4137 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
1151e3ec 4138 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
9589b062 4139 xri = bf_get(rrq_oxid, rrq);
1151e3ec
JS
4140 else
4141 xri = rxid;
4142 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
19ca7609 4143 if (prrq)
1151e3ec 4144 lpfc_clr_rrq_active(phba, xri, prrq);
19ca7609
JS
4145 return;
4146}
4147
12265f68
JS
4148/**
4149 * lpfc_els_rsp_echo_acc - Issue echo acc response
4150 * @vport: pointer to a virtual N_Port data structure.
4151 * @data: pointer to echo data to return in the accept.
4152 * @oldiocb: pointer to the original lpfc command iocb data structure.
4153 * @ndlp: pointer to a node-list data structure.
4154 *
4155 * Return code
4156 * 0 - Successfully issued acc echo response
4157 * 1 - Failed to issue acc echo response
4158 **/
4159static int
4160lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4161 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4162{
4163 struct lpfc_hba *phba = vport->phba;
4164 struct lpfc_iocbq *elsiocb;
4165 struct lpfc_sli *psli;
4166 uint8_t *pcmd;
4167 uint16_t cmdsize;
4168 int rc;
4169
4170 psli = &phba->sli;
4171 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
4172
bf08611b
JS
4173 /* The accumulated length can exceed the BPL_SIZE. For
4174 * now, use this as the limit
4175 */
4176 if (cmdsize > LPFC_BPL_SIZE)
4177 cmdsize = LPFC_BPL_SIZE;
12265f68
JS
4178 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4179 ndlp->nlp_DID, ELS_CMD_ACC);
4180 if (!elsiocb)
4181 return 1;
4182
7851fe2c
JS
4183 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
4184 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
4185
12265f68
JS
4186 /* Xmit ECHO ACC response tag <ulpIoTag> */
4187 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4188 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
4189 elsiocb->iotag, elsiocb->iocb.ulpContext);
4190 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4191 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4192 pcmd += sizeof(uint32_t);
4193 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
4194
4195 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4196 "Issue ACC ECHO: did:x%x flg:x%x",
4197 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4198
4199 phba->fc_stat.elsXmitACC++;
4200 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
12265f68
JS
4201
4202 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4203 if (rc == IOCB_ERROR) {
4204 lpfc_els_free_iocb(phba, elsiocb);
4205 return 1;
4206 }
4207 return 0;
4208}
4209
e59058c4 4210/**
3621a710 4211 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
e59058c4
JS
4212 * @vport: pointer to a host virtual N_Port data structure.
4213 *
4214 * This routine issues Address Discover (ADISC) ELS commands to those
4215 * N_Ports which are in node port recovery state and ADISC has not been issued
4216 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
4217 * lpfc_issue_els_adisc() routine, the per-@vport discovery count
4218 * (num_disc_nodes) shall be incremented. If num_disc_nodes reaches a
4219 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
4220 * be marked with the FC_NLP_MORE bit and the process of issuing the remaining
4221 * ADISC IOCBs quits, to be picked up later. On the other hand, if after walking
4222 * through all the ndlps of the @vport no ADISC IOCB has been issued, the
4223 * FC_NLP_MORE bit shall be cleared from the @vport fc_flag, indicating there
4224 * are no more ADISCs that need to be sent.
4225 *
4226 * Return code
4227 * The number of N_Ports with adisc issued.
4228 **/
dea3101e 4229int
2e0fef85 4230lpfc_els_disc_adisc(struct lpfc_vport *vport)
dea3101e 4231{
2e0fef85 4232 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 4233 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 4234 int sentadisc = 0;
dea3101e 4235
685f0bf7 4236 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2e0fef85 4237 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
4238 if (!NLP_CHK_NODE_ACT(ndlp))
4239 continue;
685f0bf7
JS
4240 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4241 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4242 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
2e0fef85 4243 spin_lock_irq(shost->host_lock);
685f0bf7 4244 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2e0fef85 4245 spin_unlock_irq(shost->host_lock);
685f0bf7 4246 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
4247 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4248 lpfc_issue_els_adisc(vport, ndlp, 0);
685f0bf7 4249 sentadisc++;
2e0fef85
JS
4250 vport->num_disc_nodes++;
4251 if (vport->num_disc_nodes >=
3de2a653 4252 vport->cfg_discovery_threads) {
2e0fef85
JS
4253 spin_lock_irq(shost->host_lock);
4254 vport->fc_flag |= FC_NLP_MORE;
4255 spin_unlock_irq(shost->host_lock);
685f0bf7 4256 break;
dea3101e
JB
4257 }
4258 }
4259 }
4260 if (sentadisc == 0) {
2e0fef85
JS
4261 spin_lock_irq(shost->host_lock);
4262 vport->fc_flag &= ~FC_NLP_MORE;
4263 spin_unlock_irq(shost->host_lock);
dea3101e 4264 }
2fe165b6 4265 return sentadisc;
dea3101e
JB
4266}
4267
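/*
 * Note on FC_NLP_MORE: the return value is the number of ADISCs actually sent
 * in this pass. When the per-vport discovery-thread limit is hit, the routine
 * leaves FC_NLP_MORE set so that discovery completion handling knows to call
 * back in here for the remaining NPR nodes; when nothing was sent the flag is
 * cleared.
 */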
e59058c4 4268/**
3621a710 4269 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
e59058c4
JS
4270 * @vport: pointer to a host virtual N_Port data structure.
4271 *
4272 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
4273 * which are in node port recovery state on a @vport. Each time an ELS
4274 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
4275 * the per-@vport discovery count (num_disc_nodes) shall be
4276 * incremented. If num_disc_nodes reaches a pre-configured threshold
4277 * (cfg_discovery_threads), the @vport fc_flag will be marked with the
4278 * FC_NLP_MORE bit and the process of issuing the remaining PLOGI IOCBs quits,
4279 * to be picked up later. On the other hand, if after walking through all the
4280 * ndlps of the @vport no PLOGI IOCB has been issued, the FC_NLP_MORE bit
4281 * shall be cleared from the @vport fc_flag, indicating there are no more
4282 * PLOGIs that need to be sent.
4283 *
4284 * Return code
4285 * The number of N_Ports with plogi issued.
4286 **/
dea3101e 4287int
2e0fef85 4288lpfc_els_disc_plogi(struct lpfc_vport *vport)
dea3101e 4289{
2e0fef85 4290 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 4291 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 4292 int sentplogi = 0;
dea3101e 4293
2e0fef85
JS
4294 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
4295 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
4296 if (!NLP_CHK_NODE_ACT(ndlp))
4297 continue;
685f0bf7
JS
4298 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4299 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4300 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
4301 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
4302 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
4303 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4304 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
685f0bf7 4305 sentplogi++;
2e0fef85
JS
4306 vport->num_disc_nodes++;
4307 if (vport->num_disc_nodes >=
3de2a653 4308 vport->cfg_discovery_threads) {
2e0fef85
JS
4309 spin_lock_irq(shost->host_lock);
4310 vport->fc_flag |= FC_NLP_MORE;
4311 spin_unlock_irq(shost->host_lock);
685f0bf7 4312 break;
dea3101e
JB
4313 }
4314 }
4315 }
87af33fe
JS
4316 if (sentplogi) {
4317 lpfc_set_disctmo(vport);
4318 }
4319 else {
2e0fef85
JS
4320 spin_lock_irq(shost->host_lock);
4321 vport->fc_flag &= ~FC_NLP_MORE;
4322 spin_unlock_irq(shost->host_lock);
dea3101e 4323 }
2fe165b6 4324 return sentplogi;
dea3101e
JB
4325}
4326
e59058c4 4327/**
3621a710 4328 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
e59058c4
JS
4329 * @vport: pointer to a host virtual N_Port data structure.
4330 *
4331 * This routine cleans up any Registration State Change Notification
4332 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
4333 * @vport, together with the host_lock, is used to prevent multiple threads
4334 * from accessing the RSCN array of the same @vport at the same time.
4335 **/
92d7f7b0 4336void
2e0fef85 4337lpfc_els_flush_rscn(struct lpfc_vport *vport)
dea3101e 4338{
2e0fef85
JS
4339 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4340 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
4341 int i;
4342
7f5f3d0d
JS
4343 spin_lock_irq(shost->host_lock);
4344 if (vport->fc_rscn_flush) {
4345 /* Another thread is walking fc_rscn_id_list on this vport */
4346 spin_unlock_irq(shost->host_lock);
4347 return;
4348 }
4349 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
4350 vport->fc_rscn_flush = 1;
4351 spin_unlock_irq(shost->host_lock);
4352
2e0fef85 4353 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
92d7f7b0 4354 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
2e0fef85 4355 vport->fc_rscn_id_list[i] = NULL;
dea3101e 4356 }
2e0fef85
JS
4357 spin_lock_irq(shost->host_lock);
4358 vport->fc_rscn_id_cnt = 0;
4359 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
4360 spin_unlock_irq(shost->host_lock);
4361 lpfc_can_disctmo(vport);
7f5f3d0d
JS
4362 /* Indicate we are done walking this fc_rscn_id_list */
4363 vport->fc_rscn_flush = 0;
dea3101e
JB
4364}
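/*
 * Illustrative sketch only: the fc_rscn_flush "walker token" pattern used
 * above and again in lpfc_rscn_payload_check() and lpfc_els_rcv_rscn().
 * The flag is tested and set under host_lock so only one thread walks
 * fc_rscn_id_list at a time; the walk itself runs with the lock dropped.
 *
 *	spin_lock_irq(shost->host_lock);
 *	if (vport->fc_rscn_flush) {	// another walker owns the list
 *		spin_unlock_irq(shost->host_lock);
 *		return;
 *	}
 *	vport->fc_rscn_flush = 1;	// take the token
 *	spin_unlock_irq(shost->host_lock);
 *	// ... walk vport->fc_rscn_id_list without the lock ...
 *	vport->fc_rscn_flush = 0;	// release the token
 */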
4365
e59058c4 4366/**
3621a710 4367 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
e59058c4
JS
4368 * @vport: pointer to a host virtual N_Port data structure.
4369 * @did: remote destination port identifier.
4370 *
4371 * This routine checks whether there is any pending Registration State
4372 * Change Notification (RSCN) to a @did on @vport.
4373 *
4374 * Return code
4375 * Non-zero - The @did matched with a pending rscn
4376 * 0 - not able to match @did with a pending rscn
4377 **/
dea3101e 4378int
2e0fef85 4379lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
dea3101e
JB
4380{
4381 D_ID ns_did;
4382 D_ID rscn_did;
dea3101e 4383 uint32_t *lp;
92d7f7b0 4384 uint32_t payload_len, i;
7f5f3d0d 4385 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e
JB
4386
4387 ns_did.un.word = did;
dea3101e
JB
4388
4389 /* Never match fabric nodes for RSCNs */
4390 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
2e0fef85 4391 return 0;
dea3101e
JB
4392
4393 /* If we are doing a FULL RSCN rediscovery, match everything */
2e0fef85 4394 if (vport->fc_flag & FC_RSCN_DISCOVERY)
c9f8735b 4395 return did;
dea3101e 4396
7f5f3d0d
JS
4397 spin_lock_irq(shost->host_lock);
4398 if (vport->fc_rscn_flush) {
4399 /* Another thread is walking fc_rscn_id_list on this vport */
4400 spin_unlock_irq(shost->host_lock);
4401 return 0;
4402 }
4403 /* Indicate we are walking fc_rscn_id_list on this vport */
4404 vport->fc_rscn_flush = 1;
4405 spin_unlock_irq(shost->host_lock);
2e0fef85 4406 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
92d7f7b0
JS
4407 lp = vport->fc_rscn_id_list[i]->virt;
4408 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4409 payload_len -= sizeof(uint32_t); /* take off word 0 */
dea3101e 4410 while (payload_len) {
92d7f7b0
JS
4411 rscn_did.un.word = be32_to_cpu(*lp++);
4412 payload_len -= sizeof(uint32_t);
eaf15d5b
JS
4413 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
4414 case RSCN_ADDRESS_FORMAT_PORT:
6fb120a7
JS
4415 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4416 && (ns_did.un.b.area == rscn_did.un.b.area)
4417 && (ns_did.un.b.id == rscn_did.un.b.id))
7f5f3d0d 4418 goto return_did_out;
dea3101e 4419 break;
eaf15d5b 4420 case RSCN_ADDRESS_FORMAT_AREA:
dea3101e
JB
4421 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4422 && (ns_did.un.b.area == rscn_did.un.b.area))
7f5f3d0d 4423 goto return_did_out;
dea3101e 4424 break;
eaf15d5b 4425 case RSCN_ADDRESS_FORMAT_DOMAIN:
dea3101e 4426 if (ns_did.un.b.domain == rscn_did.un.b.domain)
7f5f3d0d 4427 goto return_did_out;
dea3101e 4428 break;
eaf15d5b 4429 case RSCN_ADDRESS_FORMAT_FABRIC:
7f5f3d0d 4430 goto return_did_out;
dea3101e
JB
4431 }
4432 }
92d7f7b0 4433 }
7f5f3d0d
JS
4434 /* Indicate we are done with walking fc_rscn_id_list on this vport */
4435 vport->fc_rscn_flush = 0;
92d7f7b0 4436 return 0;
7f5f3d0d
JS
4437return_did_out:
4438 /* Indicate we are done with walking fc_rscn_id_list on this vport */
4439 vport->fc_rscn_flush = 0;
4440 return did;
dea3101e
JB
4441}
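/*
 * Illustrative sketch only: callers treat the return value of
 * lpfc_rscn_payload_check() as "this DID is covered by a pending RSCN",
 * e.g. the recovery walk in lpfc_rscn_recovery_check() below:
 *
 *	if (lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
 *		// DID falls inside one of the saved RSCN entries (port,
 *		// area, domain or fabric address format)
 *		lpfc_disc_state_machine(vport, ndlp, NULL,
 *					NLP_EVT_DEVICE_RECOVERY);
 */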
4442
e59058c4 4443/**
3621a710 4444 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
e59058c4
JS
4445 * @vport: pointer to a host virtual N_Port data structure.
4446 *
4447 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
4448 * state machine for a @vport's nodes that are with pending RSCN (Registration
4449 * State Change Notification).
4450 *
4451 * Return code
4452 * 0 - Successful (currently always returns 0)
4453 **/
dea3101e 4454static int
2e0fef85 4455lpfc_rscn_recovery_check(struct lpfc_vport *vport)
dea3101e 4456{
685f0bf7 4457 struct lpfc_nodelist *ndlp = NULL;
dea3101e 4458
0d2b6b83 4459 /* Move all nodes affected by pending RSCNs to NPR state. */
2e0fef85 4460 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093 4461 if (!NLP_CHK_NODE_ACT(ndlp) ||
0d2b6b83
JS
4462 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
4463 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
685f0bf7 4464 continue;
2e0fef85 4465 lpfc_disc_state_machine(vport, ndlp, NULL,
0d2b6b83
JS
4466 NLP_EVT_DEVICE_RECOVERY);
4467 lpfc_cancel_retry_delay_tmo(vport, ndlp);
dea3101e 4468 }
c9f8735b 4469 return 0;
dea3101e
JB
4470}
4471
ddcc50f0 4472/**
3621a710 4473 * lpfc_send_rscn_event - Send an RSCN event to management application
ddcc50f0
JS
4474 * @vport: pointer to a host virtual N_Port data structure.
4475 * @cmdiocb: pointer to lpfc command iocb data structure.
4476 *
4477 * lpfc_send_rscn_event sends an RSCN netlink event to management
4478 * applications.
4479 */
4480static void
4481lpfc_send_rscn_event(struct lpfc_vport *vport,
4482 struct lpfc_iocbq *cmdiocb)
4483{
4484 struct lpfc_dmabuf *pcmd;
4485 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4486 uint32_t *payload_ptr;
4487 uint32_t payload_len;
4488 struct lpfc_rscn_event_header *rscn_event_data;
4489
4490 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4491 payload_ptr = (uint32_t *) pcmd->virt;
4492 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
4493
4494 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
4495 payload_len, GFP_KERNEL);
4496 if (!rscn_event_data) {
4497 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4498 "0147 Failed to allocate memory for RSCN event\n");
4499 return;
4500 }
4501 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
4502 rscn_event_data->payload_length = payload_len;
4503 memcpy(rscn_event_data->rscn_payload, payload_ptr,
4504 payload_len);
4505
4506 fc_host_post_vendor_event(shost,
4507 fc_get_event_number(),
4508 sizeof(struct lpfc_els_event_header) + payload_len,
4509 (char *)rscn_event_data,
4510 LPFC_NL_VENDOR_ID);
4511
4512 kfree(rscn_event_data);
4513}
4514
e59058c4 4515/**
3621a710 4516 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
e59058c4
JS
4517 * @vport: pointer to a host virtual N_Port data structure.
4518 * @cmdiocb: pointer to lpfc command iocb data structure.
4519 * @ndlp: pointer to a node-list data structure.
4520 *
4521 * This routine processes an unsolicited RSCN (Registration State Change
4522 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
4523 * to invoke fc_host_post_event() routine to the FC transport layer. If the
4524 * discover state machine is about to begin discovery, it just accepts the
4525 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
4526 * contains N_Port IDs for other vports on this HBA, it just accepts the
4527 * RSCN and ignores it. If the state machine is in the recovery
4528 * state, the fc_rscn_id_list of this @vport is walked and the
4529 * lpfc_rscn_recovery_check() routine is invoked to send a recovery event to
4530 * all nodes that match the RSCN payload. Otherwise, the lpfc_els_handle_rscn()
4531 * routine is invoked to handle the RSCN event.
4532 *
4533 * Return code
4534 * 0 - Just sent the acc response
4535 * 1 - Sent the acc response and waited for name server completion
4536 **/
dea3101e 4537static int
2e0fef85 4538lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
51ef4c26 4539 struct lpfc_nodelist *ndlp)
dea3101e 4540{
2e0fef85
JS
4541 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4542 struct lpfc_hba *phba = vport->phba;
dea3101e 4543 struct lpfc_dmabuf *pcmd;
92d7f7b0 4544 uint32_t *lp, *datap;
dea3101e 4545 IOCB_t *icmd;
92d7f7b0 4546 uint32_t payload_len, length, nportid, *cmd;
7f5f3d0d 4547 int rscn_cnt;
92d7f7b0 4548 int rscn_id = 0, hba_id = 0;
d2873e4c 4549 int i;
dea3101e
JB
4550
4551 icmd = &cmdiocb->iocb;
4552 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4553 lp = (uint32_t *) pcmd->virt;
4554
92d7f7b0
JS
4555 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4556 payload_len -= sizeof(uint32_t); /* take off word 0 */
dea3101e 4557 /* RSCN received */
e8b62011
JS
4558 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4559 "0214 RSCN received Data: x%x x%x x%x x%x\n",
7f5f3d0d
JS
4560 vport->fc_flag, payload_len, *lp,
4561 vport->fc_rscn_id_cnt);
ddcc50f0
JS
4562
4563 /* Send an RSCN event to the management application */
4564 lpfc_send_rscn_event(vport, cmdiocb);
4565
d2873e4c 4566 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
2e0fef85 4567 fc_host_post_event(shost, fc_get_event_number(),
d2873e4c
JS
4568 FCH_EVT_RSCN, lp[i]);
4569
dea3101e
JB
4570 /* If we are about to begin discovery, just ACC the RSCN.
4571 * Discovery processing will satisfy it.
4572 */
2e0fef85 4573 if (vport->port_state <= LPFC_NS_QRY) {
858c9f6c
JS
4574 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4575 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
4576 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4577
51ef4c26 4578 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
c9f8735b 4579 return 0;
dea3101e
JB
4580 }
4581
92d7f7b0
JS
4582 /* If this RSCN just contains NPortIDs for other vports on this HBA,
4583 * just ACC and ignore it.
4584 */
4585 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3de2a653 4586 !(vport->cfg_peer_port_login)) {
92d7f7b0
JS
4587 i = payload_len;
4588 datap = lp;
4589 while (i > 0) {
4590 nportid = *datap++;
4591 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
4592 i -= sizeof(uint32_t);
4593 rscn_id++;
549e55cd
JS
4594 if (lpfc_find_vport_by_did(phba, nportid))
4595 hba_id++;
92d7f7b0
JS
4596 }
4597 if (rscn_id == hba_id) {
4598 /* ALL NPortIDs in RSCN are on HBA */
e8b62011 4599 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
d7c255b2 4600 "0219 Ignore RSCN "
e8b62011
JS
4601 "Data: x%x x%x x%x x%x\n",
4602 vport->fc_flag, payload_len,
7f5f3d0d 4603 *lp, vport->fc_rscn_id_cnt);
858c9f6c
JS
4604 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4605 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
4606 ndlp->nlp_DID, vport->port_state,
4607 ndlp->nlp_flag);
4608
92d7f7b0 4609 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
51ef4c26 4610 ndlp, NULL);
92d7f7b0
JS
4611 return 0;
4612 }
4613 }
4614
7f5f3d0d
JS
4615 spin_lock_irq(shost->host_lock);
4616 if (vport->fc_rscn_flush) {
4617 /* Another thread is walking fc_rscn_id_list on this vport */
7f5f3d0d 4618 vport->fc_flag |= FC_RSCN_DISCOVERY;
97957244 4619 spin_unlock_irq(shost->host_lock);
58da1ffb
JS
4620 /* Send back ACC */
4621 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7f5f3d0d
JS
4622 return 0;
4623 }
4624 /* Indicate we are walking fc_rscn_id_list on this vport */
4625 vport->fc_rscn_flush = 1;
4626 spin_unlock_irq(shost->host_lock);
af901ca1 4627 /* Get the array count after successfully have the token */
7f5f3d0d 4628 rscn_cnt = vport->fc_rscn_id_cnt;
dea3101e
JB
4629 /* If we are already processing an RSCN, save the received
4630 * RSCN payload buffer, cmdiocb->context2 to process later.
4631 */
2e0fef85 4632 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
858c9f6c
JS
4633 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4634 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
4635 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4636
09372820 4637 spin_lock_irq(shost->host_lock);
92d7f7b0
JS
4638 vport->fc_flag |= FC_RSCN_DEFERRED;
4639 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
2e0fef85 4640 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
2e0fef85
JS
4641 vport->fc_flag |= FC_RSCN_MODE;
4642 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
4643 if (rscn_cnt) {
4644 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
4645 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
4646 }
4647 if ((rscn_cnt) &&
4648 (payload_len + length <= LPFC_BPL_SIZE)) {
4649 *cmd &= ELS_CMD_MASK;
7f5f3d0d 4650 *cmd |= cpu_to_be32(payload_len + length);
92d7f7b0
JS
4651 memcpy(((uint8_t *)cmd) + length, lp,
4652 payload_len);
4653 } else {
4654 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
4655 vport->fc_rscn_id_cnt++;
4656 /* If we zero cmdiocb->context2, the calling
4657 * routine will not try to free it.
4658 */
4659 cmdiocb->context2 = NULL;
4660 }
dea3101e 4661 /* Deferred RSCN */
e8b62011
JS
4662 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4663 "0235 Deferred RSCN "
4664 "Data: x%x x%x x%x\n",
4665 vport->fc_rscn_id_cnt, vport->fc_flag,
4666 vport->port_state);
dea3101e 4667 } else {
2e0fef85
JS
4668 vport->fc_flag |= FC_RSCN_DISCOVERY;
4669 spin_unlock_irq(shost->host_lock);
dea3101e 4670 /* ReDiscovery RSCN */
e8b62011
JS
4671 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4672 "0234 ReDiscovery RSCN "
4673 "Data: x%x x%x x%x\n",
4674 vport->fc_rscn_id_cnt, vport->fc_flag,
4675 vport->port_state);
dea3101e 4676 }
7f5f3d0d
JS
4677 /* Indicate we are done walking fc_rscn_id_list on this vport */
4678 vport->fc_rscn_flush = 0;
dea3101e 4679 /* Send back ACC */
51ef4c26 4680 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e 4681 /* send RECOVERY event for ALL nodes that match RSCN payload */
2e0fef85 4682 lpfc_rscn_recovery_check(vport);
09372820 4683 spin_lock_irq(shost->host_lock);
92d7f7b0 4684 vport->fc_flag &= ~FC_RSCN_DEFERRED;
09372820 4685 spin_unlock_irq(shost->host_lock);
c9f8735b 4686 return 0;
dea3101e 4687 }
858c9f6c
JS
4688 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4689 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
4690 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4691
2e0fef85
JS
4692 spin_lock_irq(shost->host_lock);
4693 vport->fc_flag |= FC_RSCN_MODE;
4694 spin_unlock_irq(shost->host_lock);
4695 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
7f5f3d0d
JS
4696 /* Indicate we are done walking fc_rscn_id_list on this vport */
4697 vport->fc_rscn_flush = 0;
dea3101e
JB
4698 /*
4699 * If we zero cmdiocb->context2, the calling routine will
4700 * not try to free it.
4701 */
4702 cmdiocb->context2 = NULL;
2e0fef85 4703 lpfc_set_disctmo(vport);
dea3101e 4704 /* Send back ACC */
51ef4c26 4705 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e 4706 /* send RECOVERY event for ALL nodes that match RSCN payload */
2e0fef85 4707 lpfc_rscn_recovery_check(vport);
2e0fef85 4708 return lpfc_els_handle_rscn(vport);
dea3101e
JB
4709}
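/*
 * Illustrative sketch only: how the FC_RSCN_DEFERRED path above folds a new
 * RSCN into the previously saved payload when it fits.  Word 0 of the saved
 * buffer keeps the ELS command bits (ELS_CMD_MASK) and the payload length in
 * the remaining bits, so growing the buffer is just a length patch plus a
 * copy of the new page entries:
 *
 *	cmd = vport->fc_rscn_id_list[rscn_cnt - 1]->virt;
 *	length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
 *	if (payload_len + length <= LPFC_BPL_SIZE) {
 *		*cmd &= ELS_CMD_MASK;			   // keep command bits
 *		*cmd |= cpu_to_be32(payload_len + length); // patch length
 *		memcpy((uint8_t *)cmd + length, lp, payload_len);
 *	}
 */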
4710
e59058c4 4711/**
3621a710 4712 * lpfc_els_handle_rscn - Handle rscn for a vport
e59058c4
JS
4713 * @vport: pointer to a host virtual N_Port data structure.
4714 *
4715 * This routine handles the Registration State Change Notification
4716 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
4717 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
4718 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
4719 * NameServer shall be issued. If CT command to the NameServer fails to be
4720 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
4721 * RSCN activities with the @vport.
4722 *
4723 * Return code
4724 * 0 - Cleaned up rscn on the @vport
4725 * 1 - Wait for plogi to name server before proceeding
4726 **/
dea3101e 4727int
2e0fef85 4728lpfc_els_handle_rscn(struct lpfc_vport *vport)
dea3101e
JB
4729{
4730 struct lpfc_nodelist *ndlp;
2e0fef85 4731 struct lpfc_hba *phba = vport->phba;
dea3101e 4732
92d7f7b0
JS
4733 /* Ignore RSCN if the port is being torn down. */
4734 if (vport->load_flag & FC_UNLOADING) {
4735 lpfc_els_flush_rscn(vport);
4736 return 0;
4737 }
4738
dea3101e 4739 /* Start timer for RSCN processing */
2e0fef85 4740 lpfc_set_disctmo(vport);
dea3101e
JB
4741
4742 /* RSCN processed */
e8b62011
JS
4743 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4744 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
4745 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
4746 vport->port_state);
dea3101e
JB
4747
4748 /* To process RSCN, first compare RSCN data with NameServer */
2e0fef85 4749 vport->fc_ns_retry = 0;
0ff10d46
JS
4750 vport->num_disc_nodes = 0;
4751
2e0fef85 4752 ndlp = lpfc_findnode_did(vport, NameServer_DID);
e47c9093
JS
4753 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
4754 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
dea3101e 4755 /* Good ndlp, issue CT Request to NameServer */
92d7f7b0 4756 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
dea3101e
JB
4757 /* Wait for NameServer query cmpl before we can
4758 continue */
c9f8735b 4759 return 1;
dea3101e
JB
4760 } else {
4761 /* If login to NameServer does not exist, issue one */
4762 /* Good status, issue PLOGI to NameServer */
2e0fef85 4763 ndlp = lpfc_findnode_did(vport, NameServer_DID);
e47c9093 4764 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
dea3101e
JB
4765 /* Wait for NameServer login cmpl before we can
4766 continue */
c9f8735b 4767 return 1;
2e0fef85 4768
e47c9093
JS
4769 if (ndlp) {
4770 ndlp = lpfc_enable_node(vport, ndlp,
4771 NLP_STE_PLOGI_ISSUE);
4772 if (!ndlp) {
4773 lpfc_els_flush_rscn(vport);
4774 return 0;
4775 }
4776 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
dea3101e 4777 } else {
e47c9093
JS
4778 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4779 if (!ndlp) {
4780 lpfc_els_flush_rscn(vport);
4781 return 0;
4782 }
2e0fef85 4783 lpfc_nlp_init(vport, ndlp, NameServer_DID);
5024ab17 4784 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 4785 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
dea3101e 4786 }
e47c9093
JS
4787 ndlp->nlp_type |= NLP_FABRIC;
4788 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
4789 /* Wait for NameServer login cmpl before we can
4790 * continue
4791 */
4792 return 1;
dea3101e
JB
4793 }
4794
2e0fef85 4795 lpfc_els_flush_rscn(vport);
c9f8735b 4796 return 0;
dea3101e
JB
4797}
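/*
 * Illustrative sketch only: how a caller of lpfc_els_handle_rscn() is
 * expected to treat the return code, per the description above:
 *
 *	if (lpfc_els_handle_rscn(vport))
 *		return;		// GID_FT or PLOGI to the NameServer is
 *				// outstanding; its completion resumes the
 *				// RSCN work
 *	// a return of 0 means the RSCN has already been cleaned up/flushed
 */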
4798
e59058c4 4799/**
3621a710 4800 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
e59058c4
JS
4801 * @vport: pointer to a host virtual N_Port data structure.
4802 * @cmdiocb: pointer to lpfc command iocb data structure.
4803 * @ndlp: pointer to a node-list data structure.
4804 *
4805 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
4806 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
4807 * point topology. As an unsolicited FLOGI should not be received in a loop
4808 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
4809 * lpfc_check_sparm() routine is invoked to check the parameters in the
4810 * unsolicited FLOGI. If parameter validation fails, the routine
4811 * lpfc_els_rsp_reject() shall be called with reject reason code set to
4812 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
4813 * FLOGI shall be compared with the Port WWN of the @vport to determine who
4814 * will initiate PLOGI. The party with the higher lexicographical value has
4815 * higher priority (as the winning port) and will initiate PLOGI and
4816 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
4817 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
4818 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
4819 *
4820 * Return code
4821 * 0 - Successfully processed the unsolicited flogi
4822 * 1 - Failed to process the unsolicited flogi
4823 **/
dea3101e 4824static int
2e0fef85 4825lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
51ef4c26 4826 struct lpfc_nodelist *ndlp)
dea3101e 4827{
2e0fef85
JS
4828 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4829 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
4830 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4831 uint32_t *lp = (uint32_t *) pcmd->virt;
4832 IOCB_t *icmd = &cmdiocb->iocb;
4833 struct serv_parm *sp;
4834 LPFC_MBOXQ_t *mbox;
4835 struct ls_rjt stat;
4836 uint32_t cmd, did;
4837 int rc;
4838
4839 cmd = *lp++;
4840 sp = (struct serv_parm *) lp;
4841
4842 /* FLOGI received */
4843
2e0fef85 4844 lpfc_set_disctmo(vport);
dea3101e 4845
76a95d75 4846 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
dea3101e
JB
4847 /* We should never receive a FLOGI in loop mode, ignore it */
4848 did = icmd->un.elsreq64.remoteID;
4849
4850 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
4851 Loop Mode */
e8b62011
JS
4852 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4853 "0113 An FLOGI ELS command x%x was "
4854 "received from DID x%x in Loop Mode\n",
4855 cmd, did);
c9f8735b 4856 return 1;
dea3101e
JB
4857 }
4858
4859 did = Fabric_DID;
4860
341af102 4861 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
dea3101e
JB
4862 /* For a FLOGI we accept, then if our portname is greater
4863 * then the remote portname we initiate Nport login.
4864 */
4865
2e0fef85 4866 rc = memcmp(&vport->fc_portname, &sp->portName,
92d7f7b0 4867 sizeof(struct lpfc_name));
dea3101e
JB
4868
4869 if (!rc) {
2e0fef85
JS
4870 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4871 if (!mbox)
c9f8735b 4872 return 1;
2e0fef85 4873
dea3101e
JB
4874 lpfc_linkdown(phba);
4875 lpfc_init_link(phba, mbox,
4876 phba->cfg_topology,
4877 phba->cfg_link_speed);
04c68496 4878 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
dea3101e 4879 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
ed957684 4880 mbox->vport = vport;
0b727fea 4881 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5b8bd0c9 4882 lpfc_set_loopback_flag(phba);
dea3101e 4883 if (rc == MBX_NOT_FINISHED) {
329f9bc7 4884 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e 4885 }
c9f8735b 4886 return 1;
2fe165b6 4887 } else if (rc > 0) { /* greater than */
2e0fef85
JS
4888 spin_lock_irq(shost->host_lock);
4889 vport->fc_flag |= FC_PT2PT_PLOGI;
4890 spin_unlock_irq(shost->host_lock);
dea3101e 4891 }
2e0fef85
JS
4892 spin_lock_irq(shost->host_lock);
4893 vport->fc_flag |= FC_PT2PT;
4894 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4895 spin_unlock_irq(shost->host_lock);
dea3101e
JB
4896 } else {
4897 /* Reject this request because invalid parameters */
4898 stat.un.b.lsRjtRsvd0 = 0;
4899 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4900 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
4901 stat.un.b.vendorUnique = 0;
858c9f6c
JS
4902 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4903 NULL);
c9f8735b 4904 return 1;
dea3101e
JB
4905 }
4906
4907 /* Send back ACC */
51ef4c26 4908 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
dea3101e 4909
c9f8735b 4910 return 0;
dea3101e
JB
4911}
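/*
 * Illustrative sketch only: the point-to-point "who sends PLOGI" decision
 * made above reduces to a byte-wise WWPN compare; the side with the larger
 * Port_Name wins and later originates the PLOGI that assigns the Port_IDs
 * for both nodes:
 *
 *	rc = memcmp(&vport->fc_portname, &sp->portName,
 *		    sizeof(struct lpfc_name));
 *	if (rc > 0)			// local WWPN is larger: we PLOGI
 *		vport->fc_flag |= FC_PT2PT_PLOGI;
 *	else if (rc == 0)		// identical WWPNs: bring the link
 *		lpfc_linkdown(phba);	// down and re-init it
 *	// rc < 0: the remote port wins and will send us the PLOGI
 */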
4912
e59058c4 4913/**
3621a710 4914 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
e59058c4
JS
4915 * @vport: pointer to a host virtual N_Port data structure.
4916 * @cmdiocb: pointer to lpfc command iocb data structure.
4917 * @ndlp: pointer to a node-list data structure.
4918 *
4919 * This routine processes Request Node Identification Data (RNID) IOCB
4920 * received as an ELS unsolicited event. Only when the RNID specifies format
4921 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
4922 * does this routine invoke the lpfc_els_rsp_rnid_acc() routine to
4923 * Accept (ACC) the RNID ELS command. All the other RNID formats are
4924 * rejected by invoking the lpfc_els_rsp_reject() routine.
4925 *
4926 * Return code
4927 * 0 - Successfully processed rnid iocb (currently always returns 0)
4928 **/
dea3101e 4929static int
2e0fef85
JS
4930lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4931 struct lpfc_nodelist *ndlp)
dea3101e
JB
4932{
4933 struct lpfc_dmabuf *pcmd;
4934 uint32_t *lp;
4935 IOCB_t *icmd;
4936 RNID *rn;
4937 struct ls_rjt stat;
4938 uint32_t cmd, did;
4939
4940 icmd = &cmdiocb->iocb;
4941 did = icmd->un.elsreq64.remoteID;
4942 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4943 lp = (uint32_t *) pcmd->virt;
4944
4945 cmd = *lp++;
4946 rn = (RNID *) lp;
4947
4948 /* RNID received */
4949
4950 switch (rn->Format) {
4951 case 0:
4952 case RNID_TOPOLOGY_DISC:
4953 /* Send back ACC */
2e0fef85 4954 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
dea3101e
JB
4955 break;
4956 default:
4957 /* Reject this request because format not supported */
4958 stat.un.b.lsRjtRsvd0 = 0;
4959 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4960 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4961 stat.un.b.vendorUnique = 0;
858c9f6c
JS
4962 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4963 NULL);
dea3101e 4964 }
c9f8735b 4965 return 0;
dea3101e
JB
4966}
4967
12265f68
JS
4968/**
4969 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
4970 * @vport: pointer to a host virtual N_Port data structure.
4971 * @cmdiocb: pointer to lpfc command iocb data structure.
4972 * @ndlp: pointer to a node-list data structure.
4973 *
4974 * Return code
4975 * 0 - Successfully processed echo iocb (currently always returns 0)
4976 **/
4977static int
4978lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4979 struct lpfc_nodelist *ndlp)
4980{
4981 uint8_t *pcmd;
4982
4983 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
4984
4985 /* skip over first word of echo command to find echo data */
4986 pcmd += sizeof(uint32_t);
4987
4988 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
4989 return 0;
4990}
4991
e59058c4 4992/**
3621a710 4993 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
e59058c4
JS
4994 * @vport: pointer to a host virtual N_Port data structure.
4995 * @cmdiocb: pointer to lpfc command iocb data structure.
4996 * @ndlp: pointer to a node-list data structure.
4997 *
4998 * This routine processes a Link Incident Report Registration (LIRR) IOCB
4999 * received as an ELS unsolicited event. Currently, this function just invokes
5000 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
5001 *
5002 * Return code
5003 * 0 - Successfully processed lirr iocb (currently always returns 0)
5004 **/
dea3101e 5005static int
2e0fef85
JS
5006lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5007 struct lpfc_nodelist *ndlp)
7bb3b137
JW
5008{
5009 struct ls_rjt stat;
5010
5011 /* For now, unconditionally reject this command */
5012 stat.un.b.lsRjtRsvd0 = 0;
5013 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5014 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5015 stat.un.b.vendorUnique = 0;
858c9f6c 5016 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7bb3b137
JW
5017 return 0;
5018}
5019
5ffc266e
JS
5020/**
5021 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
5022 * @vport: pointer to a host virtual N_Port data structure.
5023 * @cmdiocb: pointer to lpfc command iocb data structure.
5024 * @ndlp: pointer to a node-list data structure.
5025 *
5026 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
5027 * received as an ELS unsolicited event. A request to RRQ shall only
5028 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
5029 * Nx_Port N_Port_ID of the target Exchange is the same as the
5030 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
5031 * not accepted, an LS_RJT with reason code "Unable to perform
5032 * command request" and reason code explanation "Invalid Originator
5033 * S_ID" shall be returned. For now, we just unconditionally accept
5034 * RRQ from the target.
5035 **/
5036static void
5037lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5038 struct lpfc_nodelist *ndlp)
5039{
5040 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
19ca7609
JS
5041 if (vport->phba->sli_rev == LPFC_SLI_REV4)
5042 lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
5ffc266e
JS
5043}
5044
12265f68
JS
5045/**
5046 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
5047 * @phba: pointer to lpfc hba data structure.
5048 * @pmb: pointer to the driver internal queue element for mailbox command.
5049 *
5050 * This routine is the completion callback function for the MBX_READ_LNK_STAT
5051 * mailbox command. This callback function is to actually send the Accept
5052 * (ACC) response to a Read Link Error Status (RLS) unsolicited IOCB event. It
5053 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
5054 * mailbox command, constructs the RLS response with the link statistics
5055 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send the
5056 * ACC response to the RLS.
5057 *
5058 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5059 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5060 * will be stored into the context1 field of the IOCB for the completion
5061 * callback function to the RLS Accept Response ELS IOCB command.
5062 *
5063 **/
5064static void
5065lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5066{
5067 MAILBOX_t *mb;
5068 IOCB_t *icmd;
5069 struct RLS_RSP *rls_rsp;
5070 uint8_t *pcmd;
5071 struct lpfc_iocbq *elsiocb;
5072 struct lpfc_nodelist *ndlp;
7851fe2c
JS
5073 uint16_t oxid;
5074 uint16_t rxid;
12265f68
JS
5075 uint32_t cmdsize;
5076
5077 mb = &pmb->u.mb;
5078
5079 ndlp = (struct lpfc_nodelist *) pmb->context2;
7851fe2c
JS
5080 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5081 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
12265f68
JS
5082 pmb->context1 = NULL;
5083 pmb->context2 = NULL;
5084
5085 if (mb->mbxStatus) {
5086 mempool_free(pmb, phba->mbox_mem_pool);
5087 return;
5088 }
5089
5090 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
5091 mempool_free(pmb, phba->mbox_mem_pool);
5092 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5093 lpfc_max_els_tries, ndlp,
5094 ndlp->nlp_DID, ELS_CMD_ACC);
5095
5096 /* Decrement the ndlp reference count from previous mbox command */
5097 lpfc_nlp_put(ndlp);
5098
5099 if (!elsiocb)
5100 return;
5101
5102 icmd = &elsiocb->iocb;
7851fe2c
JS
5103 icmd->ulpContext = rxid;
5104 icmd->unsli3.rcvsli3.ox_id = oxid;
12265f68
JS
5105
5106 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5107 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5108 pcmd += sizeof(uint32_t); /* Skip past command */
5109 rls_rsp = (struct RLS_RSP *)pcmd;
5110
5111 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
5112 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
5113 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
5114 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
5115 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
5116 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
5117
5118 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
5119 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5120 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
5121 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5122 elsiocb->iotag, elsiocb->iocb.ulpContext,
5123 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5124 ndlp->nlp_rpi);
5125 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5126 phba->fc_stat.elsXmitACC++;
5127 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5128 lpfc_els_free_iocb(phba, elsiocb);
5129}
5130
e59058c4 5131/**
3621a710 5132 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
e59058c4
JS
5133 * @phba: pointer to lpfc hba data structure.
5134 * @pmb: pointer to the driver internal queue element for mailbox command.
5135 *
5136 * This routine is the completion callback function for the MBX_READ_LNK_STAT
5137 * mailbox command. This callback function is to actually send the Accept
5138 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
5139 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
5140 * mailbox command, constructs the RPS response with the link statistics
5141 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
5142 * response to the RPS.
5143 *
5144 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5145 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5146 * will be stored into the context1 field of the IOCB for the completion
5147 * callback function to the RPS Accept Response ELS IOCB command.
5148 *
5149 **/
082c0266 5150static void
329f9bc7 5151lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7bb3b137 5152{
7bb3b137
JW
5153 MAILBOX_t *mb;
5154 IOCB_t *icmd;
5155 RPS_RSP *rps_rsp;
5156 uint8_t *pcmd;
5157 struct lpfc_iocbq *elsiocb;
5158 struct lpfc_nodelist *ndlp;
7851fe2c
JS
5159 uint16_t status;
5160 uint16_t oxid;
5161 uint16_t rxid;
7bb3b137
JW
5162 uint32_t cmdsize;
5163
04c68496 5164 mb = &pmb->u.mb;
7bb3b137
JW
5165
5166 ndlp = (struct lpfc_nodelist *) pmb->context2;
7851fe2c
JS
5167 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5168 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
041976fb
RD
5169 pmb->context1 = NULL;
5170 pmb->context2 = NULL;
7bb3b137
JW
5171
5172 if (mb->mbxStatus) {
329f9bc7 5173 mempool_free(pmb, phba->mbox_mem_pool);
7bb3b137
JW
5174 return;
5175 }
5176
5177 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
329f9bc7 5178 mempool_free(pmb, phba->mbox_mem_pool);
2e0fef85
JS
5179 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5180 lpfc_max_els_tries, ndlp,
5181 ndlp->nlp_DID, ELS_CMD_ACC);
fa4066b6
JS
5182
5183 /* Decrement the ndlp reference count from previous mbox command */
329f9bc7 5184 lpfc_nlp_put(ndlp);
fa4066b6 5185
c9f8735b 5186 if (!elsiocb)
7bb3b137 5187 return;
7bb3b137
JW
5188
5189 icmd = &elsiocb->iocb;
7851fe2c
JS
5190 icmd->ulpContext = rxid;
5191 icmd->unsli3.rcvsli3.ox_id = oxid;
7bb3b137
JW
5192
5193 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5194 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 5195 pcmd += sizeof(uint32_t); /* Skip past command */
7bb3b137
JW
5196 rps_rsp = (RPS_RSP *)pcmd;
5197
76a95d75 5198 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
7bb3b137
JW
5199 status = 0x10;
5200 else
5201 status = 0x8;
2e0fef85 5202 if (phba->pport->fc_flag & FC_FABRIC)
7bb3b137
JW
5203 status |= 0x4;
5204
5205 rps_rsp->rsvd1 = 0;
09372820
JS
5206 rps_rsp->portStatus = cpu_to_be16(status);
5207 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
5208 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
5209 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
5210 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
5211 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
5212 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
7bb3b137 5213 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
e8b62011
JS
5214 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5215 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
5216 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5217 elsiocb->iotag, elsiocb->iocb.ulpContext,
5218 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5219 ndlp->nlp_rpi);
858c9f6c 5220 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7bb3b137 5221 phba->fc_stat.elsXmitACC++;
3772a991 5222 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
7bb3b137 5223 lpfc_els_free_iocb(phba, elsiocb);
7bb3b137
JW
5224 return;
5225}
5226
e59058c4 5227/**
12265f68
JS
5228 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
5229 * @vport: pointer to a host virtual N_Port data structure.
5230 * @cmdiocb: pointer to lpfc command iocb data structure.
5231 * @ndlp: pointer to a node-list data structure.
5232 *
5233 * This routine processes a Read Link Error Status (RLS) IOCB received as an
5234 * ELS unsolicited event. It first checks the remote port state. If the
5235 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5236 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5237 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
5238 * to read the HBA link statistics. The callback function,
5239 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
5240 * actually sends out the RLS Accept (ACC) response.
5241 *
5242 * Return codes
5243 * 0 - Successfully processed rls iocb (currently always returns 0)
5244 **/
5245static int
5246lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5247 struct lpfc_nodelist *ndlp)
5248{
5249 struct lpfc_hba *phba = vport->phba;
5250 LPFC_MBOXQ_t *mbox;
5251 struct lpfc_dmabuf *pcmd;
5252 struct ls_rjt stat;
5253
5254 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5255 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5256 /* reject the unsolicited RLS request and done with it */
5257 goto reject_out;
5258
5259 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5260
5261 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5262 if (mbox) {
5263 lpfc_read_lnk_stat(phba, mbox);
7851fe2c
JS
5264 mbox->context1 = (void *)((unsigned long)
5265 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5266 cmdiocb->iocb.ulpContext)); /* rx_id */
12265f68
JS
5267 mbox->context2 = lpfc_nlp_get(ndlp);
5268 mbox->vport = vport;
5269 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
5270 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5271 != MBX_NOT_FINISHED)
5272 /* Mbox completion will send ELS Response */
5273 return 0;
5274 /* Decrement reference count used for the failed mbox
5275 * command.
5276 */
5277 lpfc_nlp_put(ndlp);
5278 mempool_free(mbox, phba->mbox_mem_pool);
5279 }
5280reject_out:
5281 /* issue rejection response */
5282 stat.un.b.lsRjtRsvd0 = 0;
5283 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5284 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5285 stat.un.b.vendorUnique = 0;
5286 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5287 return 0;
5288}
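/*
 * Illustrative sketch only: the exchange IDs needed for the deferred ACC
 * are carried through the mailbox in context1 as one packed unsigned long
 * (ox_id in the upper 16 bits, rx_id in the lower 16); ox_id/rx_id below
 * stand for the command's unsli3.rcvsli3.ox_id and ulpContext.  The
 * unpacking at the top of lpfc_els_rsp_rls_acc() is the mirror image:
 *
 *	mbox->context1 = (void *)((unsigned long)((ox_id << 16) | rx_id));
 *	...
 *	rxid = (uint16_t)((unsigned long)(pmb->context1) & 0xffff);
 *	oxid = (uint16_t)(((unsigned long)(pmb->context1) >> 16) & 0xffff);
 */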
5289
5290/**
5291 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
5292 * @vport: pointer to a host virtual N_Port data structure.
5293 * @cmdiocb: pointer to lpfc command iocb data structure.
5294 * @ndlp: pointer to a node-list data structure.
5295 *
5296 * This routine processes a Read Timeout Value (RTV) IOCB received as an
5297 * ELS unsolicited event. It first checks the remote port state. If the
5298 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5299 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5300 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
5301 * Value (RTV) unsolicited IOCB event.
5302 *
5303 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5304 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5305 * will be stored into the context1 field of the IOCB for the completion
5306 * callback function to the RTV Accept Response ELS IOCB command.
5307 *
5308 * Return codes
5309 * 0 - Successfully processed rtv iocb (currently always returns 0)
5310 **/
5311static int
5312lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5313 struct lpfc_nodelist *ndlp)
5314{
5315 struct lpfc_hba *phba = vport->phba;
5316 struct ls_rjt stat;
5317 struct RTV_RSP *rtv_rsp;
5318 uint8_t *pcmd;
5319 struct lpfc_iocbq *elsiocb;
5320 uint32_t cmdsize;
5321
5322
5323 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5324 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5325 /* reject the unsolicited RTV request and done with it */
5326 goto reject_out;
5327
5328 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
5329 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5330 lpfc_max_els_tries, ndlp,
5331 ndlp->nlp_DID, ELS_CMD_ACC);
5332
5333 if (!elsiocb)
5334 return 1;
5335
5336 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5337 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5338 pcmd += sizeof(uint32_t); /* Skip past command */
5339
5340 /* use the command's xri in the response */
7851fe2c
JS
5341 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
5342 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
12265f68
JS
5343
5344 rtv_rsp = (struct RTV_RSP *)pcmd;
5345
5346 /* populate RTV payload */
5347 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
5348 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
5349 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
5350 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
5351 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
5352
5353 /* Xmit ELS RTV ACC response tag <ulpIoTag> */
5354 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5355 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
5356 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
5357 "Data: x%x x%x x%x\n",
5358 elsiocb->iotag, elsiocb->iocb.ulpContext,
5359 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5360 ndlp->nlp_rpi,
5361 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
5362 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5363 phba->fc_stat.elsXmitACC++;
5364 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5365 lpfc_els_free_iocb(phba, elsiocb);
5366 return 0;
5367
5368reject_out:
5369 /* issue rejection response */
5370 stat.un.b.lsRjtRsvd0 = 0;
5371 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5372 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5373 stat.un.b.vendorUnique = 0;
5374 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5375 return 0;
5376}
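/*
 * Illustrative sketch only: what the RTV ACC payload built above reports.
 * R_A_TOV is scaled by 1000 before being sent (the in-core phba->fc_ratov
 * is presumably kept in seconds, hence the "report msecs" note above), and
 * E_D_TOV is reported as-is with qtov_edtovres carrying its resolution:
 *
 *	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000);	// msecs
 *	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
 *	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
 */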
5377
5378/* lpfc_els_rcv_rps - Process an unsolicited rps iocb
e59058c4
JS
5379 * @vport: pointer to a host virtual N_Port data structure.
5380 * @cmdiocb: pointer to lpfc command iocb data structure.
5381 * @ndlp: pointer to a node-list data structure.
5382 *
5383 * This routine processes Read Port Status (RPS) IOCB received as an
5384 * ELS unsolicited event. It first checks the remote port state. If the
5385 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5386 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5387 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
5388 * to read the HBA link statistics. The callback function,
5389 * lpfc_els_rsp_rps_acc(), set on the MBX_READ_LNK_STAT mailbox command,
5390 * actually sends out the RPS Accept (ACC) response.
5391 *
5392 * Return codes
5393 * 0 - Successfully processed rps iocb (currently always returns 0)
5394 **/
7bb3b137 5395static int
2e0fef85
JS
5396lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5397 struct lpfc_nodelist *ndlp)
dea3101e 5398{
2e0fef85 5399 struct lpfc_hba *phba = vport->phba;
dea3101e 5400 uint32_t *lp;
7bb3b137
JW
5401 uint8_t flag;
5402 LPFC_MBOXQ_t *mbox;
5403 struct lpfc_dmabuf *pcmd;
5404 RPS *rps;
5405 struct ls_rjt stat;
5406
2fe165b6 5407 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
90160e01
JS
5408 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5409 /* reject the unsolicited RPS request and done with it */
5410 goto reject_out;
7bb3b137
JW
5411
5412 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5413 lp = (uint32_t *) pcmd->virt;
5414 flag = (be32_to_cpu(*lp++) & 0xf);
5415 rps = (RPS *) lp;
5416
5417 if ((flag == 0) ||
5418 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
2e0fef85 5419 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
92d7f7b0 5420 sizeof(struct lpfc_name)) == 0))) {
2e0fef85 5421
92d7f7b0
JS
5422 printk("Fix me....\n");
5423 dump_stack();
2e0fef85
JS
5424 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5425 if (mbox) {
7bb3b137 5426 lpfc_read_lnk_stat(phba, mbox);
7851fe2c
JS
5427 mbox->context1 = (void *)((unsigned long)
5428 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5429 cmdiocb->iocb.ulpContext)); /* rx_id */
329f9bc7 5430 mbox->context2 = lpfc_nlp_get(ndlp);
92d7f7b0 5431 mbox->vport = vport;
7bb3b137 5432 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
fa4066b6 5433 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
0b727fea 5434 != MBX_NOT_FINISHED)
7bb3b137
JW
5435 /* Mbox completion will send ELS Response */
5436 return 0;
fa4066b6
JS
5437 /* Decrement reference count used for the failed mbox
5438 * command.
5439 */
329f9bc7 5440 lpfc_nlp_put(ndlp);
7bb3b137
JW
5441 mempool_free(mbox, phba->mbox_mem_pool);
5442 }
5443 }
90160e01
JS
5444
5445reject_out:
5446 /* issue rejection response */
7bb3b137
JW
5447 stat.un.b.lsRjtRsvd0 = 0;
5448 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5449 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5450 stat.un.b.vendorUnique = 0;
858c9f6c 5451 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7bb3b137
JW
5452 return 0;
5453}
5454
19ca7609
JS
5455/* lpfc_issue_els_rrq - Issue an ELS RRQ (Reinstate Recovery Qualifier) command
5456 * @vport: pointer to a host virtual N_Port data structure.
5457 * @ndlp: pointer to a node-list data structure.
5458 * @did: DID of the target.
5459 * @rrq: Pointer to the rrq struct.
5460 *
5461 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
5462 * successful, the completion handler will clear the RRQ.
5463 *
5464 * Return codes
5465 * 0 - Successfully sent rrq els iocb.
5466 * 1 - Failed to send rrq els iocb.
5467 **/
5468static int
5469lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5470 uint32_t did, struct lpfc_node_rrq *rrq)
5471{
5472 struct lpfc_hba *phba = vport->phba;
5473 struct RRQ *els_rrq;
5474 IOCB_t *icmd;
5475 struct lpfc_iocbq *elsiocb;
5476 uint8_t *pcmd;
5477 uint16_t cmdsize;
5478 int ret;
5479
5480
5481 if (ndlp != rrq->ndlp)
5482 ndlp = rrq->ndlp;
5483 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5484 return 1;
5485
5486 /* If ndlp is not NULL, we will bump the reference count on it */
5487 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
5488 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
5489 ELS_CMD_RRQ);
5490 if (!elsiocb)
5491 return 1;
5492
5493 icmd = &elsiocb->iocb;
5494 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5495
5496 /* For RRQ request, remainder of payload is Exchange IDs */
5497 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
5498 pcmd += sizeof(uint32_t);
5499 els_rrq = (struct RRQ *) pcmd;
5500
5501 bf_set(rrq_oxid, els_rrq, rrq->xritag);
5502 bf_set(rrq_rxid, els_rrq, rrq->rxid);
5503 bf_set(rrq_did, els_rrq, vport->fc_myDID);
5504 els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
5505 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
5506
5507
5508 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5509 "Issue RRQ: did:x%x",
5510 did, rrq->xritag, rrq->rxid);
5511 elsiocb->context_un.rrq = rrq;
5512 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
5513 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5514
5515 if (ret == IOCB_ERROR) {
5516 lpfc_els_free_iocb(phba, elsiocb);
5517 return 1;
5518 }
5519 return 0;
5520}
5521
5522/**
5523 * lpfc_send_rrq - Sends ELS RRQ if needed.
5524 * @phba: pointer to lpfc hba data structure.
5525 * @rrq: pointer to the active rrq.
5526 *
5527 * This routine will call the lpfc_issue_els_rrq if the rrq is
5528 * still active for the xri. If this function returns a failure then
5529 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
5530 *
5531 * Returns 0 Success.
5532 * 1 Failure.
5533 **/
5534int
5535lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
5536{
5537 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
5538 rrq->nlp_DID);
5539 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
5540 return lpfc_issue_els_rrq(rrq->vport, ndlp,
5541 rrq->nlp_DID, rrq);
5542 else
5543 return 1;
5544}
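/*
 * Illustrative sketch only: per the description above, a caller that fails
 * to send the RRQ must release the recovery qualifier itself via
 * lpfc_clr_active_rrq().  That routine's argument list is not visible in
 * this file, so the call below is only indicative:
 *
 *	if (lpfc_send_rrq(phba, rrq)) {
 *		// could not issue the ELS RRQ: clear the active RRQ state
 *		// for this xri ourselves
 *		lpfc_clr_active_rrq( ... );	// args omitted
 *	}
 */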
5545
e59058c4 5546/**
3621a710 5547 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
e59058c4
JS
5548 * @vport: pointer to a host virtual N_Port data structure.
5549 * @cmdsize: size of the ELS command.
5550 * @oldiocb: pointer to the original lpfc command iocb data structure.
5551 * @ndlp: pointer to a node-list data structure.
5552 *
5553 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
5554 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
5555 *
5556 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5557 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5558 * will be stored into the context1 field of the IOCB for the completion
5559 * callback function to the RPL Accept Response ELS command.
5560 *
5561 * Return code
5562 * 0 - Successfully issued ACC RPL ELS command
5563 * 1 - Failed to issue ACC RPL ELS command
5564 **/
082c0266 5565static int
2e0fef85
JS
5566lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
5567 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
7bb3b137 5568{
2e0fef85
JS
5569 struct lpfc_hba *phba = vport->phba;
5570 IOCB_t *icmd, *oldcmd;
7bb3b137
JW
5571 RPL_RSP rpl_rsp;
5572 struct lpfc_iocbq *elsiocb;
7bb3b137 5573 uint8_t *pcmd;
dea3101e 5574
2e0fef85
JS
5575 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5576 ndlp->nlp_DID, ELS_CMD_ACC);
7bb3b137 5577
488d1469 5578 if (!elsiocb)
7bb3b137 5579 return 1;
488d1469 5580
7bb3b137
JW
5581 icmd = &elsiocb->iocb;
5582 oldcmd = &oldiocb->iocb;
7851fe2c
JS
5583 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5584 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
7bb3b137
JW
5585
5586 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5587 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 5588 pcmd += sizeof(uint16_t);
7bb3b137
JW
5589 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
5590 pcmd += sizeof(uint16_t);
5591
5592 /* Setup the RPL ACC payload */
5593 rpl_rsp.listLen = be32_to_cpu(1);
5594 rpl_rsp.index = 0;
5595 rpl_rsp.port_num_blk.portNum = 0;
2e0fef85
JS
5596 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
5597 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
7bb3b137 5598 sizeof(struct lpfc_name));
7bb3b137 5599 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
7bb3b137 5600 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
e8b62011
JS
5601 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5602 "0120 Xmit ELS RPL ACC response tag x%x "
5603 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
5604 "rpi x%x\n",
5605 elsiocb->iotag, elsiocb->iocb.ulpContext,
5606 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5607 ndlp->nlp_rpi);
858c9f6c 5608 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7bb3b137 5609 phba->fc_stat.elsXmitACC++;
3772a991
JS
5610 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
5611 IOCB_ERROR) {
7bb3b137
JW
5612 lpfc_els_free_iocb(phba, elsiocb);
5613 return 1;
5614 }
5615 return 0;
5616}
5617
e59058c4 5618/**
3621a710 5619 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
e59058c4
JS
5620 * @vport: pointer to a host virtual N_Port data structure.
5621 * @cmdiocb: pointer to lpfc command iocb data structure.
5622 * @ndlp: pointer to a node-list data structure.
5623 *
5624 * This routine processes Read Port List (RPL) IOCB received as an ELS
5625 * unsolicited event. It first checks the remote port state. If the remote
5626 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
5627 * invokes the lpfc_els_rsp_reject() routine to send a reject response.
5628 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
5629 * to accept the RPL.
5630 *
5631 * Return code
5632 * 0 - Successfully processed rpl iocb (currently always returns 0)
5633 **/
7bb3b137 5634static int
2e0fef85
JS
5635lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5636 struct lpfc_nodelist *ndlp)
7bb3b137
JW
5637{
5638 struct lpfc_dmabuf *pcmd;
5639 uint32_t *lp;
5640 uint32_t maxsize;
5641 uint16_t cmdsize;
5642 RPL *rpl;
5643 struct ls_rjt stat;
5644
2fe165b6
JW
5645 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5646 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
90160e01 5647 /* issue rejection response */
7bb3b137
JW
5648 stat.un.b.lsRjtRsvd0 = 0;
5649 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5650 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5651 stat.un.b.vendorUnique = 0;
858c9f6c
JS
5652 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
5653 NULL);
90160e01
JS
5654 /* rejected the unsolicited RPL request and done with it */
5655 return 0;
7bb3b137
JW
5656 }
5657
dea3101e
JB
5658 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5659 lp = (uint32_t *) pcmd->virt;
7bb3b137 5660 rpl = (RPL *) (lp + 1);
7bb3b137 5661 maxsize = be32_to_cpu(rpl->maxsize);
dea3101e 5662
7bb3b137
JW
5663 /* We support only one port */
5664 if ((rpl->index == 0) &&
5665 ((maxsize == 0) ||
5666 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
5667 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
2fe165b6 5668 } else {
7bb3b137
JW
5669 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
5670 }
2e0fef85 5671 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
dea3101e
JB
5672
5673 return 0;
5674}
5675
e59058c4 5676/**
3621a710 5677 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
e59058c4
JS
5678 * @vport: pointer to a virtual N_Port data structure.
5679 * @cmdiocb: pointer to lpfc command iocb data structure.
5680 * @ndlp: pointer to a node-list data structure.
5681 *
5682 * This routine processes Fibre Channel Address Resolution Protocol
5683 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
5684 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
5685 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
5686 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
5687 * remote PortName is compared against the FC PortName stored in the @vport
5688 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
5689 * compared against the FC NodeName stored in the @vport data structure.
5690 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
5691 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
5692 * invoked to send out FARP Response to the remote node. Before sending the
5693 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP
5694 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
5695 * routine is invoked to log into the remote port first.
5696 *
5697 * Return code
5698 * 0 - Either the FARP Match Mode not supported or successfully processed
5699 **/
dea3101e 5700static int
2e0fef85
JS
5701lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5702 struct lpfc_nodelist *ndlp)
dea3101e
JB
5703{
5704 struct lpfc_dmabuf *pcmd;
5705 uint32_t *lp;
5706 IOCB_t *icmd;
5707 FARP *fp;
5708 uint32_t cmd, cnt, did;
5709
5710 icmd = &cmdiocb->iocb;
5711 did = icmd->un.elsreq64.remoteID;
5712 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5713 lp = (uint32_t *) pcmd->virt;
5714
5715 cmd = *lp++;
5716 fp = (FARP *) lp;
dea3101e 5717 /* FARP-REQ received from DID <did> */
e8b62011
JS
5718 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5719 "0601 FARP-REQ received from DID x%x\n", did);
dea3101e
JB
5720 /* We will only support match on WWPN or WWNN */
5721 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
c9f8735b 5722 return 0;
dea3101e
JB
5723 }
5724
5725 cnt = 0;
5726 /* If this FARP command is searching for my portname */
5727 if (fp->Mflags & FARP_MATCH_PORT) {
2e0fef85 5728 if (memcmp(&fp->RportName, &vport->fc_portname,
92d7f7b0 5729 sizeof(struct lpfc_name)) == 0)
dea3101e
JB
5730 cnt = 1;
5731 }
5732
5733 /* If this FARP command is searching for my nodename */
5734 if (fp->Mflags & FARP_MATCH_NODE) {
2e0fef85 5735 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
92d7f7b0 5736 sizeof(struct lpfc_name)) == 0)
dea3101e
JB
5737 cnt = 1;
5738 }
5739
5740 if (cnt) {
5741 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
5742 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
5743 /* Log back into the node before sending the FARP. */
5744 if (fp->Rflags & FARP_REQUEST_PLOGI) {
5024ab17 5745 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 5746 lpfc_nlp_set_state(vport, ndlp,
de0c5b32 5747 NLP_STE_PLOGI_ISSUE);
2e0fef85 5748 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
dea3101e
JB
5749 }
5750
5751 /* Send a FARP response to that node */
2e0fef85
JS
5752 if (fp->Rflags & FARP_REQUEST_FARPR)
5753 lpfc_issue_els_farpr(vport, did, 0);
dea3101e
JB
5754 }
5755 }
c9f8735b 5756 return 0;
dea3101e
JB
5757}
5758
e59058c4 5759/**
3621a710 5760 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
e59058c4
JS
5761 * @vport: pointer to a host virtual N_Port data structure.
5762 * @cmdiocb: pointer to lpfc command iocb data structure.
5763 * @ndlp: pointer to a node-list data structure.
5764 *
5765 * This routine processes Fibre Channel Address Resolution Protocol
5766 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
5767 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
5768 * the FARP response request.
5769 *
5770 * Return code
5771 * 0 - Successfully processed FARPR IOCB (currently always returns 0)
5772 **/
dea3101e 5773static int
2e0fef85
JS
5774lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5775 struct lpfc_nodelist *ndlp)
dea3101e
JB
5776{
5777 struct lpfc_dmabuf *pcmd;
5778 uint32_t *lp;
5779 IOCB_t *icmd;
5780 uint32_t cmd, did;
5781
5782 icmd = &cmdiocb->iocb;
5783 did = icmd->un.elsreq64.remoteID;
5784 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5785 lp = (uint32_t *) pcmd->virt;
5786
5787 cmd = *lp++;
5788 /* FARP-RSP received from DID <did> */
e8b62011
JS
5789 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5790 "0600 FARP-RSP received from DID x%x\n", did);
dea3101e 5791 /* ACCEPT the Farp resp request */
51ef4c26 5792 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e
JB
5793
5794 return 0;
5795}
5796
e59058c4 5797/**
3621a710 5798 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
e59058c4
JS
5799 * @vport: pointer to a host virtual N_Port data structure.
5800 * @cmdiocb: pointer to lpfc command iocb data structure.
5801 * @fan_ndlp: pointer to a node-list data structure.
5802 *
5803 * This routine processes a Fabric Address Notification (FAN) IOCB
5804 * command received as an ELS unsolicited event. The FAN ELS command will
5805 * only be processed on a physical port (i.e., the @vport represents the
5806 * physical port). The fabric NodeName and PortName from the FAN IOCB are
5807 * compared against those in the phba data structure. If any of those is
5808 * different, the lpfc_initial_flogi() routine is invoked to initialize
5809 * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
5810 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
5811 * is invoked to register login to the fabric.
5812 *
5813 * Return code
5814 * 0 - Successfully processed fan iocb (currently always returns 0).
5815 **/
dea3101e 5816static int
2e0fef85
JS
5817lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5818 struct lpfc_nodelist *fan_ndlp)
dea3101e 5819{
0d2b6b83 5820 struct lpfc_hba *phba = vport->phba;
dea3101e 5821 uint32_t *lp;
5024ab17 5822 FAN *fp;
dea3101e 5823
0d2b6b83
JS
5824 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
5825 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
5826 fp = (FAN *) ++lp;
5024ab17 5827 /* FAN received; Fan does not have a reply sequence */
0d2b6b83
JS
5828 if ((vport == phba->pport) &&
5829 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
5024ab17 5830 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
0d2b6b83 5831 sizeof(struct lpfc_name))) ||
5024ab17 5832 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
0d2b6b83
JS
5833 sizeof(struct lpfc_name)))) {
5834 /* This port has switched fabrics. FLOGI is required */
76a95d75 5835 lpfc_issue_init_vfi(vport);
0d2b6b83
JS
5836 } else {
5837 /* FAN verified - skip FLOGI */
5838 vport->fc_myDID = vport->fc_prevDID;
6fb120a7
JS
5839 if (phba->sli_rev < LPFC_SLI_REV4)
5840 lpfc_issue_fabric_reglogin(vport);
5841 else
5842 lpfc_issue_reg_vfi(vport);
5024ab17 5843 }
dea3101e 5844 }
c9f8735b 5845 return 0;
dea3101e
JB
5846}
5847
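
The FAN check above reduces to two memcmp()s of world-wide names against the values saved from the previous fabric login; only an exact match of both allows the FLOGI to be skipped. A minimal, self-contained sketch of that comparison (the struct layouts and values are illustrative stand-ins, not the driver's real FAN or struct lpfc_name definitions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct wwn { uint8_t b[8]; };            /* stand-in for struct lpfc_name */

struct fan_payload {                     /* stand-in for the FAN ELS body */
	struct wwn fabric_port_name;
	struct wwn fabric_node_name;
};

/* Both names must match the saved fabric parameters before FLOGI is skipped */
static int fan_fabric_unchanged(const struct wwn *saved_node,
				const struct wwn *saved_port,
				const struct fan_payload *fp)
{
	return !memcmp(saved_node, &fp->fabric_node_name, sizeof(*saved_node)) &&
	       !memcmp(saved_port, &fp->fabric_port_name, sizeof(*saved_port));
}

int main(void)
{
	struct wwn node = { { 0x20, 0x00, 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56 } };
	struct wwn port = { { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56 } };
	struct fan_payload fan = { .fabric_port_name = port,
				   .fabric_node_name = node };

	if (fan_fabric_unchanged(&node, &port, &fan))
		printf("FAN verified - skip FLOGI, restore the previous DID\n");
	else
		printf("fabric changed - FLOGI (or INIT_VFI) is required\n");
	return 0;
}
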
e59058c4 5848/**
3621a710 5849 * lpfc_els_timeout - Handler function for the els timer
e59058c4
JS
5850 * @ptr: holder for the timer function associated data.
5851 *
5852 * This routine is invoked by the ELS timer after timeout. It posts the ELS
5853 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
5854 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
5855 * up the worker thread. The worker thread, in turn, invokes the routine
5856 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
5857 **/
dea3101e
JB
5858void
5859lpfc_els_timeout(unsigned long ptr)
5860{
2e0fef85
JS
5861 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
5862 struct lpfc_hba *phba = vport->phba;
5e9d9b82 5863 uint32_t tmo_posted;
dea3101e
JB
5864 unsigned long iflag;
5865
2e0fef85 5866 spin_lock_irqsave(&vport->work_port_lock, iflag);
5e9d9b82
JS
5867 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
5868 if (!tmo_posted)
2e0fef85 5869 vport->work_port_events |= WORKER_ELS_TMO;
5e9d9b82 5870 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
92d7f7b0 5871
5e9d9b82
JS
5872 if (!tmo_posted)
5873 lpfc_worker_wake_up(phba);
dea3101e
JB
5874 return;
5875}
5876
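
The timer callback above does no real work in timer context; it only records the event bit and wakes the worker thread once per posting. A kernel-style sketch of that post-and-wake pattern, written against the pre-4.15 timer API this code uses; every name here is an illustrative stand-in, not lpfc's:

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/wait.h>

#define DEMO_WORKER_ELS_TMO 0x1          /* stand-in for WORKER_ELS_TMO */

struct demo_port {
	spinlock_t work_lock;
	unsigned long work_events;       /* bitmap polled by the worker */
	wait_queue_head_t worker_wq;
	struct timer_list els_tmo;
};

/* Timer context: record the event and wake the worker, nothing more. */
static void demo_els_timeout(unsigned long ptr)
{
	struct demo_port *p = (struct demo_port *)ptr;
	unsigned long flags;
	int already_posted;

	spin_lock_irqsave(&p->work_lock, flags);
	already_posted = p->work_events & DEMO_WORKER_ELS_TMO;
	p->work_events |= DEMO_WORKER_ELS_TMO;
	spin_unlock_irqrestore(&p->work_lock, flags);

	if (!already_posted)
		wake_up(&p->worker_wq);  /* wake only once per posting */
}

static void demo_arm_els_timer(struct demo_port *p, unsigned int ratov)
{
	spin_lock_init(&p->work_lock);
	init_waitqueue_head(&p->worker_wq);
	init_timer(&p->els_tmo);
	p->els_tmo.function = demo_els_timeout;
	p->els_tmo.data = (unsigned long)p;
	/* ELS commands are given 2 * R_A_TOV before the handler runs */
	mod_timer(&p->els_tmo, jiffies + HZ * (ratov << 1));
}
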
2a9bf3d0 5877
e59058c4 5878/**
3621a710 5879 * lpfc_els_timeout_handler - Process an els timeout event
e59058c4
JS
5880 * @vport: pointer to a virtual N_Port data structure.
5881 *
5882 * This routine is the actual handler function that processes an ELS timeout
5883 * event. It walks the ELS ring and aborts all the IOCBs associated with the
5884 * @vport (except the ABORT/CLOSE/FARP/FARPR/FDISC IOCBs) by
5885 * invoking the lpfc_sli_issue_abort_iotag() routine.
5886 **/
dea3101e 5887void
2e0fef85 5888lpfc_els_timeout_handler(struct lpfc_vport *vport)
dea3101e 5889{
2e0fef85 5890 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
5891 struct lpfc_sli_ring *pring;
5892 struct lpfc_iocbq *tmp_iocb, *piocb;
5893 IOCB_t *cmd = NULL;
5894 struct lpfc_dmabuf *pcmd;
2e0fef85 5895 uint32_t els_command = 0;
dea3101e 5896 uint32_t timeout;
2e0fef85 5897 uint32_t remote_ID = 0xffffffff;
2a9bf3d0
JS
5898 LIST_HEAD(txcmplq_completions);
5899 LIST_HEAD(abort_list);
5900
dea3101e 5901
dea3101e
JB
5902 timeout = (uint32_t)(phba->fc_ratov << 1);
5903
5904 pring = &phba->sli.ring[LPFC_ELS_RING];
dea3101e 5905
2a9bf3d0
JS
5906 spin_lock_irq(&phba->hbalock);
5907 list_splice_init(&pring->txcmplq, &txcmplq_completions);
5908 spin_unlock_irq(&phba->hbalock);
5909
5910 list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
dea3101e
JB
5911 cmd = &piocb->iocb;
5912
2e0fef85
JS
5913 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
5914 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
5915 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
dea3101e 5916 continue;
2e0fef85
JS
5917
5918 if (piocb->vport != vport)
5919 continue;
5920
dea3101e 5921 pcmd = (struct lpfc_dmabuf *) piocb->context2;
2e0fef85
JS
5922 if (pcmd)
5923 els_command = *(uint32_t *) (pcmd->virt);
dea3101e 5924
92d7f7b0
JS
5925 if (els_command == ELS_CMD_FARP ||
5926 els_command == ELS_CMD_FARPR ||
5927 els_command == ELS_CMD_FDISC)
5928 continue;
5929
dea3101e 5930 if (piocb->drvrTimeout > 0) {
92d7f7b0 5931 if (piocb->drvrTimeout >= timeout)
dea3101e 5932 piocb->drvrTimeout -= timeout;
92d7f7b0 5933 else
dea3101e 5934 piocb->drvrTimeout = 0;
dea3101e
JB
5935 continue;
5936 }
5937
2e0fef85
JS
5938 remote_ID = 0xffffffff;
5939 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
dea3101e 5940 remote_ID = cmd->un.elsreq64.remoteID;
2e0fef85
JS
5941 else {
5942 struct lpfc_nodelist *ndlp;
5943 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
58da1ffb 5944 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
2e0fef85 5945 remote_ID = ndlp->nlp_DID;
dea3101e 5946 }
2a9bf3d0
JS
5947 list_add_tail(&piocb->dlist, &abort_list);
5948 }
5949 spin_lock_irq(&phba->hbalock);
5950 list_splice(&txcmplq_completions, &pring->txcmplq);
5951 spin_unlock_irq(&phba->hbalock);
5952
5953 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
e8b62011 5954 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2a9bf3d0
JS
5955 "0127 ELS timeout Data: x%x x%x x%x "
5956 "x%x\n", els_command,
5957 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
5958 spin_lock_irq(&phba->hbalock);
5959 list_del_init(&piocb->dlist);
07951076 5960 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
2a9bf3d0 5961 spin_unlock_irq(&phba->hbalock);
dea3101e 5962 }
5a0e326d 5963
2e0fef85
JS
5964 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
5965 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
dea3101e
JB
5966}
5967
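
The handler above uses the splice-out/scan/splice-back idiom to keep the ring lock hold time short: the completion queue is taken privately, aged without the lock, expired entries are remembered on a side list through a second list head, and the queue is then spliced back before the aborts are issued. A condensed sketch of that idiom with illustrative types (not the driver's struct lpfc_iocbq):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_els_cmd {
	struct list_head list;           /* membership on the ring's txcmplq */
	struct list_head dlist;          /* temporary link on the abort list */
	unsigned int drvr_timeout;       /* remaining lifetime in seconds    */
};

/* Age every pending command by 'elapsed' and collect the expired ones on
 * 'abort_list' so they can be aborted after the ring lock is released.
 */
static void demo_age_els_cmds(spinlock_t *ring_lock, struct list_head *txcmplq,
			      unsigned int elapsed, struct list_head *abort_list)
{
	LIST_HEAD(snapshot);
	struct demo_els_cmd *cmd, *tmp;

	spin_lock_irq(ring_lock);
	list_splice_init(txcmplq, &snapshot);    /* take the queue privately */
	spin_unlock_irq(ring_lock);

	list_for_each_entry_safe(cmd, tmp, &snapshot, list) {
		if (cmd->drvr_timeout > 0) {
			/* not expired yet: just age it */
			cmd->drvr_timeout -= min(cmd->drvr_timeout, elapsed);
			continue;
		}
		list_add_tail(&cmd->dlist, abort_list);
	}

	spin_lock_irq(ring_lock);
	list_splice(&snapshot, txcmplq);         /* give the queue back */
	spin_unlock_irq(ring_lock);
}
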
e59058c4 5968/**
3621a710 5969 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
e59058c4
JS
5970 * @vport: pointer to a host virtual N_Port data structure.
5971 *
5972 * This routine is used to clean up all the outstanding ELS commands on a
5973 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
5974 * routine. After that, it walks the ELS transmit queue to remove all the
5975 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
5976 * the IOCBs with a non-NULL completion callback function, the callback
5977 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5978 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
5979 * callback function, the IOCB will simply be released. Finally, it walks
5980 * the ELS transmit completion queue to issue an abort IOCB to any transmit
5981 * completion queue IOCB that is associated with the @vport and is not
5982 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
5983 * part of the discovery state machine) out to HBA by invoking the
5984 * lpfc_sli_issue_abort_iotag() routine. Note that although this function
5985 * issues an abort IOCB for each queued transmit completion IOCB, it does not
5986 * guarantee that the IOCBs have been aborted by the time this function returns.
5987 **/
dea3101e 5988void
2e0fef85 5989lpfc_els_flush_cmd(struct lpfc_vport *vport)
dea3101e 5990{
2534ba75 5991 LIST_HEAD(completions);
2e0fef85 5992 struct lpfc_hba *phba = vport->phba;
329f9bc7 5993 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
dea3101e
JB
5994 struct lpfc_iocbq *tmp_iocb, *piocb;
5995 IOCB_t *cmd = NULL;
92d7f7b0
JS
5996
5997 lpfc_fabric_abort_vport(vport);
dea3101e 5998
2e0fef85 5999 spin_lock_irq(&phba->hbalock);
dea3101e
JB
6000 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
6001 cmd = &piocb->iocb;
6002
6003 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
6004 continue;
6005 }
6006
6007 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
329f9bc7
JS
6008 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
6009 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
6010 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
6011 cmd->ulpCommand == CMD_ABORT_XRI_CN)
dea3101e 6012 continue;
dea3101e 6013
2e0fef85
JS
6014 if (piocb->vport != vport)
6015 continue;
6016
2534ba75 6017 list_move_tail(&piocb->list, &completions);
1dcb58e5 6018 pring->txq_cnt--;
dea3101e
JB
6019 }
6020
6021 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
dea3101e
JB
6022 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
6023 continue;
6024 }
dea3101e 6025
2e0fef85
JS
6026 if (piocb->vport != vport)
6027 continue;
6028
07951076 6029 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
dea3101e 6030 }
2e0fef85 6031 spin_unlock_irq(&phba->hbalock);
2534ba75 6032
a257bf90
JS
6033 /* Cancel all the IOCBs from the completions list */
6034 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6035 IOERR_SLI_ABORTED);
2534ba75 6036
dea3101e
JB
6037 return;
6038}
6039
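
The flush routines above follow the usual collect-then-cancel discipline: the queued requests are detached under the ring lock, then completed as aborted outside the lock so the completion callbacks can safely re-acquire it. A stripped-down sketch of that discipline (types, callback shape, and status value are illustrative stand-ins):

#include <linux/list.h>
#include <linux/spinlock.h>

#define DEMO_STAT_LOCAL_REJECT 3  /* stand-in for an 'aborted locally' status */

struct demo_txq_entry {
	struct list_head list;
	void (*cmpl)(struct demo_txq_entry *entry, int status);
};

static void demo_flush_txq(spinlock_t *ring_lock, struct list_head *txq)
{
	LIST_HEAD(completions);
	struct demo_txq_entry *entry, *tmp;

	/* Move everything off the transmit queue while holding the lock */
	spin_lock_irq(ring_lock);
	list_splice_init(txq, &completions);
	spin_unlock_irq(ring_lock);

	/* Fail each request outside the lock; callbacks may re-acquire it */
	list_for_each_entry_safe(entry, tmp, &completions, list) {
		list_del_init(&entry->list);
		if (entry->cmpl)
			entry->cmpl(entry, DEMO_STAT_LOCAL_REJECT);
		/* entries without a completion handler would be freed here */
	}
}
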
e59058c4 6040/**
3621a710 6041 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
e59058c4
JS
6042 * @phba: pointer to lpfc hba data structure.
6043 *
6044 * This routine is used to clean up all the outstanding ELS commands on a
6045 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
6046 * routine. After that, it walks the ELS transmit queue to remove all the
6047 * IOCBs on the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
6048 * the IOCBs with the completion callback function associated, the callback
6049 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
6050 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
6051 * callback function associated, the IOCB will simply be released. Finally,
6052 * it walks the ELS transmit completion queue to issue an abort IOCB to any
6053 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
6054 * management plane IOCBs that are not part of the discovery state machine)
6055 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
6056 **/
549e55cd
JS
6057void
6058lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
6059{
6060 LIST_HEAD(completions);
6061 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6062 struct lpfc_iocbq *tmp_iocb, *piocb;
6063 IOCB_t *cmd = NULL;
6064
6065 lpfc_fabric_abort_hba(phba);
6066 spin_lock_irq(&phba->hbalock);
6067 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
6068 cmd = &piocb->iocb;
6069 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
6070 continue;
6071 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
6072 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
6073 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
6074 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
6075 cmd->ulpCommand == CMD_ABORT_XRI_CN)
6076 continue;
6077 list_move_tail(&piocb->list, &completions);
6078 pring->txq_cnt--;
6079 }
6080 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6081 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
6082 continue;
6083 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
6084 }
6085 spin_unlock_irq(&phba->hbalock);
a257bf90
JS
6086
6087 /* Cancel all the IOCBs from the completions list */
6088 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6089 IOERR_SLI_ABORTED);
6090
549e55cd
JS
6091 return;
6092}
6093
ea2151b4 6094/**
3621a710 6095 * lpfc_send_els_failure_event - Posts an ELS command failure event
ea2151b4
JS
6096 * @phba: Pointer to hba context object.
6097 * @cmdiocbp: Pointer to command iocb which reported error.
6098 * @rspiocbp: Pointer to response iocb which reported error.
6099 *
6100 * This function sends an event when there is an ELS command
6101 * failure.
6102 **/
6103void
6104lpfc_send_els_failure_event(struct lpfc_hba *phba,
6105 struct lpfc_iocbq *cmdiocbp,
6106 struct lpfc_iocbq *rspiocbp)
6107{
6108 struct lpfc_vport *vport = cmdiocbp->vport;
6109 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6110 struct lpfc_lsrjt_event lsrjt_event;
6111 struct lpfc_fabric_event_header fabric_event;
6112 struct ls_rjt stat;
6113 struct lpfc_nodelist *ndlp;
6114 uint32_t *pcmd;
6115
6116 ndlp = cmdiocbp->context1;
6117 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
6118 return;
6119
6120 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
6121 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
6122 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
6123 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
6124 sizeof(struct lpfc_name));
6125 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
6126 sizeof(struct lpfc_name));
6127 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
6128 cmdiocbp->context2)->virt);
49198b37 6129 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
ea2151b4
JS
6130 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
6131 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
6132 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
6133 fc_host_post_vendor_event(shost,
6134 fc_get_event_number(),
6135 sizeof(lsrjt_event),
6136 (char *)&lsrjt_event,
ddcc50f0 6137 LPFC_NL_VENDOR_ID);
ea2151b4
JS
6138 return;
6139 }
6140 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
6141 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
6142 fabric_event.event_type = FC_REG_FABRIC_EVENT;
6143 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
6144 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
6145 else
6146 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
6147 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
6148 sizeof(struct lpfc_name));
6149 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
6150 sizeof(struct lpfc_name));
6151 fc_host_post_vendor_event(shost,
6152 fc_get_event_number(),
6153 sizeof(fabric_event),
6154 (char *)&fabric_event,
ddcc50f0 6155 LPFC_NL_VENDOR_ID);
ea2151b4
JS
6156 return;
6157 }
6158
6159}
6160
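
fc_host_post_vendor_event() treats the event payload as opaque bytes tagged with a vendor ID and a transport-wide sequence number, which is all the failure-event code above relies on. A minimal sketch of posting such an event; the payload layout, vendor ID, and event-type value below are illustrative stand-ins (the real driver uses LPFC_NL_VENDOR_ID and the event codes from lpfc_nl.h):

#include <linux/string.h>
#include <linux/types.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#define DEMO_NL_VENDOR_ID  0x0a0a       /* stand-in for LPFC_NL_VENDOR_ID */
#define DEMO_ELS_EVENT     0x1          /* stand-in for FC_REG_ELS_EVENT  */

struct demo_lsrjt_event {               /* illustrative payload layout    */
	u32 event_type;
	u32 subcategory;
	u8  wwpn[8];
	u32 reason_code;
};

static void demo_post_lsrjt_event(struct Scsi_Host *shost, const u8 *wwpn,
				  u32 reason_code)
{
	struct demo_lsrjt_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.event_type = DEMO_ELS_EVENT;
	ev.subcategory = 0;                      /* driver-private subtype */
	memcpy(ev.wwpn, wwpn, sizeof(ev.wwpn));
	ev.reason_code = reason_code;

	/* Each event gets a fresh transport-wide sequence number */
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(ev), (char *)&ev, DEMO_NL_VENDOR_ID);
}
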
6161/**
3621a710 6162 * lpfc_send_els_event - Posts unsolicited els event
ea2151b4
JS
6163 * @vport: Pointer to vport object.
6164 * @ndlp: Pointer FC node object.
6165 * @cmd: ELS command code.
6166 *
6167 * This function posts an event when there is an incoming
6168 * unsolicited ELS command.
6169 **/
6170static void
6171lpfc_send_els_event(struct lpfc_vport *vport,
6172 struct lpfc_nodelist *ndlp,
ddcc50f0 6173 uint32_t *payload)
ea2151b4 6174{
ddcc50f0
JS
6175 struct lpfc_els_event_header *els_data = NULL;
6176 struct lpfc_logo_event *logo_data = NULL;
ea2151b4
JS
6177 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6178
ddcc50f0
JS
6179 if (*payload == ELS_CMD_LOGO) {
6180 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
6181 if (!logo_data) {
6182 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6183 "0148 Failed to allocate memory "
6184 "for LOGO event\n");
6185 return;
6186 }
6187 els_data = &logo_data->header;
6188 } else {
6189 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
6190 GFP_KERNEL);
6191 if (!els_data) {
6192 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6193 "0149 Failed to allocate memory "
6194 "for ELS event\n");
6195 return;
6196 }
6197 }
6198 els_data->event_type = FC_REG_ELS_EVENT;
6199 switch (*payload) {
ea2151b4 6200 case ELS_CMD_PLOGI:
ddcc50f0 6201 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
ea2151b4
JS
6202 break;
6203 case ELS_CMD_PRLO:
ddcc50f0 6204 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
ea2151b4
JS
6205 break;
6206 case ELS_CMD_ADISC:
ddcc50f0
JS
6207 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
6208 break;
6209 case ELS_CMD_LOGO:
6210 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
6211 /* Copy the WWPN in the LOGO payload */
6212 memcpy(logo_data->logo_wwpn, &payload[2],
6213 sizeof(struct lpfc_name));
ea2151b4
JS
6214 break;
6215 default:
e916141c 6216 kfree(els_data);
ea2151b4
JS
6217 return;
6218 }
ddcc50f0
JS
6219 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
6220 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
6221 if (*payload == ELS_CMD_LOGO) {
6222 fc_host_post_vendor_event(shost,
6223 fc_get_event_number(),
6224 sizeof(struct lpfc_logo_event),
6225 (char *)logo_data,
6226 LPFC_NL_VENDOR_ID);
6227 kfree(logo_data);
6228 } else {
6229 fc_host_post_vendor_event(shost,
6230 fc_get_event_number(),
6231 sizeof(struct lpfc_els_event_header),
6232 (char *)els_data,
6233 LPFC_NL_VENDOR_ID);
6234 kfree(els_data);
6235 }
ea2151b4
JS
6236
6237 return;
6238}
6239
6240
e59058c4 6241/**
3621a710 6242 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
e59058c4
JS
6243 * @phba: pointer to lpfc hba data structure.
6244 * @pring: pointer to a SLI ring.
6245 * @vport: pointer to a host virtual N_Port data structure.
6246 * @elsiocb: pointer to lpfc els command iocb data structure.
6247 *
6248 * This routine is used for processing the IOCB associated with an unsolicited
6249 * event. It first determines whether there is an existing ndlp that matches
6250 * the DID from the unsolicited IOCB. If not, it will create a new one with
6251 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
6252 * IOCB is then used to invoke the proper routine and to set up proper state
6253 * of the discovery state machine.
6254 **/
ed957684
JS
6255static void
6256lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
92d7f7b0 6257 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
dea3101e 6258{
87af33fe 6259 struct Scsi_Host *shost;
dea3101e 6260 struct lpfc_nodelist *ndlp;
dea3101e 6261 struct ls_rjt stat;
92d7f7b0 6262 uint32_t *payload;
2e0fef85 6263 uint32_t cmd, did, newnode, rjt_err = 0;
ed957684 6264 IOCB_t *icmd = &elsiocb->iocb;
dea3101e 6265
e47c9093 6266 if (!vport || !(elsiocb->context2))
dea3101e 6267 goto dropit;
2e0fef85 6268
dea3101e 6269 newnode = 0;
92d7f7b0
JS
6270 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
6271 cmd = *payload;
ed957684 6272 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
495a714c 6273 lpfc_post_buffer(phba, pring, 1);
dea3101e 6274
858c9f6c
JS
6275 did = icmd->un.rcvels.remoteID;
6276 if (icmd->ulpStatus) {
6277 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6278 "RCV Unsol ELS: status:x%x/x%x did:x%x",
6279 icmd->ulpStatus, icmd->un.ulpWord[4], did);
dea3101e 6280 goto dropit;
858c9f6c 6281 }
dea3101e
JB
6282
6283 /* Check to see if link went down during discovery */
ed957684 6284 if (lpfc_els_chk_latt(vport))
dea3101e 6285 goto dropit;
dea3101e 6286
c868595d 6287 /* Ignore traffic received during vport shutdown. */
92d7f7b0
JS
6288 if (vport->load_flag & FC_UNLOADING)
6289 goto dropit;
6290
92494144
JS
6291 /* If NPort discovery is delayed, drop incoming ELS */
6292 if ((vport->fc_flag & FC_DISC_DELAYED) &&
6293 (cmd != ELS_CMD_PLOGI))
6294 goto dropit;
6295
2e0fef85 6296 ndlp = lpfc_findnode_did(vport, did);
c9f8735b 6297 if (!ndlp) {
dea3101e 6298 /* Cannot find existing Fabric ndlp, so allocate a new one */
c9f8735b 6299 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
ed957684 6300 if (!ndlp)
dea3101e 6301 goto dropit;
dea3101e 6302
2e0fef85 6303 lpfc_nlp_init(vport, ndlp, did);
98c9ea5c 6304 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
dea3101e 6305 newnode = 1;
e47c9093 6306 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
dea3101e 6307 ndlp->nlp_type |= NLP_FABRIC;
58da1ffb
JS
6308 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
6309 ndlp = lpfc_enable_node(vport, ndlp,
6310 NLP_STE_UNUSED_NODE);
6311 if (!ndlp)
6312 goto dropit;
6313 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6314 newnode = 1;
6315 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6316 ndlp->nlp_type |= NLP_FABRIC;
6317 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
6318 /* This is similar to the new node path */
6319 ndlp = lpfc_nlp_get(ndlp);
6320 if (!ndlp)
6321 goto dropit;
6322 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6323 newnode = 1;
87af33fe 6324 }
dea3101e
JB
6325
6326 phba->fc_stat.elsRcvFrame++;
e47c9093 6327
329f9bc7 6328 elsiocb->context1 = lpfc_nlp_get(ndlp);
2e0fef85 6329 elsiocb->vport = vport;
dea3101e
JB
6330
6331 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
6332 cmd &= ELS_CMD_MASK;
6333 }
6334 /* ELS command <elsCmd> received from NPORT <did> */
e8b62011
JS
6335 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6336 "0112 ELS command x%x received from NPORT x%x "
6337 "Data: x%x\n", cmd, did, vport->port_state);
dea3101e
JB
6338 switch (cmd) {
6339 case ELS_CMD_PLOGI:
858c9f6c
JS
6340 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6341 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
6342 did, vport->port_state, ndlp->nlp_flag);
6343
dea3101e 6344 phba->fc_stat.elsRcvPLOGI++;
858c9f6c
JS
6345 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
6346
ddcc50f0 6347 lpfc_send_els_event(vport, ndlp, payload);
92494144
JS
6348
6349 /* If Nport discovery is delayed, reject PLOGIs */
6350 if (vport->fc_flag & FC_DISC_DELAYED) {
6351 rjt_err = LSRJT_UNABLE_TPC;
6352 break;
6353 }
858c9f6c 6354 if (vport->port_state < LPFC_DISC_AUTH) {
1b32f6aa
JS
6355 if (!(phba->pport->fc_flag & FC_PT2PT) ||
6356 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
6357 rjt_err = LSRJT_UNABLE_TPC;
6358 break;
6359 }
6360 /* We get here, and drop thru, if we are PT2PT with
6361 * another NPort and the other side has initiated
6362 * the PLOGI before responding to our FLOGI.
6363 */
dea3101e 6364 }
87af33fe
JS
6365
6366 shost = lpfc_shost_from_vport(vport);
6367 spin_lock_irq(shost->host_lock);
6368 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
6369 spin_unlock_irq(shost->host_lock);
6370
2e0fef85
JS
6371 lpfc_disc_state_machine(vport, ndlp, elsiocb,
6372 NLP_EVT_RCV_PLOGI);
858c9f6c 6373
dea3101e
JB
6374 break;
6375 case ELS_CMD_FLOGI:
858c9f6c
JS
6376 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6377 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
6378 did, vport->port_state, ndlp->nlp_flag);
6379
dea3101e 6380 phba->fc_stat.elsRcvFLOGI++;
51ef4c26 6381 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
87af33fe 6382 if (newnode)
98c9ea5c 6383 lpfc_nlp_put(ndlp);
dea3101e
JB
6384 break;
6385 case ELS_CMD_LOGO:
858c9f6c
JS
6386 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6387 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
6388 did, vport->port_state, ndlp->nlp_flag);
6389
dea3101e 6390 phba->fc_stat.elsRcvLOGO++;
ddcc50f0 6391 lpfc_send_els_event(vport, ndlp, payload);
2e0fef85 6392 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 6393 rjt_err = LSRJT_UNABLE_TPC;
dea3101e
JB
6394 break;
6395 }
2e0fef85 6396 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
dea3101e
JB
6397 break;
6398 case ELS_CMD_PRLO:
858c9f6c
JS
6399 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6400 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
6401 did, vport->port_state, ndlp->nlp_flag);
6402
dea3101e 6403 phba->fc_stat.elsRcvPRLO++;
ddcc50f0 6404 lpfc_send_els_event(vport, ndlp, payload);
2e0fef85 6405 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 6406 rjt_err = LSRJT_UNABLE_TPC;
dea3101e
JB
6407 break;
6408 }
2e0fef85 6409 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
dea3101e
JB
6410 break;
6411 case ELS_CMD_RSCN:
6412 phba->fc_stat.elsRcvRSCN++;
51ef4c26 6413 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
87af33fe 6414 if (newnode)
98c9ea5c 6415 lpfc_nlp_put(ndlp);
dea3101e
JB
6416 break;
6417 case ELS_CMD_ADISC:
858c9f6c
JS
6418 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6419 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
6420 did, vport->port_state, ndlp->nlp_flag);
6421
ddcc50f0 6422 lpfc_send_els_event(vport, ndlp, payload);
dea3101e 6423 phba->fc_stat.elsRcvADISC++;
2e0fef85 6424 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 6425 rjt_err = LSRJT_UNABLE_TPC;
dea3101e
JB
6426 break;
6427 }
2e0fef85
JS
6428 lpfc_disc_state_machine(vport, ndlp, elsiocb,
6429 NLP_EVT_RCV_ADISC);
dea3101e
JB
6430 break;
6431 case ELS_CMD_PDISC:
858c9f6c
JS
6432 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6433 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
6434 did, vport->port_state, ndlp->nlp_flag);
6435
dea3101e 6436 phba->fc_stat.elsRcvPDISC++;
2e0fef85 6437 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 6438 rjt_err = LSRJT_UNABLE_TPC;
dea3101e
JB
6439 break;
6440 }
2e0fef85
JS
6441 lpfc_disc_state_machine(vport, ndlp, elsiocb,
6442 NLP_EVT_RCV_PDISC);
dea3101e
JB
6443 break;
6444 case ELS_CMD_FARPR:
858c9f6c
JS
6445 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6446 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
6447 did, vport->port_state, ndlp->nlp_flag);
6448
dea3101e 6449 phba->fc_stat.elsRcvFARPR++;
2e0fef85 6450 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
dea3101e
JB
6451 break;
6452 case ELS_CMD_FARP:
858c9f6c
JS
6453 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6454 "RCV FARP: did:x%x/ste:x%x flg:x%x",
6455 did, vport->port_state, ndlp->nlp_flag);
6456
dea3101e 6457 phba->fc_stat.elsRcvFARP++;
2e0fef85 6458 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
dea3101e
JB
6459 break;
6460 case ELS_CMD_FAN:
858c9f6c
JS
6461 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6462 "RCV FAN: did:x%x/ste:x%x flg:x%x",
6463 did, vport->port_state, ndlp->nlp_flag);
6464
dea3101e 6465 phba->fc_stat.elsRcvFAN++;
2e0fef85 6466 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
dea3101e 6467 break;
dea3101e 6468 case ELS_CMD_PRLI:
858c9f6c
JS
6469 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6470 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
6471 did, vport->port_state, ndlp->nlp_flag);
6472
dea3101e 6473 phba->fc_stat.elsRcvPRLI++;
2e0fef85 6474 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 6475 rjt_err = LSRJT_UNABLE_TPC;
dea3101e
JB
6476 break;
6477 }
2e0fef85 6478 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
dea3101e 6479 break;
7bb3b137 6480 case ELS_CMD_LIRR:
858c9f6c
JS
6481 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6482 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
6483 did, vport->port_state, ndlp->nlp_flag);
6484
7bb3b137 6485 phba->fc_stat.elsRcvLIRR++;
2e0fef85 6486 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
87af33fe 6487 if (newnode)
98c9ea5c 6488 lpfc_nlp_put(ndlp);
7bb3b137 6489 break;
12265f68
JS
6490 case ELS_CMD_RLS:
6491 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6492 "RCV RLS: did:x%x/ste:x%x flg:x%x",
6493 did, vport->port_state, ndlp->nlp_flag);
6494
6495 phba->fc_stat.elsRcvRLS++;
6496 lpfc_els_rcv_rls(vport, elsiocb, ndlp);
6497 if (newnode)
6498 lpfc_nlp_put(ndlp);
6499 break;
7bb3b137 6500 case ELS_CMD_RPS:
858c9f6c
JS
6501 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6502 "RCV RPS: did:x%x/ste:x%x flg:x%x",
6503 did, vport->port_state, ndlp->nlp_flag);
6504
7bb3b137 6505 phba->fc_stat.elsRcvRPS++;
2e0fef85 6506 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
87af33fe 6507 if (newnode)
98c9ea5c 6508 lpfc_nlp_put(ndlp);
7bb3b137
JW
6509 break;
6510 case ELS_CMD_RPL:
858c9f6c
JS
6511 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6512 "RCV RPL: did:x%x/ste:x%x flg:x%x",
6513 did, vport->port_state, ndlp->nlp_flag);
6514
7bb3b137 6515 phba->fc_stat.elsRcvRPL++;
2e0fef85 6516 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
87af33fe 6517 if (newnode)
98c9ea5c 6518 lpfc_nlp_put(ndlp);
7bb3b137 6519 break;
dea3101e 6520 case ELS_CMD_RNID:
858c9f6c
JS
6521 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6522 "RCV RNID: did:x%x/ste:x%x flg:x%x",
6523 did, vport->port_state, ndlp->nlp_flag);
6524
dea3101e 6525 phba->fc_stat.elsRcvRNID++;
2e0fef85 6526 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
87af33fe 6527 if (newnode)
98c9ea5c 6528 lpfc_nlp_put(ndlp);
dea3101e 6529 break;
12265f68
JS
6530 case ELS_CMD_RTV:
6531 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6532 "RCV RTV: did:x%x/ste:x%x flg:x%x",
6533 did, vport->port_state, ndlp->nlp_flag);
6534 phba->fc_stat.elsRcvRTV++;
6535 lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
6536 if (newnode)
6537 lpfc_nlp_put(ndlp);
6538 break;
5ffc266e
JS
6539 case ELS_CMD_RRQ:
6540 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6541 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
6542 did, vport->port_state, ndlp->nlp_flag);
6543
6544 phba->fc_stat.elsRcvRRQ++;
6545 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
6546 if (newnode)
6547 lpfc_nlp_put(ndlp);
6548 break;
12265f68
JS
6549 case ELS_CMD_ECHO:
6550 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6551 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
6552 did, vport->port_state, ndlp->nlp_flag);
6553
6554 phba->fc_stat.elsRcvECHO++;
6555 lpfc_els_rcv_echo(vport, elsiocb, ndlp);
6556 if (newnode)
6557 lpfc_nlp_put(ndlp);
6558 break;
dea3101e 6559 default:
858c9f6c
JS
6560 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6561 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
6562 cmd, did, vport->port_state);
6563
dea3101e 6564 /* Unsupported ELS command, reject */
63e801ce 6565 rjt_err = LSRJT_CMD_UNSUPPORTED;
dea3101e
JB
6566
6567 /* Unknown ELS command <elsCmd> received from NPORT <did> */
e8b62011
JS
6568 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6569 "0115 Unknown ELS command x%x "
6570 "received from NPORT x%x\n", cmd, did);
87af33fe 6571 if (newnode)
98c9ea5c 6572 lpfc_nlp_put(ndlp);
dea3101e
JB
6573 break;
6574 }
6575
6576 /* Check if we need to LS_RJT the received ELS cmd */
6577 if (rjt_err) {
92d7f7b0 6578 memset(&stat, 0, sizeof(stat));
858c9f6c 6579 stat.un.b.lsRjtRsnCode = rjt_err;
1f679caf 6580 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
858c9f6c
JS
6581 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
6582 NULL);
dea3101e
JB
6583 }
6584
d7c255b2
JS
6585 lpfc_nlp_put(elsiocb->context1);
6586 elsiocb->context1 = NULL;
ed957684
JS
6587 return;
6588
6589dropit:
98c9ea5c 6590 if (vport && !(vport->load_flag & FC_UNLOADING))
6fb120a7
JS
6591 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6592 "0111 Dropping received ELS cmd "
ed957684 6593 "Data: x%x x%x x%x\n",
6fb120a7 6594 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
ed957684
JS
6595 phba->fc_stat.elsRcvDrop++;
6596}
6597
e59058c4 6598/**
3621a710 6599 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
e59058c4
JS
6600 * @phba: pointer to lpfc hba data structure.
6601 * @pring: pointer to a SLI ring.
6602 * @elsiocb: pointer to lpfc els iocb data structure.
6603 *
6604 * This routine is used to process an unsolicited event received from a SLI
6605 * (Service Level Interface) ring. The actual processing of the data buffer
6606 * associated with the unsolicited event is done by invoking the routine
6607 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
6608 * SLI ring on which the unsolicited event was received.
6609 **/
ed957684
JS
6610void
6611lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6612 struct lpfc_iocbq *elsiocb)
6613{
6614 struct lpfc_vport *vport = phba->pport;
ed957684 6615 IOCB_t *icmd = &elsiocb->iocb;
ed957684 6616 dma_addr_t paddr;
92d7f7b0
JS
6617 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
6618 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
6619
d7c255b2 6620 elsiocb->context1 = NULL;
92d7f7b0
JS
6621 elsiocb->context2 = NULL;
6622 elsiocb->context3 = NULL;
ed957684 6623
92d7f7b0
JS
6624 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
6625 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
6626 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
6627 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
ed957684
JS
6628 phba->fc_stat.NoRcvBuf++;
6629 /* Not enough posted buffers; Try posting more buffers */
92d7f7b0 6630 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
495a714c 6631 lpfc_post_buffer(phba, pring, 0);
ed957684
JS
6632 return;
6633 }
6634
92d7f7b0
JS
6635 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
6636 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
6637 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6638 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
6639 vport = phba->pport;
6fb120a7
JS
6640 else
6641 vport = lpfc_find_vport_by_vpid(phba,
6d368e53 6642 icmd->unsli3.rcvsli3.vpi);
92d7f7b0 6643 }
6d368e53 6644
7f5f3d0d
JS
6645 /* If there are no BDEs associated
6646 * with this IOCB, there is nothing to do.
6647 */
ed957684
JS
6648 if (icmd->ulpBdeCount == 0)
6649 return;
6650
7f5f3d0d
JS
6651 /* type of ELS cmd is first 32bit word
6652 * in packet
6653 */
ed957684 6654 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
92d7f7b0 6655 elsiocb->context2 = bdeBuf1;
ed957684
JS
6656 } else {
6657 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
6658 icmd->un.cont64[0].addrLow);
92d7f7b0
JS
6659 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
6660 paddr);
ed957684
JS
6661 }
6662
92d7f7b0
JS
6663 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
6664 /*
6665 * The different unsolicited event handlers would tell us
6666 * if they are done with "mp" by setting context2 to NULL.
6667 */
dea3101e 6668 if (elsiocb->context2) {
92d7f7b0
JS
6669 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
6670 elsiocb->context2 = NULL;
dea3101e 6671 }
ed957684
JS
6672
6673 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
92d7f7b0 6674 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
ed957684 6675 icmd->ulpBdeCount == 2) {
92d7f7b0
JS
6676 elsiocb->context2 = bdeBuf2;
6677 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
ed957684
JS
6678 /* free mp if we are done with it */
6679 if (elsiocb->context2) {
92d7f7b0
JS
6680 lpfc_in_buf_free(phba, elsiocb->context2);
6681 elsiocb->context2 = NULL;
6682 }
6683 }
6684}
6685
e59058c4 6686/**
3621a710 6687 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
e59058c4
JS
6688 * @phba: pointer to lpfc hba data structure.
6689 * @vport: pointer to a virtual N_Port data structure.
6690 *
6691 * This routine issues a Port Login (PLOGI) to the Name Server with
6692 * State Change Request (SCR) for a @vport. This routine will create an
6693 * ndlp for the Name Server associated to the @vport if such node does
6694 * not already exist. The PLOGI to Name Server is issued by invoking the
6695 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
6696 * (FDMI) is configured on the @vport, an FDMI node will be created and
6697 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
6698 **/
92d7f7b0
JS
6699void
6700lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
6701{
6702 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
92494144
JS
6703 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6704
6705 /*
6706 * If lpfc_delay_discovery parameter is set and the clean address
6707 * bit is cleared and the fc fabric parameters have changed, delay FC NPort
6708 * discovery.
6709 */
6710 spin_lock_irq(shost->host_lock);
6711 if (vport->fc_flag & FC_DISC_DELAYED) {
6712 spin_unlock_irq(shost->host_lock);
6713 mod_timer(&vport->delayed_disc_tmo,
6714 jiffies + HZ * phba->fc_ratov);
6715 return;
6716 }
6717 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
6718
6719 ndlp = lpfc_findnode_did(vport, NameServer_DID);
6720 if (!ndlp) {
6721 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
6722 if (!ndlp) {
76a95d75 6723 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
92d7f7b0
JS
6724 lpfc_disc_start(vport);
6725 return;
6726 }
6727 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
6728 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6729 "0251 NameServer login: no memory\n");
92d7f7b0
JS
6730 return;
6731 }
6732 lpfc_nlp_init(vport, ndlp, NameServer_DID);
e47c9093
JS
6733 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
6734 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
6735 if (!ndlp) {
76a95d75 6736 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
e47c9093
JS
6737 lpfc_disc_start(vport);
6738 return;
6739 }
6740 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6741 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6742 "0348 NameServer login: node freed\n");
6743 return;
6744 }
92d7f7b0 6745 }
58da1ffb 6746 ndlp->nlp_type |= NLP_FABRIC;
92d7f7b0
JS
6747
6748 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6749
6750 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
6751 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
6752 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6753 "0252 Cannot issue NameServer login\n");
92d7f7b0
JS
6754 return;
6755 }
6756
3de2a653 6757 if (vport->cfg_fdmi_on) {
63e801ce
JS
6758 /* If this is the first time, allocate an ndlp and initialize
6759 * it. Otherwise, make sure the node is enabled and then do the
6760 * login.
6761 */
6762 ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID);
6763 if (!ndlp_fdmi) {
6764 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
6765 GFP_KERNEL);
6766 if (ndlp_fdmi) {
6767 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
6768 ndlp_fdmi->nlp_type |= NLP_FABRIC;
6769 } else
6770 return;
6771 }
6772 if (!NLP_CHK_NODE_ACT(ndlp_fdmi))
6773 ndlp_fdmi = lpfc_enable_node(vport,
6774 ndlp_fdmi,
6775 NLP_STE_NPR_NODE);
6776
92d7f7b0 6777 if (ndlp_fdmi) {
58da1ffb 6778 lpfc_nlp_set_state(vport, ndlp_fdmi,
63e801ce
JS
6779 NLP_STE_PLOGI_ISSUE);
6780 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0);
92d7f7b0
JS
6781 }
6782 }
92d7f7b0
JS
6783}
6784
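
The NameServer and FDMI handling above repeats a find-or-allocate idiom for node-list entries: look the node up by DID, otherwise allocate one from the driver's node mempool, initialize it, and link it in. A reduced sketch of that idiom (illustrative node type keyed only by DID, with the locking and state-machine transitions left out):

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_node {                       /* illustrative node-list entry */
	struct list_head list;
	u32 did;
};

/* Reuse an existing entry keyed by DID, otherwise carve a new one out of
 * the node mempool and link it onto the list.
 */
static struct demo_node *demo_get_node(struct list_head *nodes,
				       mempool_t *pool, u32 did)
{
	struct demo_node *np;

	list_for_each_entry(np, nodes, list)
		if (np->did == did)
			return np;               /* already known          */

	np = mempool_alloc(pool, GFP_KERNEL);
	if (!np)
		return NULL;                     /* caller handles failure */

	memset(np, 0, sizeof(*np));
	np->did = did;
	list_add_tail(&np->list, nodes);
	return np;
}
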
e59058c4 6785/**
3621a710 6786 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
e59058c4
JS
6787 * @phba: pointer to lpfc hba data structure.
6788 * @pmb: pointer to the driver internal queue element for mailbox command.
6789 *
6790 * This routine is the completion callback function to register new vport
6791 * mailbox command. If the new vport mailbox command completes successfully,
6792 * the fabric registration login shall be performed on the physical port (the
6793 * new vport created is actually a physical port, with VPI 0), or the port
6794 * login to the Name Server for State Change Request (SCR) will be performed
6795 * on a virtual port (a real virtual port, with VPI greater than 0).
6796 **/
92d7f7b0
JS
6797static void
6798lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6799{
6800 struct lpfc_vport *vport = pmb->vport;
6801 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6802 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
04c68496 6803 MAILBOX_t *mb = &pmb->u.mb;
695a814e 6804 int rc;
92d7f7b0 6805
09372820 6806 spin_lock_irq(shost->host_lock);
92d7f7b0 6807 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
09372820 6808 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
6809
6810 if (mb->mbxStatus) {
e8b62011 6811 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
38b92ef8
JS
6812 "0915 Register VPI failed : Status: x%x"
6813 " upd bit: x%x \n", mb->mbxStatus,
6814 mb->un.varRegVpi.upd);
6815 if (phba->sli_rev == LPFC_SLI_REV4 &&
6816 mb->un.varRegVpi.upd)
6817 goto mbox_err_exit ;
92d7f7b0
JS
6818
6819 switch (mb->mbxStatus) {
6820 case 0x11: /* unsupported feature */
6821 case 0x9603: /* max_vpi exceeded */
7f5f3d0d 6822 case 0x9602: /* Link event since CLEAR_LA */
92d7f7b0
JS
6823 /* giving up on vport registration */
6824 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6825 spin_lock_irq(shost->host_lock);
6826 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6827 spin_unlock_irq(shost->host_lock);
6828 lpfc_can_disctmo(vport);
6829 break;
695a814e
JS
6830 /* If reg_vpi fail with invalid VPI status, re-init VPI */
6831 case 0x20:
6832 spin_lock_irq(shost->host_lock);
6833 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6834 spin_unlock_irq(shost->host_lock);
6835 lpfc_init_vpi(phba, pmb, vport->vpi);
6836 pmb->vport = vport;
6837 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
6838 rc = lpfc_sli_issue_mbox(phba, pmb,
6839 MBX_NOWAIT);
6840 if (rc == MBX_NOT_FINISHED) {
6841 lpfc_printf_vlog(vport,
6842 KERN_ERR, LOG_MBOX,
6843 "2732 Failed to issue INIT_VPI"
6844 " mailbox command\n");
6845 } else {
6846 lpfc_nlp_put(ndlp);
6847 return;
6848 }
6849
92d7f7b0
JS
6850 default:
6851 /* Try to recover from this error */
5af5eee7
JS
6852 if (phba->sli_rev == LPFC_SLI_REV4)
6853 lpfc_sli4_unreg_all_rpis(vport);
92d7f7b0 6854 lpfc_mbx_unreg_vpi(vport);
09372820 6855 spin_lock_irq(shost->host_lock);
92d7f7b0 6856 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
09372820 6857 spin_unlock_irq(shost->host_lock);
4b40c59e
JS
6858 if (vport->port_type == LPFC_PHYSICAL_PORT
6859 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
76a95d75 6860 lpfc_issue_init_vfi(vport);
7f5f3d0d
JS
6861 else
6862 lpfc_initial_fdisc(vport);
92d7f7b0
JS
6863 break;
6864 }
92d7f7b0 6865 } else {
695a814e 6866 spin_lock_irq(shost->host_lock);
1987807d 6867 vport->vpi_state |= LPFC_VPI_REGISTERED;
695a814e
JS
6868 spin_unlock_irq(shost->host_lock);
6869 if (vport == phba->pport) {
6fb120a7
JS
6870 if (phba->sli_rev < LPFC_SLI_REV4)
6871 lpfc_issue_fabric_reglogin(vport);
695a814e 6872 else {
fc2b989b
JS
6873 /*
6874 * If the physical port is instantiated using
6875 * FDISC, do not start vport discovery.
6876 */
6877 if (vport->port_state != LPFC_FDISC)
6878 lpfc_start_fdiscs(phba);
695a814e
JS
6879 lpfc_do_scr_ns_plogi(phba, vport);
6880 }
6881 } else
92d7f7b0
JS
6882 lpfc_do_scr_ns_plogi(phba, vport);
6883 }
38b92ef8 6884mbox_err_exit:
fa4066b6
JS
6885 /* Now, we decrement the ndlp reference count held for this
6886 * callback function
6887 */
6888 lpfc_nlp_put(ndlp);
6889
92d7f7b0
JS
6890 mempool_free(pmb, phba->mbox_mem_pool);
6891 return;
6892}
6893
e59058c4 6894/**
3621a710 6895 * lpfc_register_new_vport - Register a new vport with a HBA
e59058c4
JS
6896 * @phba: pointer to lpfc hba data structure.
6897 * @vport: pointer to a host virtual N_Port data structure.
6898 * @ndlp: pointer to a node-list data structure.
6899 *
6900 * This routine registers the @vport as a new virtual port with a HBA.
6901 * It is done through a registering vpi mailbox command.
6902 **/
695a814e 6903void
92d7f7b0
JS
6904lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
6905 struct lpfc_nodelist *ndlp)
6906{
09372820 6907 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
92d7f7b0
JS
6908 LPFC_MBOXQ_t *mbox;
6909
6910 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6911 if (mbox) {
6fb120a7 6912 lpfc_reg_vpi(vport, mbox);
92d7f7b0
JS
6913 mbox->vport = vport;
6914 mbox->context2 = lpfc_nlp_get(ndlp);
6915 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
0b727fea 6916 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
92d7f7b0 6917 == MBX_NOT_FINISHED) {
fa4066b6
JS
6918 /* mailbox command not success, decrement ndlp
6919 * reference count for this command
6920 */
6921 lpfc_nlp_put(ndlp);
92d7f7b0 6922 mempool_free(mbox, phba->mbox_mem_pool);
92d7f7b0 6923
e8b62011
JS
6924 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6925 "0253 Register VPI: Can't send mbox\n");
fa4066b6 6926 goto mbox_err_exit;
92d7f7b0
JS
6927 }
6928 } else {
e8b62011
JS
6929 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6930 "0254 Register VPI: no memory\n");
fa4066b6 6931 goto mbox_err_exit;
92d7f7b0 6932 }
fa4066b6
JS
6933 return;
6934
6935mbox_err_exit:
6936 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6937 spin_lock_irq(shost->host_lock);
6938 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
6939 spin_unlock_irq(shost->host_lock);
6940 return;
92d7f7b0
JS
6941}
6942
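
The mailbox path above takes an ndlp reference before the asynchronous submit and drops it again if the submit never happens, so the completion callback always finds a live node. The same discipline, reduced to a generic kref sketch; all names here are hypothetical and the submit helper is a stub:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref ref;          /* assume kref_init() ran at creation time */
};

static void demo_obj_release(struct kref *kr)
{
	kfree(container_of(kr, struct demo_obj, ref));
}

/* Hypothetical submit helper: pretend the request was queued to firmware */
static int demo_submit_async(struct demo_obj *obj)
{
	return 0;
}

static int demo_start(struct demo_obj *obj)
{
	/* Reference held on behalf of the completion handler */
	kref_get(&obj->ref);

	if (demo_submit_async(obj)) {
		/* Hand-off failed: the completion will never run, undo now */
		kref_put(&obj->ref, demo_obj_release);
		return -EIO;
	}
	return 0;            /* completion handler drops the reference later */
}
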
695a814e 6943/**
0c9ab6f5 6944 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
695a814e
JS
6945 * @phba: pointer to lpfc hba data structure.
6946 *
0c9ab6f5 6947 * This routine cancels the retry delay timers for all the vports.
695a814e
JS
6948 **/
6949void
0c9ab6f5 6950lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
695a814e
JS
6951{
6952 struct lpfc_vport **vports;
6953 struct lpfc_nodelist *ndlp;
695a814e 6954 uint32_t link_state;
0c9ab6f5 6955 int i;
695a814e
JS
6956
6957 /* Treat this failure as linkdown for all vports */
6958 link_state = phba->link_state;
6959 lpfc_linkdown(phba);
6960 phba->link_state = link_state;
6961
6962 vports = lpfc_create_vport_work_array(phba);
6963
6964 if (vports) {
6965 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6966 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6967 if (ndlp)
6968 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6969 lpfc_els_flush_cmd(vports[i]);
6970 }
6971 lpfc_destroy_vport_work_array(phba, vports);
6972 }
0c9ab6f5
JS
6973}
6974
6975/**
6976 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
6977 * @phba: pointer to lpfc hba data structure.
6978 *
6979 * This routine aborts all pending discovery commands and
6980 * starts a timer to retry FLOGI for the physical port
6981 * discovery.
6982 **/
6983void
6984lpfc_retry_pport_discovery(struct lpfc_hba *phba)
6985{
6986 struct lpfc_nodelist *ndlp;
6987 struct Scsi_Host *shost;
6988
6989 /* Cancel the all vports retry delay retry timers */
6990 lpfc_cancel_all_vport_retry_delay_timer(phba);
695a814e
JS
6991
6992 /* If the fabric requires FLOGI, then re-instantiate the physical login */
6993 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6994 if (!ndlp)
6995 return;
6996
695a814e
JS
6997 shost = lpfc_shost_from_vport(phba->pport);
6998 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
6999 spin_lock_irq(shost->host_lock);
7000 ndlp->nlp_flag |= NLP_DELAY_TMO;
7001 spin_unlock_irq(shost->host_lock);
7002 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
7003 phba->pport->port_state = LPFC_FLOGI;
7004 return;
7005}
7006
7007/**
7008 * lpfc_fabric_login_reqd - Check if FLOGI required.
7009 * @phba: pointer to lpfc hba data structure.
7010 * @cmdiocb: pointer to FDISC command iocb.
7011 * @rspiocb: pointer to FDISC response iocb.
7012 *
7013 * This routine checks if a FLOGI is required for FDISC
7014 * to succeed.
7015 **/
7016static int
7017lpfc_fabric_login_reqd(struct lpfc_hba *phba,
7018 struct lpfc_iocbq *cmdiocb,
7019 struct lpfc_iocbq *rspiocb)
7020{
7021
7022 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
7023 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
7024 return 0;
7025 else
7026 return 1;
7027}
7028
e59058c4 7029/**
3621a710 7030 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
e59058c4
JS
7031 * @phba: pointer to lpfc hba data structure.
7032 * @cmdiocb: pointer to lpfc command iocb data structure.
7033 * @rspiocb: pointer to lpfc response iocb data structure.
7034 *
7035 * This routine is the completion callback function to a Fabric Discover
7036 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
7037 * single threaded, each FDISC completion callback function will reset
7038 * the discovery timer for all vports so that the timers do not time out
7039 * unnecessarily. The function checks the FDISC IOCB status. If an error is
7040 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the
7041 * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID
7042 * assigned to the vport has been changed with the completion of the FDISC
7043 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
7044 * are unregistered from the HBA, and then the lpfc_register_new_vport()
7045 * routine is invoked to register new vport with the HBA. Otherwise, the
7046 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
7047 * Server for State Change Request (SCR).
7048 **/
92d7f7b0
JS
7049static void
7050lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7051 struct lpfc_iocbq *rspiocb)
7052{
7053 struct lpfc_vport *vport = cmdiocb->vport;
7054 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7055 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
7056 struct lpfc_nodelist *np;
7057 struct lpfc_nodelist *next_np;
7058 IOCB_t *irsp = &rspiocb->iocb;
7059 struct lpfc_iocbq *piocb;
92494144
JS
7060 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
7061 struct serv_parm *sp;
7062 uint8_t fabric_param_changed;
92d7f7b0 7063
e8b62011
JS
7064 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7065 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
7066 irsp->ulpStatus, irsp->un.ulpWord[4],
7067 vport->fc_prevDID);
92d7f7b0
JS
7068 /* Since all FDISCs are being single threaded, we
7069 * must reset the discovery timer for ALL vports
7070 * waiting to send FDISC when one completes.
7071 */
7072 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
7073 lpfc_set_disctmo(piocb->vport);
7074 }
7075
858c9f6c
JS
7076 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7077 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
7078 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
7079
92d7f7b0 7080 if (irsp->ulpStatus) {
695a814e
JS
7081
7082 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
7083 lpfc_retry_pport_discovery(phba);
7084 goto out;
7085 }
7086
92d7f7b0
JS
7087 /* Check for retry */
7088 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
7089 goto out;
92d7f7b0 7090 /* FDISC failed */
e8b62011 7091 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
d7c255b2 7092 "0126 FDISC failed. (%d/%d)\n",
e8b62011 7093 irsp->ulpStatus, irsp->un.ulpWord[4]);
d7c255b2
JS
7094 goto fdisc_failed;
7095 }
d7c255b2 7096 spin_lock_irq(shost->host_lock);
695a814e 7097 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
4b40c59e 7098 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
d7c255b2 7099 vport->fc_flag |= FC_FABRIC;
76a95d75 7100 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
d7c255b2
JS
7101 vport->fc_flag |= FC_PUBLIC_LOOP;
7102 spin_unlock_irq(shost->host_lock);
92d7f7b0 7103
d7c255b2
JS
7104 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
7105 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
92494144
JS
7106 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
7107 sp = prsp->virt + sizeof(uint32_t);
7108 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
7109 memcpy(&vport->fabric_portname, &sp->portName,
7110 sizeof(struct lpfc_name));
7111 memcpy(&vport->fabric_nodename, &sp->nodeName,
7112 sizeof(struct lpfc_name));
7113 if (fabric_param_changed &&
d7c255b2
JS
7114 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
7115 /* If our NportID changed, we need to ensure all
7116 * remaining NPORTs get unreg_login'ed so we can
7117 * issue unreg_vpi.
7118 */
7119 list_for_each_entry_safe(np, next_np,
7120 &vport->fc_nodes, nlp_listp) {
7121 if (!NLP_CHK_NODE_ACT(ndlp) ||
7122 (np->nlp_state != NLP_STE_NPR_NODE) ||
7123 !(np->nlp_flag & NLP_NPR_ADISC))
7124 continue;
09372820 7125 spin_lock_irq(shost->host_lock);
d7c255b2 7126 np->nlp_flag &= ~NLP_NPR_ADISC;
09372820 7127 spin_unlock_irq(shost->host_lock);
d7c255b2 7128 lpfc_unreg_rpi(vport, np);
92d7f7b0 7129 }
78730cfe 7130 lpfc_cleanup_pending_mbox(vport);
5af5eee7
JS
7131
7132 if (phba->sli_rev == LPFC_SLI_REV4)
7133 lpfc_sli4_unreg_all_rpis(vport);
7134
d7c255b2
JS
7135 lpfc_mbx_unreg_vpi(vport);
7136 spin_lock_irq(shost->host_lock);
7137 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
0f65ff68
JS
7138 if (phba->sli_rev == LPFC_SLI_REV4)
7139 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
4b40c59e
JS
7140 else
7141 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
d7c255b2 7142 spin_unlock_irq(shost->host_lock);
38b92ef8
JS
7143 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
7144 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
7145 /*
7146 * Driver needs to re-reg VPI in order for f/w
7147 * to update the MAC address.
7148 */
7149 lpfc_register_new_vport(phba, vport, ndlp);
5ac6b303 7150 goto out;
92d7f7b0
JS
7151 }
7152
ecfd03c6
JS
7153 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
7154 lpfc_issue_init_vpi(vport);
7155 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
d7c255b2
JS
7156 lpfc_register_new_vport(phba, vport, ndlp);
7157 else
7158 lpfc_do_scr_ns_plogi(phba, vport);
7159 goto out;
7160fdisc_failed:
7161 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7162 /* Cancel discovery timer */
7163 lpfc_can_disctmo(vport);
7164 lpfc_nlp_put(ndlp);
92d7f7b0
JS
7165out:
7166 lpfc_els_free_iocb(phba, cmdiocb);
7167}
7168
e59058c4 7169/**
3621a710 7170 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
e59058c4
JS
7171 * @vport: pointer to a virtual N_Port data structure.
7172 * @ndlp: pointer to a node-list data structure.
7173 * @retry: number of retries to the command IOCB.
7174 *
7175 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
7176 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
7177 * routine to issue the IOCB, which makes sure only one outstanding fabric
7178 * IOCB will be sent off HBA at any given time.
7179 *
7180 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7181 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7182 * will be stored into the context1 field of the IOCB for the completion
7183 * callback function to the FDISC ELS command.
7184 *
7185 * Return code
7186 * 0 - Successfully issued fdisc iocb command
7187 * 1 - Failed to issue fdisc iocb command
7188 **/
a6ababd2 7189static int
92d7f7b0
JS
7190lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7191 uint8_t retry)
7192{
7193 struct lpfc_hba *phba = vport->phba;
7194 IOCB_t *icmd;
7195 struct lpfc_iocbq *elsiocb;
7196 struct serv_parm *sp;
7197 uint8_t *pcmd;
7198 uint16_t cmdsize;
7199 int did = ndlp->nlp_DID;
7200 int rc;
92d7f7b0 7201
5ffc266e 7202 vport->port_state = LPFC_FDISC;
92d7f7b0
JS
7203 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
7204 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
7205 ELS_CMD_FDISC);
7206 if (!elsiocb) {
92d7f7b0 7207 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
7208 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7209 "0255 Issue FDISC: no IOCB\n");
92d7f7b0
JS
7210 return 1;
7211 }
7212
7213 icmd = &elsiocb->iocb;
7214 icmd->un.elsreq64.myID = 0;
7215 icmd->un.elsreq64.fl = 1;
7216
73d91e50
JS
7217 /*
7218 * SLI3 ports require a different context type value than SLI4.
7219 * Catch SLI3 ports here and override the prep.
7220 */
7221 if (phba->sli_rev == LPFC_SLI_REV3) {
f1126688
JS
7222 icmd->ulpCt_h = 1;
7223 icmd->ulpCt_l = 0;
7224 }
92d7f7b0
JS
7225
7226 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7227 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
7228 pcmd += sizeof(uint32_t); /* CSP Word 1 */
7229 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
7230 sp = (struct serv_parm *) pcmd;
7231 /* Setup CSPs accordingly for Fabric */
7232 sp->cmn.e_d_tov = 0;
7233 sp->cmn.w2.r_a_tov = 0;
7234 sp->cls1.classValid = 0;
7235 sp->cls2.seqDelivery = 1;
7236 sp->cls3.seqDelivery = 1;
7237
7238 pcmd += sizeof(uint32_t); /* CSP Word 2 */
7239 pcmd += sizeof(uint32_t); /* CSP Word 3 */
7240 pcmd += sizeof(uint32_t); /* CSP Word 4 */
7241 pcmd += sizeof(uint32_t); /* Port Name */
7242 memcpy(pcmd, &vport->fc_portname, 8);
7243 pcmd += sizeof(uint32_t); /* Node Name */
7244 pcmd += sizeof(uint32_t); /* Node Name */
7245 memcpy(pcmd, &vport->fc_nodename, 8);
7246
7247 lpfc_set_disctmo(vport);
7248
7249 phba->fc_stat.elsXmitFDISC++;
7250 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
7251
858c9f6c
JS
7252 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7253 "Issue FDISC: did:x%x",
7254 did, 0, 0);
7255
92d7f7b0
JS
7256 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
7257 if (rc == IOCB_ERROR) {
7258 lpfc_els_free_iocb(phba, elsiocb);
92d7f7b0 7259 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
7260 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7261 "0256 Issue FDISC: Cannot send IOCB\n");
92d7f7b0
JS
7262 return 1;
7263 }
7264 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
92d7f7b0
JS
7265 return 0;
7266}
7267
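
The command buffer built above follows the standard ELS layout: a 32-bit command word followed by the login service parameters, with the port and node names copied in at their fixed offsets. A compact user-space sketch of that layout; demo_sparm is a stand-in for struct serv_parm, and 0x51 is the FDISC ELS command code, placed in the first payload word the way the driver's ELS_CMD_FDISC constant is:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for struct serv_parm: only the field positions matter here. */
struct demo_sparm {
	uint8_t csp[16];                 /* common service parameters */
	uint8_t port_name[8];
	uint8_t node_name[8];
	uint8_t class_parms[84];
};

#define DEMO_ELS_CMD_FDISC 0x51000000u   /* FDISC command code 0x51 in word 0 */

/* Returns the number of payload bytes written into buf. */
static size_t demo_build_fdisc(uint8_t *buf, const struct demo_sparm *sparam,
			       const uint8_t wwpn[8], const uint8_t wwnn[8])
{
	uint32_t cmd = DEMO_ELS_CMD_FDISC;
	struct demo_sparm *sp;

	memcpy(buf, &cmd, sizeof(cmd));          /* word 0: the ELS command    */
	sp = (struct demo_sparm *)(buf + sizeof(cmd));
	memcpy(sp, sparam, sizeof(*sp));         /* start from the port's CSPs */
	memcpy(sp->port_name, wwpn, 8);          /* then patch in the names    */
	memcpy(sp->node_name, wwnn, 8);
	return sizeof(cmd) + sizeof(*sp);
}
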
e59058c4 7268/**
3621a710 7269 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
e59058c4
JS
7270 * @phba: pointer to lpfc hba data structure.
7271 * @cmdiocb: pointer to lpfc command iocb data structure.
7272 * @rspiocb: pointer to lpfc response iocb data structure.
7273 *
7274 * This routine is the completion callback function to the issuing of a LOGO
7275 * ELS command off a vport. It frees the command IOCB and then decrements the
7276 * reference count held on the ndlp for this completion function, indicating
7277 * that the reference to the ndlp is no longer needed. Note that the
7278 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
7279 * callback function, and an additional explicit ndlp reference decrement
7280 * will trigger the actual release of the ndlp.
7281 **/
92d7f7b0
JS
7282static void
7283lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7284 struct lpfc_iocbq *rspiocb)
7285{
7286 struct lpfc_vport *vport = cmdiocb->vport;
858c9f6c 7287 IOCB_t *irsp;
e47c9093 7288 struct lpfc_nodelist *ndlp;
9589b062 7289 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
858c9f6c 7290
9589b062 7291 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
858c9f6c
JS
7292 irsp = &rspiocb->iocb;
7293 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7294 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
7295 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
92d7f7b0
JS
7296
7297 lpfc_els_free_iocb(phba, cmdiocb);
7298 vport->unreg_vpi_cmpl = VPORT_ERROR;
e47c9093
JS
7299
7300 /* Trigger the release of the ndlp after logo */
7301 lpfc_nlp_put(ndlp);
9589b062
JS
7302
7303 /* NPIV LOGO completes to NPort <nlp_DID> */
7304 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7305 "2928 NPIV LOGO completes to NPort x%x "
7306 "Data: x%x x%x x%x x%x\n",
7307 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
7308 irsp->ulpTimeout, vport->num_disc_nodes);
7309
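	/*
	 * On a successful LOGO completion this vport is no longer logged in
	 * to the fabric, so drop the FC_FABRIC flag under the host lock.
	 */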
7310 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
7311 spin_lock_irq(shost->host_lock);
7312 vport->fc_flag &= ~FC_FABRIC;
7313 spin_unlock_irq(shost->host_lock);
7314 }
92d7f7b0
JS
7315}
7316
e59058c4 7317/**
3621a710 7318 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
e59058c4
JS
7319 * @vport: pointer to a virtual N_Port data structure.
7320 * @ndlp: pointer to a node-list data structure.
7321 *
7322 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
7323 *
7324 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7325 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7326 * will be stored into the context1 field of the IOCB for the completion
7327 * callback function to the LOGO ELS command.
7328 *
7329 * Return codes
7330 * 0 - Successfully issued logo off the @vport
7331 * 1 - Failed to issue logo off the @vport
7332 **/
92d7f7b0
JS
7333int
7334lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
7335{
7336 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7337 struct lpfc_hba *phba = vport->phba;
92d7f7b0
JS
7338 IOCB_t *icmd;
7339 struct lpfc_iocbq *elsiocb;
7340 uint8_t *pcmd;
7341 uint16_t cmdsize;
7342
7343 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
7344 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
7345 ELS_CMD_LOGO);
7346 if (!elsiocb)
7347 return 1;
7348
7349 icmd = &elsiocb->iocb;
7350 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7351 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
7352 pcmd += sizeof(uint32_t);
7353
7354 /* Fill in LOGO payload */
7355 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
7356 pcmd += sizeof(uint32_t);
7357 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
7358
858c9f6c
JS
7359 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7360 "Issue LOGO npiv did:x%x flg:x%x",
7361 ndlp->nlp_DID, ndlp->nlp_flag, 0);
7362
92d7f7b0
JS
7363 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
7364 spin_lock_irq(shost->host_lock);
7365 ndlp->nlp_flag |= NLP_LOGO_SND;
7366 spin_unlock_irq(shost->host_lock);
3772a991
JS
7367 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
7368 IOCB_ERROR) {
92d7f7b0
JS
7369 spin_lock_irq(shost->host_lock);
7370 ndlp->nlp_flag &= ~NLP_LOGO_SND;
7371 spin_unlock_irq(shost->host_lock);
7372 lpfc_els_free_iocb(phba, elsiocb);
7373 return 1;
7374 }
7375 return 0;
7376}
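
/*
 * Illustrative sketch only (not part of the driver source): a caller tearing
 * down an NPIV vport would typically look up the fabric node and hand it to
 * lpfc_issue_els_npiv_logo(); the lookup below is shown only as an example.
 *
 *	ndlp = lpfc_findnode_did(vport, Fabric_DID);
 *	if (ndlp)
 *		lpfc_issue_els_npiv_logo(vport, ndlp);
 */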
7377
e59058c4 7378/**
3621a710 7379 * lpfc_fabric_block_timeout - Handler function for the fabric block timer
e59058c4
JS
7380 * @ptr: holder for the timer function associated data.
7381 *
7382 * This routine is invoked by the fabric iocb block timer after
7383 * timeout. It posts the fabric iocb block timeout event by setting the
7384 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
7385 * the lpfc_worker_wake_up() routine to wake up the worker thread. The worker
7386 * thread then invokes lpfc_unblock_fabric_iocbs() in response to the posted
7387 * WORKER_FABRIC_BLOCK_TMO event.
7388 **/
92d7f7b0
JS
7389void
7390lpfc_fabric_block_timeout(unsigned long ptr)
7391{
7392 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
7393 unsigned long iflags;
7394 uint32_t tmo_posted;
5e9d9b82 7395
92d7f7b0
JS
7396 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
7397 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
7398 if (!tmo_posted)
7399 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
7400 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
7401
5e9d9b82
JS
7402 if (!tmo_posted)
7403 lpfc_worker_wake_up(phba);
7404 return;
92d7f7b0
JS
7405}
7406
e59058c4 7407/**
3621a710 7408 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
e59058c4
JS
7409 * @phba: pointer to lpfc hba data structure.
7410 *
7411 * This routine issues one fabric iocb from the driver internal list to
7412 * the HBA. It first checks whether it's ready to issue one fabric iocb to
7413 * the HBA (whether there is no outstanding fabric iocb). If so, it removes
7414 * one pending fabric iocb from the driver internal list and invokes the
7415 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
7416 **/
92d7f7b0
JS
7417static void
7418lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
7419{
7420 struct lpfc_iocbq *iocb;
7421 unsigned long iflags;
7422 int ret;
92d7f7b0
JS
7423 IOCB_t *cmd;
7424
7425repeat:
7426 iocb = NULL;
7427 spin_lock_irqsave(&phba->hbalock, iflags);
7f5f3d0d 7428 /* Post any pending iocb to the SLI layer */
92d7f7b0
JS
7429 if (atomic_read(&phba->fabric_iocb_count) == 0) {
7430 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
7431 list);
7432 if (iocb)
7f5f3d0d 7433 /* Increment fabric iocb count to hold the position */
92d7f7b0
JS
7434 atomic_inc(&phba->fabric_iocb_count);
7435 }
7436 spin_unlock_irqrestore(&phba->hbalock, iflags);
7437 if (iocb) {
7438 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
7439 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
7440 iocb->iocb_flag |= LPFC_IO_FABRIC;
7441
858c9f6c
JS
7442 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
7443 "Fabric sched1: ste:x%x",
7444 iocb->vport->port_state, 0, 0);
7445
3772a991 7446 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
92d7f7b0
JS
7447
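		/*
		 * If the SLI layer rejects the iocb, restore its original
		 * completion, fail it locally with LOCAL_REJECT/SLI_ABORTED,
		 * drop the count taken above and try the next queued iocb.
		 */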
7448 if (ret == IOCB_ERROR) {
7449 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
7450 iocb->fabric_iocb_cmpl = NULL;
7451 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
7452 cmd = &iocb->iocb;
7453 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
7454 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
7455 iocb->iocb_cmpl(phba, iocb, iocb);
7456
7457 atomic_dec(&phba->fabric_iocb_count);
7458 goto repeat;
7459 }
7460 }
7461
7462 return;
7463}
7464
e59058c4 7465/**
3621a710 7466 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
e59058c4
JS
7467 * @phba: pointer to lpfc hba data structure.
7468 *
7469 * This routine unblocks the issuing of fabric iocb commands. The function
7470 * will clear the fabric iocb block bit and then invoke the
7471 * lpfc_resume_fabric_iocbs() routine to issue one of the pending fabric iocbs
7472 * from the driver internal fabric iocb list.
7473 **/
92d7f7b0
JS
7474void
7475lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
7476{
7477 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7478
7479 lpfc_resume_fabric_iocbs(phba);
7480 return;
7481}
7482
e59058c4 7483/**
3621a710 7484 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
e59058c4
JS
7485 * @phba: pointer to lpfc hba data structure.
7486 *
7487 * This routine blocks the issuing of fabric iocbs for a specified amount of
7488 * time (currently 100 ms). This is done by setting the fabric iocb block bit
7489 * and setting up a timeout timer for 100 ms. While the block bit is set, no
7490 * more fabric iocbs will be issued out of the HBA.
7491 **/
92d7f7b0
JS
7492static void
7493lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
7494{
7495 int blocked;
7496
7497 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7f5f3d0d 7498 /* Start a timer to unblock fabric iocbs after 100ms */
92d7f7b0
JS
7499 if (!blocked)
7500 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
7501
7502 return;
7503}
7504
e59058c4 7505/**
3621a710 7506 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
e59058c4
JS
7507 * @phba: pointer to lpfc hba data structure.
7508 * @cmdiocb: pointer to lpfc command iocb data structure.
7509 * @rspiocb: pointer to lpfc response iocb data structure.
7510 *
7511 * This routine is the callback function installed as the fabric iocb's
7512 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
7513 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
7514 * function first restores and invokes the original iocb's callback function
7515 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
7516 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
7517 **/
92d7f7b0
JS
7518static void
7519lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7520 struct lpfc_iocbq *rspiocb)
7521{
7522 struct ls_rjt stat;
7523
7524 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
7525 BUG();
7526
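	/*
	 * Busy or temporarily-unavailable rejections from the fabric pause
	 * fabric iocb issuing for a short interval (see
	 * lpfc_block_fabric_iocbs(), currently 100 ms).
	 */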
7527 switch (rspiocb->iocb.ulpStatus) {
7528 case IOSTAT_NPORT_RJT:
7529 case IOSTAT_FABRIC_RJT:
7530 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
7531 lpfc_block_fabric_iocbs(phba);
ed957684 7532 }
92d7f7b0
JS
7533 break;
7534
7535 case IOSTAT_NPORT_BSY:
7536 case IOSTAT_FABRIC_BSY:
7537 lpfc_block_fabric_iocbs(phba);
7538 break;
7539
7540 case IOSTAT_LS_RJT:
7541 stat.un.lsRjtError =
7542 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
7543 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
7544 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
7545 lpfc_block_fabric_iocbs(phba);
7546 break;
7547 }
7548
7549 if (atomic_read(&phba->fabric_iocb_count) == 0)
7550 BUG();
7551
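	/* Restore and invoke the original completion handler that was saved
	 * off when this iocb was routed through the fabric iocb path.
	 */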
7552 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
7553 cmdiocb->fabric_iocb_cmpl = NULL;
7554 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
7555 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
7556
7557 atomic_dec(&phba->fabric_iocb_count);
7558 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
7f5f3d0d
JS
7559 /* Post any pending iocbs to HBA */
7560 lpfc_resume_fabric_iocbs(phba);
92d7f7b0
JS
7561 }
7562}
7563
e59058c4 7564/**
3621a710 7565 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
e59058c4
JS
7566 * @phba: pointer to lpfc hba data structure.
7567 * @iocb: pointer to lpfc command iocb data structure.
7568 *
7569 * This routine is used as the top-level API for issuing a fabric iocb command
7570 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
7571 * function makes sure that only one fabric-bound iocb will be outstanding at
7572 * any given time. As such, this function will first check to see whether there
7573 * is already an outstanding fabric iocb on the wire. If so, it will put the
7574 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
7575 * issued later. Otherwise, it will issue the iocb on the wire and update the
7576 * fabric iocb count to indicate that there is one fabric iocb on the wire.
7577 *
7578 * Note, this implementation can potentially send fabric IOCBs out of
7579 * order. The problem is that the construction of the "ready" boolean does
7580 * not include the condition that the internal fabric IOCB list is empty. As
7581 * such, a fabric IOCB issued by this routine might "jump" ahead of the
7582 * fabric IOCBs already waiting on the internal list.
7583 *
7584 * Return code
7585 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
7586 * IOCB_ERROR - failed to issue fabric iocb
7587 **/
a6ababd2 7588static int
92d7f7b0
JS
7589lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
7590{
7591 unsigned long iflags;
92d7f7b0
JS
7592 int ready;
7593 int ret;
7594
7595 if (atomic_read(&phba->fabric_iocb_count) > 1)
7596 BUG();
7597
7598 spin_lock_irqsave(&phba->hbalock, iflags);
7599 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
7600 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7601
7f5f3d0d
JS
7602 if (ready)
7603 /* Increment fabric iocb count to hold the position */
7604 atomic_inc(&phba->fabric_iocb_count);
92d7f7b0
JS
7605 spin_unlock_irqrestore(&phba->hbalock, iflags);
7606 if (ready) {
7607 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
7608 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
7609 iocb->iocb_flag |= LPFC_IO_FABRIC;
7610
858c9f6c
JS
7611 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
7612 "Fabric sched2: ste:x%x",
7613 iocb->vport->port_state, 0, 0);
7614
3772a991 7615 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
92d7f7b0
JS
7616
7617 if (ret == IOCB_ERROR) {
7618 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
7619 iocb->fabric_iocb_cmpl = NULL;
7620 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
7621 atomic_dec(&phba->fabric_iocb_count);
7622 }
7623 } else {
7624 spin_lock_irqsave(&phba->hbalock, iflags);
7625 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
7626 spin_unlock_irqrestore(&phba->hbalock, iflags);
7627 ret = IOCB_SUCCESS;
7628 }
7629 return ret;
7630}
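
/*
 * Usage note: lpfc_issue_els_fdisc() above submits its ELS iocb through
 * lpfc_issue_fabric_iocb() rather than calling lpfc_sli_issue_iocb()
 * directly, so that at most one fabric-bound iocb (e.g. FLOGI/FDISC) is on
 * the wire at a time; anything submitted while one is outstanding waits on
 * phba->fabric_iocb_list until lpfc_resume_fabric_iocbs() reissues it.
 */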
7631
e59058c4 7632/**
3621a710 7633 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
e59058c4
JS
7634 * @vport: pointer to a virtual N_Port data structure.
7635 *
7636 * This routine aborts all the IOCBs associated with a @vport from the
7637 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
7638 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
7639 * list, removes each IOCB associated with the @vport off the list, set the
7640 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
7641 * associated with the IOCB.
7642 **/
a6ababd2 7643static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
92d7f7b0
JS
7644{
7645 LIST_HEAD(completions);
7646 struct lpfc_hba *phba = vport->phba;
7647 struct lpfc_iocbq *tmp_iocb, *piocb;
92d7f7b0
JS
7648
7649 spin_lock_irq(&phba->hbalock);
7650 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
7651 list) {
7652
7653 if (piocb->vport != vport)
7654 continue;
7655
7656 list_move_tail(&piocb->list, &completions);
7657 }
7658 spin_unlock_irq(&phba->hbalock);
7659
a257bf90
JS
7660 /* Cancel all the IOCBs from the completions list */
7661 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7662 IOERR_SLI_ABORTED);
92d7f7b0
JS
7663}
7664
e59058c4 7665/**
3621a710 7666 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
e59058c4
JS
7667 * @ndlp: pointer to a node-list data structure.
7668 *
7669 * This routine aborts all the IOCBs associated with an @ndlp from the
7670 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
7671 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
7672 * list, removes each IOCB associated with the @ndlp from the list, sets the
7673 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
7674 * associated with the IOCB.
7675 **/
92d7f7b0
JS
7676void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
7677{
7678 LIST_HEAD(completions);
a257bf90 7679 struct lpfc_hba *phba = ndlp->phba;
92d7f7b0
JS
7680 struct lpfc_iocbq *tmp_iocb, *piocb;
7681 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
92d7f7b0
JS
7682
7683 spin_lock_irq(&phba->hbalock);
7684 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
7685 list) {
7686 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
7687
7688 list_move_tail(&piocb->list, &completions);
ed957684 7689 }
dea3101e 7690 }
92d7f7b0
JS
7691 spin_unlock_irq(&phba->hbalock);
7692
a257bf90
JS
7693 /* Cancel all the IOCBs from the completions list */
7694 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7695 IOERR_SLI_ABORTED);
92d7f7b0
JS
7696}
7697
e59058c4 7698/**
3621a710 7699 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
e59058c4
JS
7700 * @phba: pointer to lpfc hba data structure.
7701 *
7702 * This routine aborts all the IOCBs currently on the driver internal
7703 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
7704 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
7705 * list, removes IOCBs from the list, sets the status field to
7706 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
7707 * the IOCB.
7708 **/
92d7f7b0
JS
7709void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
7710{
7711 LIST_HEAD(completions);
92d7f7b0
JS
7712
7713 spin_lock_irq(&phba->hbalock);
7714 list_splice_init(&phba->fabric_iocb_list, &completions);
7715 spin_unlock_irq(&phba->hbalock);
7716
a257bf90
JS
7717 /* Cancel all the IOCBs from the completions list */
7718 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7719 IOERR_SLI_ABORTED);
dea3101e 7720}
6fb120a7 7721
1151e3ec
JS
7722/**
7723 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
7724 * @vport: pointer to lpfc vport data structure.
7725 *
7726 * This routine is invoked by the vport cleanup path on vport deletion and by
7727 * the ndlp cleanup path when an ndlp is removed.
7728 **/
7729void
7730lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
7731{
7732 struct lpfc_hba *phba = vport->phba;
7733 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7734 unsigned long iflag = 0;
7735
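	/* Walk the aborted-ELS sgl list and clear any ndlp back-pointers
	 * owned by this vport so no stale references outlive the teardown.
	 */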
7736 spin_lock_irqsave(&phba->hbalock, iflag);
7737 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
7738 list_for_each_entry_safe(sglq_entry, sglq_next,
7739 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
7740 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
7741 sglq_entry->ndlp = NULL;
7742 }
7743 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7744 spin_unlock_irqrestore(&phba->hbalock, iflag);
7745 return;
7746}
7747
6fb120a7
JS
7748/**
7749 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
7750 * @phba: pointer to lpfc hba data structure.
7751 * @axri: pointer to the els xri abort wcqe structure.
7752 *
7753 * This routine is invoked by the worker thread to process a SLI4 slow-path
7754 * ELS aborted xri.
7755 **/
7756void
7757lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7758 struct sli4_wcqe_xri_aborted *axri)
7759{
7760 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
19ca7609 7761 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
7851fe2c 7762 uint16_t lxri = 0;
19ca7609 7763
6fb120a7
JS
7764 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7765 unsigned long iflag = 0;
19ca7609 7766 struct lpfc_nodelist *ndlp;
589a52d6 7767 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6fb120a7 7768
0f65ff68
JS
7769 spin_lock_irqsave(&phba->hbalock, iflag);
7770 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
6fb120a7
JS
7771 list_for_each_entry_safe(sglq_entry, sglq_next,
7772 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
7773 if (sglq_entry->sli4_xritag == xri) {
7774 list_del(&sglq_entry->list);
19ca7609
JS
7775 ndlp = sglq_entry->ndlp;
7776 sglq_entry->ndlp = NULL;
6fb120a7
JS
7777 list_add_tail(&sglq_entry->list,
7778 &phba->sli4_hba.lpfc_sgl_list);
0f65ff68
JS
7779 sglq_entry->state = SGL_FREED;
7780 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6fb120a7 7781 spin_unlock_irqrestore(&phba->hbalock, iflag);
19ca7609 7782 lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
589a52d6
JS
7783
7784 /* Check if TXQ queue needs to be serviced */
7785 if (pring->txq_cnt)
7786 lpfc_worker_wake_up(phba);
6fb120a7
JS
7787 return;
7788 }
7789 }
0f65ff68 7790 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
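
	/*
	 * The XRI was not on the aborted-ELS sgl list; if it maps to an
	 * active sglq, just mark that entry as aborted.
	 */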
7851fe2c
JS
7791 lxri = lpfc_sli4_xri_inrange(phba, xri);
7792 if (lxri == NO_XRI) {
7793 spin_unlock_irqrestore(&phba->hbalock, iflag);
7794 return;
7795 }
7796 sglq_entry = __lpfc_get_active_sglq(phba, lxri);
0f65ff68
JS
7797 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
7798 spin_unlock_irqrestore(&phba->hbalock, iflag);
7799 return;
7800 }
7801 sglq_entry->state = SGL_XRI_ABORTED;
7802 spin_unlock_irqrestore(&phba->hbalock, iflag);
7803 return;
6fb120a7 7804}