/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>


#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
			struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there is any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
 * state in LPFC_VPORT_READY, the request for checking host link attention
 * event will be ignored and a return code shall indicate no host link
 * attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used for allocating a lpfc-IOCB data structure from
 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
 * passed into the routine for discovery state machine to issue an Extended
 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines and the ELS command-specific fields will be later set up by
 * the individual discovery machine routines after calling this routine
 * allocating and preparing a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expectRsp) {
		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));

		icmd->un.elsreq64.remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		if (elscmd == ELS_CMD_FLOGI)
			icmd->ulpTimeout = FF_DEF_RATOV * 2;
		else
			icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	/*
	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
	    ((phba->sli_rev == LPFC_SLI_REV4) &&
	     (vport->fc_flag & FC_PT2PT))) {

		if (expectRsp) {
			icmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			icmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	/* prevent preparing iocb with NULL ndlp reference */
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1)
		goto els_iocb_free_pbuf_exit;
	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x"
				 " fc_flag:x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize, vport->port_state,
				 vport->fc_flag);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}
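
/*
 * Note: callers of lpfc_prep_els_iocb() fill in the ELS payload through the
 * context2 buffer, set their own iocb_cmpl completion handler and then issue
 * the IOCB; if the issue fails they release it with lpfc_els_free_iocb(),
 * which also drops the ndlp reference taken above (see lpfc_issue_els_flogi()
 * and lpfc_cmpl_els_flogi() below).
 */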

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->context2 = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 6;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
	mp = (struct lpfc_dmabuf *) mbox->context1;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0249 Cannot issue Register Fabric login: Err %d\n", err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			goto fail;
		}
		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
		if (!dmabuf->virt) {
			rc = -ENOMEM;
			goto fail;
		}
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf)
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	else
		lpfc_reg_vfi(mboxq, vport, 0);

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->context1 = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail;
	}
	return 0;

fail:
	if (mboxq)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname or
 * Fabric node nodename is changed in the completion service parameter,
 * else it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean address bit in the FLOGI/FDISC response is cleared and the
 * FCID, Fabric portname or Fabric node nodename is changed in the
 * completion service parameter.
 *
 * Return code
 *   0 - FCID and Fabric Nodename and Fabric portname is not changed.
 *   1 - FCID or Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
		struct serv_parm *sp)
{
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
		memcmp(&vport->fabric_portname, &sp->portName,
			sizeof(struct lpfc_name)) ||
		memcmp(&vport->fabric_nodename, &sp->nodeName,
			sizeof(struct lpfc_name)))
		fabric_param_changed = 1;

	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || lpfc_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
				sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			   to call reg_vnpid at least for the physical host */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs*/
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(np))
				continue;
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(shost->host_lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					"3135 Need register VFI: (x%x/%x)\n",
					vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp)
				goto fail;
			lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	} else
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		goto fail;

	lpfc_config_link(phba, mbox);

	mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
	mbox->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		goto fail;
	}

	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * retry has been made (either immediately or delayed with lpfc_els_retry()
 * returning 1), the command IOCB will be released and function returned.
 * If the retry attempt has been given up (possibly reach the maximum
 * number of retries), one additional decrement of ndlp reference shall be
 * invoked before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of ndlp reference count). If no error reported in
 * the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"FLOGI cmpl: status:x%x/x%x state:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			    IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				"2858 FLOGI failure Status:x%x/x%x TMO:x%x "
				"Data x%x x%x\n",
				irsp->ulpStatus, irsp->un.ulpWord[4],
				irsp->ulpTimeout, phba->hba_flag,
				phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout);

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
			phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(irsp))
				lpfc_issue_reg_vfi(vport);
			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x, "
			 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR,
				LOG_FIP | LOG_ELS,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);
			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						LOG_ELS,
						"2769 FLOGI to FCF (x%x) "
						"completed successfully\n",
						phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	lpfc_nlp_put(ndlp);

	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_ABORTED) &&
			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue FLOGI: opt:x%x",
		phba->sli3_options, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = &phba->sli.ring[LPFC_ELS_RING];

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
			    (ndlp->nlp_DID == Fabric_DID))
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}
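
/*
 * Note: the aborts requested above complete asynchronously; each aborted
 * FLOGI still comes back through lpfc_cmpl_els_flogi() with an error
 * status, which performs the usual retry and cleanup handling.
 */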

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
 * is then invoked with the @vport and the ndlp to perform the FDISC for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

e59058c4 1480/**
3621a710 1481 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
e59058c4
JS
1482 * @phba: pointer to lpfc hba data structure.
1483 * @prsp: pointer to response IOCB payload.
1484 * @ndlp: pointer to a node-list data structure.
1485 *
1486 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 1487 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
1488 * The following cases are considered N_Port confirmed:
1489 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1490 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1491 * it does not have WWPN assigned either. If the WWPN is confirmed, the
1492 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1493 * 1) if there is a node on vport list other than the @ndlp with the same
1494 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1495 * on that node to release the RPI associated with the node; 2) if there is
1496 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1497 * into, a new node shall be allocated (or activated). In either case, the
1498 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1499 * be released and the new_ndlp shall be put on to the vport node list and
1500 * its pointer returned as the confirmed node.
1501 *
 1502 * Note that before the @ndlp is "released", the keepDID from the not-matching
 1503 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
 1504 * of the @ndlp. This is because the release of @ndlp actually puts it
 1505 * into an inactive state on the vport node list and the vport node list
 1506 * management algorithm does not allow two nodes with the same DID.
1507 *
1508 * Return code
1509 * pointer to the PLOGI N_Port @ndlp
1510 **/
488d1469 1511static struct lpfc_nodelist *
92d7f7b0 1512lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
488d1469
JS
1513 struct lpfc_nodelist *ndlp)
1514{
2e0fef85 1515 struct lpfc_vport *vport = ndlp->vport;
953ceeda 1516 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
488d1469 1517 struct lpfc_nodelist *new_ndlp;
0ff10d46
JS
1518 struct lpfc_rport_data *rdata;
1519 struct fc_rport *rport;
488d1469 1520 struct serv_parm *sp;
92d7f7b0 1521 uint8_t name[sizeof(struct lpfc_name)];
e5abba4c 1522 uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
953ceeda 1523 uint16_t keep_nlp_state;
38b92ef8
JS
1524 int put_node;
1525 int put_rport;
cff261f6 1526 unsigned long *active_rrqs_xri_bitmap = NULL;
488d1469 1527
2fb9bd8b
JS
1528 /* Fabric nodes can have the same WWPN so we don't bother searching
1529 * by WWPN. Just return the ndlp that was given to us.
1530 */
1531 if (ndlp->nlp_type & NLP_FABRIC)
1532 return ndlp;
1533
92d7f7b0 1534 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
685f0bf7 1535 memset(name, 0, sizeof(struct lpfc_name));
488d1469 1536
685f0bf7 1537 /* Now we find out if the NPort we are logging into, matches the WWPN
488d1469
JS
1538 * we have for that ndlp. If not, we have some work to do.
1539 */
2e0fef85 1540 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
488d1469 1541
e47c9093 1542 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
488d1469 1543 return ndlp;
cff261f6
JS
1544 if (phba->sli_rev == LPFC_SLI_REV4) {
1545 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1546 GFP_KERNEL);
1547 if (active_rrqs_xri_bitmap)
1548 memset(active_rrqs_xri_bitmap, 0,
1549 phba->cfg_rrq_xri_bitmap_sz);
1550 }
488d1469 1551
34f5ad8b
JS
1552 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1553 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
1554 ndlp, ndlp->nlp_DID, new_ndlp);
1555
488d1469 1556 if (!new_ndlp) {
2e0fef85
JS
1557 rc = memcmp(&ndlp->nlp_portname, name,
1558 sizeof(struct lpfc_name));
cff261f6
JS
1559 if (!rc) {
1560 if (active_rrqs_xri_bitmap)
1561 mempool_free(active_rrqs_xri_bitmap,
1562 phba->active_rrq_pool);
92795650 1563 return ndlp;
cff261f6 1564 }
488d1469 1565 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
cff261f6
JS
1566 if (!new_ndlp) {
1567 if (active_rrqs_xri_bitmap)
1568 mempool_free(active_rrqs_xri_bitmap,
1569 phba->active_rrq_pool);
488d1469 1570 return ndlp;
cff261f6 1571 }
2e0fef85 1572 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
e47c9093 1573 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
58da1ffb
JS
1574 rc = memcmp(&ndlp->nlp_portname, name,
1575 sizeof(struct lpfc_name));
cff261f6
JS
1576 if (!rc) {
1577 if (active_rrqs_xri_bitmap)
1578 mempool_free(active_rrqs_xri_bitmap,
1579 phba->active_rrq_pool);
58da1ffb 1580 return ndlp;
cff261f6 1581 }
e47c9093
JS
1582 new_ndlp = lpfc_enable_node(vport, new_ndlp,
1583 NLP_STE_UNUSED_NODE);
cff261f6
JS
1584 if (!new_ndlp) {
1585 if (active_rrqs_xri_bitmap)
1586 mempool_free(active_rrqs_xri_bitmap,
1587 phba->active_rrq_pool);
e47c9093 1588 return ndlp;
cff261f6 1589 }
58da1ffb 1590 keepDID = new_ndlp->nlp_DID;
cff261f6
JS
1591 if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
1592 memcpy(active_rrqs_xri_bitmap,
1593 new_ndlp->active_rrqs_xri_bitmap,
1594 phba->cfg_rrq_xri_bitmap_sz);
19ca7609 1595 } else {
58da1ffb 1596 keepDID = new_ndlp->nlp_DID;
cff261f6
JS
1597 if (phba->sli_rev == LPFC_SLI_REV4 &&
1598 active_rrqs_xri_bitmap)
1599 memcpy(active_rrqs_xri_bitmap,
1600 new_ndlp->active_rrqs_xri_bitmap,
1601 phba->cfg_rrq_xri_bitmap_sz);
19ca7609 1602 }
488d1469 1603
2e0fef85 1604 lpfc_unreg_rpi(vport, new_ndlp);
488d1469 1605 new_ndlp->nlp_DID = ndlp->nlp_DID;
92795650 1606 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
19ca7609 1607 if (phba->sli_rev == LPFC_SLI_REV4)
cff261f6
JS
1608 memcpy(new_ndlp->active_rrqs_xri_bitmap,
1609 ndlp->active_rrqs_xri_bitmap,
1610 phba->cfg_rrq_xri_bitmap_sz);
0ff10d46 1611
953ceeda 1612 spin_lock_irq(shost->host_lock);
e5abba4c
JS
1613 keep_nlp_flag = new_ndlp->nlp_flag;
1614 new_ndlp->nlp_flag = ndlp->nlp_flag;
1615 ndlp->nlp_flag = keep_nlp_flag;
953ceeda 1616 spin_unlock_irq(shost->host_lock);
0ff10d46 1617
953ceeda
JS
1618 /* Set nlp_states accordingly */
1619 keep_nlp_state = new_ndlp->nlp_state;
2e0fef85 1620 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
488d1469 1621
2e0fef85 1622 /* Move this back to NPR state */
87af33fe
JS
1623 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1624 /* The new_ndlp is replacing ndlp totally, so we need
1625 * to put ndlp on UNUSED list and try to free it.
1626 */
34f5ad8b
JS
1627 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1628 "3179 PLOGI confirm NEW: %x %x\n",
1629 new_ndlp->nlp_DID, keepDID);
0ff10d46
JS
1630
1631 /* Fix up the rport accordingly */
1632 rport = ndlp->rport;
1633 if (rport) {
1634 rdata = rport->dd_data;
1635 if (rdata->pnode == ndlp) {
466e840b 1636 /* break the link before dropping the ref */
0ff10d46 1637 ndlp->rport = NULL;
466e840b 1638 lpfc_nlp_put(ndlp);
0ff10d46
JS
1639 rdata->pnode = lpfc_nlp_get(new_ndlp);
1640 new_ndlp->rport = rport;
1641 }
1642 new_ndlp->nlp_type = ndlp->nlp_type;
1643 }
58da1ffb
JS
 1644 /* We shall actually free the ndlp only when both its nlp_DID and
 1645 * nlp_portname fields equal 0, so that an ndlp that can never be
 1646 * used again is not left on the nodelist.
1647 */
1648 if (ndlp->nlp_DID == 0) {
1649 spin_lock_irq(&phba->ndlp_lock);
1650 NLP_SET_FREE_REQ(ndlp);
1651 spin_unlock_irq(&phba->ndlp_lock);
1652 }
0ff10d46 1653
58da1ffb
JS
1654 /* Two ndlps cannot have the same did on the nodelist */
1655 ndlp->nlp_DID = keepDID;
cff261f6
JS
1656 if (phba->sli_rev == LPFC_SLI_REV4 &&
1657 active_rrqs_xri_bitmap)
1658 memcpy(ndlp->active_rrqs_xri_bitmap,
1659 active_rrqs_xri_bitmap,
1660 phba->cfg_rrq_xri_bitmap_sz);
e5abba4c
JS
1661
1662 if (!NLP_CHK_NODE_ACT(ndlp))
1663 lpfc_drop_node(vport, ndlp);
87af33fe 1664 }
92795650 1665 else {
34f5ad8b
JS
1666 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1667 "3180 PLOGI confirm SWAP: %x %x\n",
1668 new_ndlp->nlp_DID, keepDID);
1669
2e0fef85 1670 lpfc_unreg_rpi(vport, ndlp);
34f5ad8b 1671
58da1ffb
JS
1672 /* Two ndlps cannot have the same did */
1673 ndlp->nlp_DID = keepDID;
cff261f6
JS
1674 if (phba->sli_rev == LPFC_SLI_REV4 &&
1675 active_rrqs_xri_bitmap)
1676 memcpy(ndlp->active_rrqs_xri_bitmap,
1677 active_rrqs_xri_bitmap,
1678 phba->cfg_rrq_xri_bitmap_sz);
34f5ad8b 1679
953ceeda
JS
1680 /* Since we are switching over to the new_ndlp,
1681 * reset the old ndlp state
34f5ad8b
JS
1682 */
1683 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1684 (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
953ceeda
JS
1685 keep_nlp_state = NLP_STE_NPR_NODE;
1686 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
34f5ad8b 1687
38b92ef8
JS
1688 /* Fix up the rport accordingly */
1689 rport = ndlp->rport;
1690 if (rport) {
1691 rdata = rport->dd_data;
1692 put_node = rdata->pnode != NULL;
1693 put_rport = ndlp->rport != NULL;
1694 rdata->pnode = NULL;
1695 ndlp->rport = NULL;
1696 if (put_node)
1697 lpfc_nlp_put(ndlp);
1698 if (put_rport)
1699 put_device(&rport->dev);
1700 }
92795650 1701 }
cff261f6
JS
1702 if (phba->sli_rev == LPFC_SLI_REV4 &&
1703 active_rrqs_xri_bitmap)
1704 mempool_free(active_rrqs_xri_bitmap,
1705 phba->active_rrq_pool);
488d1469
JS
1706 return new_ndlp;
1707}
1708
e59058c4 1709/**
3621a710 1710 * lpfc_end_rscn - Check and handle more rscn for a vport
e59058c4
JS
1711 * @vport: pointer to a host virtual N_Port data structure.
1712 *
1713 * This routine checks whether more Registration State Change
1714 * Notifications (RSCNs) came in while the discovery state machine was in
1715 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1716 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1717 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1718 * handling the RSCNs.
1719 **/
87af33fe
JS
1720void
1721lpfc_end_rscn(struct lpfc_vport *vport)
1722{
1723 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1724
1725 if (vport->fc_flag & FC_RSCN_MODE) {
1726 /*
1727 * Check to see if more RSCNs came in while we were
1728 * processing this one.
1729 */
1730 if (vport->fc_rscn_id_cnt ||
1731 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1732 lpfc_els_handle_rscn(vport);
1733 else {
1734 spin_lock_irq(shost->host_lock);
1735 vport->fc_flag &= ~FC_RSCN_MODE;
1736 spin_unlock_irq(shost->host_lock);
1737 }
1738 }
1739}
1740
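/*
 * Illustrative sketch, not part of the upstream lpfc source: vport
 * discovery flags such as FC_RSCN_MODE and FC_NDISC_ACTIVE are modified
 * under the SCSI host lock, as lpfc_end_rscn() does above.  The helper
 * name lpfc_example_clear_fc_flag is hypothetical.
 */
static void
lpfc_example_clear_fc_flag(struct lpfc_vport *vport, uint32_t flag)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~flag;
	spin_unlock_irq(shost->host_lock);
}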
19ca7609
JS
1741/**
1742 * lpfc_cmpl_els_rrq - Completion handled for els RRQs.
1743 * @phba: pointer to lpfc hba data structure.
1744 * @cmdiocb: pointer to lpfc command iocb data structure.
1745 * @rspiocb: pointer to lpfc response iocb data structure.
1746 *
1747 * This routine will call the clear rrq function to free the rrq and
1748 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1749 * exist then the clear_rrq is still called because the rrq needs to
1750 * be freed.
1751 **/
1752
1753static void
1754lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1755 struct lpfc_iocbq *rspiocb)
1756{
1757 struct lpfc_vport *vport = cmdiocb->vport;
1758 IOCB_t *irsp;
1759 struct lpfc_nodelist *ndlp;
1760 struct lpfc_node_rrq *rrq;
1761
1762 /* we pass cmdiocb to state machine which needs rspiocb as well */
1763 rrq = cmdiocb->context_un.rrq;
1764 cmdiocb->context_un.rsp_iocb = rspiocb;
1765
1766 irsp = &rspiocb->iocb;
1767 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1768 "RRQ cmpl: status:x%x/x%x did:x%x",
1769 irsp->ulpStatus, irsp->un.ulpWord[4],
1770 irsp->un.elsreq64.remoteID);
1771
1772 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1773 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
1774 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1775 "2882 RRQ completes to NPort x%x "
1776 "with no ndlp. Data: x%x x%x x%x\n",
1777 irsp->un.elsreq64.remoteID,
1778 irsp->ulpStatus, irsp->un.ulpWord[4],
1779 irsp->ulpIoTag);
1780 goto out;
1781 }
1782
1783 /* rrq completes to NPort <nlp_DID> */
1784 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1785 "2880 RRQ completes to NPort x%x "
1786 "Data: x%x x%x x%x x%x x%x\n",
1787 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1788 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1789
1790 if (irsp->ulpStatus) {
1791 /* Check for retry */
 1792 /* RRQ failed; don't print the vport-to-vport rejects */
1793 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1794 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1795 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1796 (phba)->pport->cfg_log_verbose & LOG_ELS)
1797 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1798 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
1799 ndlp->nlp_DID, irsp->ulpStatus,
1800 irsp->un.ulpWord[4]);
1801 }
1802out:
1803 if (rrq)
1804 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1805 lpfc_els_free_iocb(phba, cmdiocb);
1806 return;
1807}
e59058c4 1808/**
3621a710 1809 * lpfc_cmpl_els_plogi - Completion callback function for plogi
e59058c4
JS
1810 * @phba: pointer to lpfc hba data structure.
1811 * @cmdiocb: pointer to lpfc command iocb data structure.
1812 * @rspiocb: pointer to lpfc response iocb data structure.
1813 *
1814 * This routine is the completion callback function for issuing the Port
1815 * Login (PLOGI) command. For PLOGI completion, there must be an active
1816 * ndlp on the vport node list that matches the remote node ID from the
25985edc 1817 * PLOGI response IOCB. If such an ndlp does not exist, the PLOGI is simply
e59058c4
JS
1818 * ignored and command IOCB released. The PLOGI response IOCB status is
 1819 * checked for error conditions. If an error status is reported, PLOGI
1820 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1821 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
 1822 * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discover State
 1823 * Machine (DSM) for this PLOGI completion. Finally, it checks whether
 1824 * there are additional N_Port nodes with the vport that need to perform
 1825 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1826 * PLOGIs.
1827 **/
dea3101e 1828static void
2e0fef85
JS
1829lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1830 struct lpfc_iocbq *rspiocb)
dea3101e 1831{
2e0fef85
JS
1832 struct lpfc_vport *vport = cmdiocb->vport;
1833 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 1834 IOCB_t *irsp;
dea3101e 1835 struct lpfc_nodelist *ndlp;
92795650 1836 struct lpfc_dmabuf *prsp;
eb016566 1837 int disc, rc;
dea3101e 1838
dea3101e
JB
1839 /* we pass cmdiocb to state machine which needs rspiocb as well */
1840 cmdiocb->context_un.rsp_iocb = rspiocb;
1841
1842 irsp = &rspiocb->iocb;
858c9f6c
JS
1843 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1844 "PLOGI cmpl: status:x%x/x%x did:x%x",
1845 irsp->ulpStatus, irsp->un.ulpWord[4],
1846 irsp->un.elsreq64.remoteID);
1847
2e0fef85 1848 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
e47c9093 1849 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
e8b62011
JS
1850 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1851 "0136 PLOGI completes to NPort x%x "
1852 "with no ndlp. Data: x%x x%x x%x\n",
1853 irsp->un.elsreq64.remoteID,
1854 irsp->ulpStatus, irsp->un.ulpWord[4],
1855 irsp->ulpIoTag);
488d1469 1856 goto out;
ed957684 1857 }
dea3101e
JB
1858
1859 /* Since ndlp can be freed in the disc state machine, note if this node
1860 * is being used during discovery.
1861 */
2e0fef85 1862 spin_lock_irq(shost->host_lock);
dea3101e 1863 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
488d1469 1864 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2e0fef85 1865 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1866 rc = 0;
1867
1868 /* PLOGI completes to NPort <nlp_DID> */
e8b62011
JS
1869 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1870 "0102 PLOGI completes to NPort x%x "
1871 "Data: x%x x%x x%x x%x x%x\n",
1872 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1873 irsp->ulpTimeout, disc, vport->num_disc_nodes);
dea3101e 1874 /* Check to see if link went down during discovery */
2e0fef85
JS
1875 if (lpfc_els_chk_latt(vport)) {
1876 spin_lock_irq(shost->host_lock);
dea3101e 1877 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 1878 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1879 goto out;
1880 }
1881
dea3101e
JB
1882 if (irsp->ulpStatus) {
1883 /* Check for retry */
1884 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1885 /* ELS command is being retried */
1886 if (disc) {
2e0fef85 1887 spin_lock_irq(shost->host_lock);
dea3101e 1888 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 1889 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1890 }
1891 goto out;
1892 }
2a9bf3d0
JS
 1893 /* PLOGI failed; don't print the vport-to-vport rejects */
1894 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1895 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1896 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1897 (phba)->pport->cfg_log_verbose & LOG_ELS)
1898 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
e40a02c1
JS
1899 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1900 ndlp->nlp_DID, irsp->ulpStatus,
1901 irsp->un.ulpWord[4]);
dea3101e 1902 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 1903 if (lpfc_error_lost_link(irsp))
c9f8735b 1904 rc = NLP_STE_FREED_NODE;
e47c9093 1905 else
2e0fef85 1906 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1907 NLP_EVT_CMPL_PLOGI);
dea3101e
JB
1908 } else {
1909 /* Good status, call state machine */
92795650 1910 prsp = list_entry(((struct lpfc_dmabuf *)
92d7f7b0
JS
1911 cmdiocb->context2)->list.next,
1912 struct lpfc_dmabuf, list);
1913 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
2e0fef85 1914 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1915 NLP_EVT_CMPL_PLOGI);
dea3101e
JB
1916 }
1917
2e0fef85 1918 if (disc && vport->num_disc_nodes) {
dea3101e 1919 /* Check to see if there are more PLOGIs to be sent */
2e0fef85 1920 lpfc_more_plogi(vport);
dea3101e 1921
2e0fef85
JS
1922 if (vport->num_disc_nodes == 0) {
1923 spin_lock_irq(shost->host_lock);
1924 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1925 spin_unlock_irq(shost->host_lock);
dea3101e 1926
2e0fef85 1927 lpfc_can_disctmo(vport);
87af33fe 1928 lpfc_end_rscn(vport);
dea3101e
JB
1929 }
1930 }
1931
1932out:
1933 lpfc_els_free_iocb(phba, cmdiocb);
1934 return;
1935}
1936
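/*
 * Illustrative sketch, not part of the upstream lpfc source: for
 * IOSTAT_LS_RJT completions the reject reason code travels in bits
 * 23:16 of ulpWord[4], which is what the ">> 16" comparisons against
 * LSRJT_INVALID_CMD and LSRJT_UNABLE_TPC above rely on.  The helper
 * name is hypothetical.
 */
static inline uint8_t
lpfc_example_lsrjt_reason(IOCB_t *irsp)
{
	/* Bits 31:24 are reserved, so the low byte of the shifted
	 * value is the LS_RJT reason code.
	 */
	return (uint8_t)(irsp->un.ulpWord[4] >> 16);
}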
e59058c4 1937/**
3621a710 1938 * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport
e59058c4
JS
1939 * @vport: pointer to a host virtual N_Port data structure.
1940 * @did: destination port identifier.
1941 * @retry: number of retries to the command IOCB.
1942 *
1943 * This routine issues a Port Login (PLOGI) command to a remote N_Port
1944 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
1945 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
 1946 * This routine constructs the proper fields of the PLOGI IOCB and invokes
1947 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
1948 *
1949 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1950 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1951 * will be stored into the context1 field of the IOCB for the completion
1952 * callback function to the PLOGI ELS command.
1953 *
1954 * Return code
1955 * 0 - Successfully issued a plogi for @vport
1956 * 1 - failed to issue a plogi for @vport
1957 **/
dea3101e 1958int
2e0fef85 1959lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
dea3101e 1960{
2e0fef85 1961 struct lpfc_hba *phba = vport->phba;
dea3101e 1962 struct serv_parm *sp;
98c9ea5c 1963 struct lpfc_nodelist *ndlp;
dea3101e 1964 struct lpfc_iocbq *elsiocb;
dea3101e
JB
1965 uint8_t *pcmd;
1966 uint16_t cmdsize;
92d7f7b0 1967 int ret;
dea3101e 1968
98c9ea5c 1969 ndlp = lpfc_findnode_did(vport, did);
e47c9093
JS
1970 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1971 ndlp = NULL;
98c9ea5c 1972
e47c9093 1973 /* If ndlp is not NULL, we will bump the reference count on it */
92d7f7b0 1974 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
98c9ea5c 1975 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
2e0fef85 1976 ELS_CMD_PLOGI);
c9f8735b
JW
1977 if (!elsiocb)
1978 return 1;
dea3101e 1979
dea3101e
JB
1980 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1981
1982 /* For PLOGI request, remainder of payload is service parameters */
1983 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
92d7f7b0
JS
1984 pcmd += sizeof(uint32_t);
1985 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
dea3101e
JB
1986 sp = (struct serv_parm *) pcmd;
1987
5ac6b303
JS
1988 /*
 1989 * If we are an N_Port connected to a Fabric, fix up the parameters so logins
 1990 * to devices on remote loops work.
1991 */
1992 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
1993 sp->cmn.altBbCredit = 1;
1994
dea3101e
JB
1995 if (sp->cmn.fcphLow < FC_PH_4_3)
1996 sp->cmn.fcphLow = FC_PH_4_3;
1997
1998 if (sp->cmn.fcphHigh < FC_PH3)
1999 sp->cmn.fcphHigh = FC_PH3;
2000
858c9f6c
JS
2001 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2002 "Issue PLOGI: did:x%x",
2003 did, 0, 0);
2004
dea3101e
JB
2005 phba->fc_stat.elsXmitPLOGI++;
2006 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
3772a991 2007 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
92d7f7b0
JS
2008
2009 if (ret == IOCB_ERROR) {
dea3101e 2010 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2011 return 1;
dea3101e 2012 }
c9f8735b 2013 return 0;
dea3101e
JB
2014}
2015
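/*
 * Illustrative sketch, not part of the upstream lpfc source: a minimal
 * caller of lpfc_issue_els_plogi().  The wrapper name and the "0000"
 * log message number are placeholders; the return-code convention
 * (0 = issued, 1 = failed) and the fact that lpfc_prep_els_iocb() takes
 * its own ndlp reference come from the kernel-doc above.
 */
static int
lpfc_example_start_plogi(struct lpfc_vport *vport, uint32_t did)
{
	/* retry count starts at 0 for a fresh attempt */
	if (lpfc_issue_els_plogi(vport, did, 0)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0000 Example: PLOGI to x%x not issued\n",
				 did);
		return 1;
	}
	return 0;
}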
e59058c4 2016/**
3621a710 2017 * lpfc_cmpl_els_prli - Completion callback function for prli
e59058c4
JS
2018 * @phba: pointer to lpfc hba data structure.
2019 * @cmdiocb: pointer to lpfc command iocb data structure.
2020 * @rspiocb: pointer to lpfc response iocb data structure.
2021 *
2022 * This routine is the completion callback function for a Process Login
2023 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
 2024 * status. If an error status is reported, a PRLI retry shall be attempted
 2025 * by invoking the lpfc_els_retry() routine. Otherwise, the event
 2026 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
2027 * ndlp to mark the PRLI completion.
2028 **/
dea3101e 2029static void
2e0fef85
JS
2030lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2031 struct lpfc_iocbq *rspiocb)
dea3101e 2032{
2e0fef85
JS
2033 struct lpfc_vport *vport = cmdiocb->vport;
2034 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 2035 IOCB_t *irsp;
dea3101e
JB
2036 struct lpfc_nodelist *ndlp;
2037
dea3101e
JB
2038 /* we pass cmdiocb to state machine which needs rspiocb as well */
2039 cmdiocb->context_un.rsp_iocb = rspiocb;
2040
2041 irsp = &(rspiocb->iocb);
2042 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2e0fef85 2043 spin_lock_irq(shost->host_lock);
dea3101e 2044 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2e0fef85 2045 spin_unlock_irq(shost->host_lock);
dea3101e 2046
858c9f6c
JS
2047 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2048 "PRLI cmpl: status:x%x/x%x did:x%x",
2049 irsp->ulpStatus, irsp->un.ulpWord[4],
2050 ndlp->nlp_DID);
dea3101e 2051 /* PRLI completes to NPort <nlp_DID> */
e8b62011
JS
2052 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2053 "0103 PRLI completes to NPort x%x "
2054 "Data: x%x x%x x%x x%x\n",
2055 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2056 irsp->ulpTimeout, vport->num_disc_nodes);
dea3101e 2057
2e0fef85 2058 vport->fc_prli_sent--;
dea3101e 2059 /* Check to see if link went down during discovery */
2e0fef85 2060 if (lpfc_els_chk_latt(vport))
dea3101e
JB
2061 goto out;
2062
2063 if (irsp->ulpStatus) {
2064 /* Check for retry */
2065 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2066 /* ELS command is being retried */
2067 goto out;
2068 }
2069 /* PRLI failed */
e40a02c1
JS
2070 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2071 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
2072 ndlp->nlp_DID, irsp->ulpStatus,
2073 irsp->un.ulpWord[4]);
dea3101e 2074 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 2075 if (lpfc_error_lost_link(irsp))
dea3101e 2076 goto out;
e47c9093 2077 else
2e0fef85 2078 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 2079 NLP_EVT_CMPL_PRLI);
e47c9093 2080 } else
dea3101e 2081 /* Good status, call state machine */
2e0fef85 2082 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 2083 NLP_EVT_CMPL_PRLI);
dea3101e
JB
2084out:
2085 lpfc_els_free_iocb(phba, cmdiocb);
2086 return;
2087}
2088
e59058c4 2089/**
3621a710 2090 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
e59058c4
JS
2091 * @vport: pointer to a host virtual N_Port data structure.
2092 * @ndlp: pointer to a node-list data structure.
2093 * @retry: number of retries to the command IOCB.
2094 *
2095 * This routine issues a Process Login (PRLI) ELS command for the
2096 * @vport. The PRLI service parameters are set up in the payload of the
2097 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
2098 * is put to the IOCB completion callback func field before invoking the
2099 * routine lpfc_sli_issue_iocb() to send out PRLI command.
2100 *
2101 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2102 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2103 * will be stored into the context1 field of the IOCB for the completion
2104 * callback function to the PRLI ELS command.
2105 *
2106 * Return code
2107 * 0 - successfully issued prli iocb command for @vport
2108 * 1 - failed to issue prli iocb command for @vport
2109 **/
dea3101e 2110int
2e0fef85 2111lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e
JB
2112 uint8_t retry)
2113{
2e0fef85
JS
2114 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2115 struct lpfc_hba *phba = vport->phba;
dea3101e 2116 PRLI *npr;
dea3101e 2117 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2118 uint8_t *pcmd;
2119 uint16_t cmdsize;
2120
92d7f7b0 2121 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2e0fef85
JS
2122 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2123 ndlp->nlp_DID, ELS_CMD_PRLI);
488d1469 2124 if (!elsiocb)
c9f8735b 2125 return 1;
dea3101e 2126
dea3101e
JB
2127 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2128
2129 /* For PRLI request, remainder of payload is service parameters */
92d7f7b0 2130 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
dea3101e 2131 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
92d7f7b0 2132 pcmd += sizeof(uint32_t);
dea3101e
JB
2133
2134 /* For PRLI, remainder of payload is PRLI parameter page */
2135 npr = (PRLI *) pcmd;
2136 /*
2137 * If our firmware version is 3.20 or later,
2138 * set the following bits for FC-TAPE support.
2139 */
2140 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
2141 npr->ConfmComplAllowed = 1;
2142 npr->Retry = 1;
2143 npr->TaskRetryIdReq = 1;
2144 }
2145 npr->estabImagePair = 1;
2146 npr->readXferRdyDis = 1;
3cb01c57
JS
2147 if (vport->cfg_first_burst_size)
2148 npr->writeXferRdyDis = 1;
dea3101e
JB
2149
2150 /* For FCP support */
2151 npr->prliType = PRLI_FCP_TYPE;
2152 npr->initiatorFunc = 1;
2153
858c9f6c
JS
2154 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2155 "Issue PRLI: did:x%x",
2156 ndlp->nlp_DID, 0, 0);
2157
dea3101e
JB
2158 phba->fc_stat.elsXmitPRLI++;
2159 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
2e0fef85 2160 spin_lock_irq(shost->host_lock);
dea3101e 2161 ndlp->nlp_flag |= NLP_PRLI_SND;
2e0fef85 2162 spin_unlock_irq(shost->host_lock);
3772a991
JS
2163 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2164 IOCB_ERROR) {
2e0fef85 2165 spin_lock_irq(shost->host_lock);
dea3101e 2166 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2e0fef85 2167 spin_unlock_irq(shost->host_lock);
dea3101e 2168 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2169 return 1;
dea3101e 2170 }
2e0fef85 2171 vport->fc_prli_sent++;
c9f8735b 2172 return 0;
dea3101e
JB
2173}
2174
90160e01 2175/**
3621a710 2176 * lpfc_rscn_disc - Perform rscn discovery for a vport
90160e01
JS
2177 * @vport: pointer to a host virtual N_Port data structure.
2178 *
2179 * This routine performs Registration State Change Notification (RSCN)
2180 * discovery for a @vport. If the @vport's node port recovery count is not
2181 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
 2182 * the nodes that need recovery. If none of the PLOGIs were needed through
 2183 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
 2184 * invoked to check for and handle any further RSCNs that came in while the
 2185 * current ones were being processed.
2186 **/
2187static void
2188lpfc_rscn_disc(struct lpfc_vport *vport)
2189{
2190 lpfc_can_disctmo(vport);
2191
2192 /* RSCN discovery */
2193 /* go thru NPR nodes and issue ELS PLOGIs */
2194 if (vport->fc_npr_cnt)
2195 if (lpfc_els_disc_plogi(vport))
2196 return;
2197
2198 lpfc_end_rscn(vport);
2199}
2200
2201/**
3621a710 2202 * lpfc_adisc_done - Complete the adisc phase of discovery
90160e01
JS
2203 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2204 *
2205 * This function is called when the final ADISC is completed during discovery.
2206 * This function handles clearing link attention or issuing reg_vpi depending
2207 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2208 * discovery.
2209 * This function is called with no locks held.
2210 **/
2211static void
2212lpfc_adisc_done(struct lpfc_vport *vport)
2213{
2214 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2215 struct lpfc_hba *phba = vport->phba;
2216
2217 /*
2218 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2219 * and continue discovery.
2220 */
2221 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
6fb120a7
JS
2222 !(vport->fc_flag & FC_RSCN_MODE) &&
2223 (phba->sli_rev < LPFC_SLI_REV4)) {
d454c91f
JS
2224 /* The ADISCs are complete. Doesn't matter if they
2225 * succeeded or failed because the ADISC completion
2226 * routine guarantees to call the state machine and
2227 * the RPI is either unregistered (failed ADISC response)
2228 * or the RPI is still valid and the node is marked
2229 * mapped for a target. The exchanges should be in the
2230 * correct state. This code is specific to SLI3.
2231 */
2232 lpfc_issue_clear_la(phba, vport);
90160e01
JS
2233 lpfc_issue_reg_vpi(phba, vport);
2234 return;
2235 }
2236 /*
2237 * For SLI2, we need to set port_state to READY
2238 * and continue discovery.
2239 */
2240 if (vport->port_state < LPFC_VPORT_READY) {
2241 /* If we get here, there is nothing to ADISC */
85c0f177 2242 lpfc_issue_clear_la(phba, vport);
90160e01
JS
2243 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2244 vport->num_disc_nodes = 0;
2245 /* go thru NPR list, issue ELS PLOGIs */
2246 if (vport->fc_npr_cnt)
2247 lpfc_els_disc_plogi(vport);
2248 if (!vport->num_disc_nodes) {
2249 spin_lock_irq(shost->host_lock);
2250 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2251 spin_unlock_irq(shost->host_lock);
2252 lpfc_can_disctmo(vport);
2253 lpfc_end_rscn(vport);
2254 }
2255 }
2256 vport->port_state = LPFC_VPORT_READY;
2257 } else
2258 lpfc_rscn_disc(vport);
2259}
2260
e59058c4 2261/**
3621a710 2262 * lpfc_more_adisc - Issue more adisc as needed
e59058c4
JS
2263 * @vport: pointer to a host virtual N_Port data structure.
2264 *
2265 * This routine determines whether there are more ndlps on a @vport
2266 * node list need to have Address Discover (ADISC) issued. If so, it will
2267 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2268 * remaining nodes which need to have ADISC sent.
2269 **/
0ff10d46 2270void
2e0fef85 2271lpfc_more_adisc(struct lpfc_vport *vport)
dea3101e 2272{
2e0fef85
JS
2273 if (vport->num_disc_nodes)
2274 vport->num_disc_nodes--;
dea3101e 2275 /* Continue discovery with <num_disc_nodes> ADISCs to go */
e8b62011
JS
2276 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2277 "0210 Continue discovery with %d ADISCs to go "
2278 "Data: x%x x%x x%x\n",
2279 vport->num_disc_nodes, vport->fc_adisc_cnt,
2280 vport->fc_flag, vport->port_state);
dea3101e 2281 /* Check to see if there are more ADISCs to be sent */
2e0fef85
JS
2282 if (vport->fc_flag & FC_NLP_MORE) {
2283 lpfc_set_disctmo(vport);
2284 /* go thru NPR nodes and issue any remaining ELS ADISCs */
eb016566 2285 lpfc_els_disc_adisc(vport);
dea3101e 2286 }
90160e01
JS
2287 if (!vport->num_disc_nodes)
2288 lpfc_adisc_done(vport);
dea3101e
JB
2289 return;
2290}
2291
e59058c4 2292/**
3621a710 2293 * lpfc_cmpl_els_adisc - Completion callback function for adisc
e59058c4
JS
2294 * @phba: pointer to lpfc hba data structure.
2295 * @cmdiocb: pointer to lpfc command iocb data structure.
2296 * @rspiocb: pointer to lpfc response iocb data structure.
2297 *
2298 * This routine is the completion function for issuing the Address Discover
2299 * (ADISC) command. It first checks to see whether link went down during
 2301 * the discovery process. If so, the node will be marked for node port
 2302 * recovery so that a discovery IOCB can be issued by the link attention
 2303 * handler, and the routine exits. Otherwise, the response status is checked.
 2304 * If an error was reported, the ADISC command shall be retried by invoking
2304 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
2305 * the response status, the state machine is invoked to set transition
2306 * with respect to NLP_EVT_CMPL_ADISC event.
2307 **/
dea3101e 2308static void
2e0fef85
JS
2309lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2310 struct lpfc_iocbq *rspiocb)
dea3101e 2311{
2e0fef85
JS
2312 struct lpfc_vport *vport = cmdiocb->vport;
2313 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 2314 IOCB_t *irsp;
dea3101e 2315 struct lpfc_nodelist *ndlp;
2e0fef85 2316 int disc;
dea3101e
JB
2317
2318 /* we pass cmdiocb to state machine which needs rspiocb as well */
2319 cmdiocb->context_un.rsp_iocb = rspiocb;
2320
2321 irsp = &(rspiocb->iocb);
2322 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
dea3101e 2323
858c9f6c
JS
2324 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2325 "ADISC cmpl: status:x%x/x%x did:x%x",
2326 irsp->ulpStatus, irsp->un.ulpWord[4],
2327 ndlp->nlp_DID);
2328
dea3101e
JB
2329 /* Since ndlp can be freed in the disc state machine, note if this node
2330 * is being used during discovery.
2331 */
2e0fef85 2332 spin_lock_irq(shost->host_lock);
dea3101e 2333 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
c9f8735b 2334 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2e0fef85 2335 spin_unlock_irq(shost->host_lock);
dea3101e 2336 /* ADISC completes to NPort <nlp_DID> */
e8b62011
JS
2337 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2338 "0104 ADISC completes to NPort x%x "
2339 "Data: x%x x%x x%x x%x x%x\n",
2340 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2341 irsp->ulpTimeout, disc, vport->num_disc_nodes);
dea3101e 2342 /* Check to see if link went down during discovery */
2e0fef85
JS
2343 if (lpfc_els_chk_latt(vport)) {
2344 spin_lock_irq(shost->host_lock);
dea3101e 2345 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 2346 spin_unlock_irq(shost->host_lock);
dea3101e
JB
2347 goto out;
2348 }
2349
2350 if (irsp->ulpStatus) {
2351 /* Check for retry */
2352 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2353 /* ELS command is being retried */
2354 if (disc) {
2e0fef85 2355 spin_lock_irq(shost->host_lock);
dea3101e 2356 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85
JS
2357 spin_unlock_irq(shost->host_lock);
2358 lpfc_set_disctmo(vport);
dea3101e
JB
2359 }
2360 goto out;
2361 }
2362 /* ADISC failed */
e40a02c1
JS
2363 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2364 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2365 ndlp->nlp_DID, irsp->ulpStatus,
2366 irsp->un.ulpWord[4]);
dea3101e 2367 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 2368 if (!lpfc_error_lost_link(irsp))
2e0fef85 2369 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
858c9f6c 2370 NLP_EVT_CMPL_ADISC);
e47c9093 2371 } else
dea3101e 2372 /* Good status, call state machine */
2e0fef85 2373 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
dea3101e 2374 NLP_EVT_CMPL_ADISC);
dea3101e 2375
90160e01
JS
2376 /* Check to see if there are more ADISCs to be sent */
2377 if (disc && vport->num_disc_nodes)
2e0fef85 2378 lpfc_more_adisc(vport);
dea3101e
JB
2379out:
2380 lpfc_els_free_iocb(phba, cmdiocb);
2381 return;
2382}
2383
e59058c4 2384/**
3621a710 2385 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
e59058c4
JS
2386 * @vport: pointer to a virtual N_Port data structure.
2387 * @ndlp: pointer to a node-list data structure.
2388 * @retry: number of retries to the command IOCB.
2389 *
2390 * This routine issues an Address Discover (ADISC) for an @ndlp on a
 2391 * @vport. It prepares the payload of the ADISC ELS command, updates the
 2392 * flags and state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2393 * to issue the ADISC ELS command.
2394 *
2395 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2396 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2397 * will be stored into the context1 field of the IOCB for the completion
2398 * callback function to the ADISC ELS command.
2399 *
2400 * Return code
2401 * 0 - successfully issued adisc
2402 * 1 - failed to issue adisc
2403 **/
dea3101e 2404int
2e0fef85 2405lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e
JB
2406 uint8_t retry)
2407{
2e0fef85
JS
2408 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2409 struct lpfc_hba *phba = vport->phba;
dea3101e 2410 ADISC *ap;
dea3101e 2411 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2412 uint8_t *pcmd;
2413 uint16_t cmdsize;
2414
92d7f7b0 2415 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2e0fef85
JS
2416 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2417 ndlp->nlp_DID, ELS_CMD_ADISC);
488d1469 2418 if (!elsiocb)
c9f8735b 2419 return 1;
dea3101e 2420
dea3101e
JB
2421 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2422
2423 /* For ADISC request, remainder of payload is service parameters */
2424 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
92d7f7b0 2425 pcmd += sizeof(uint32_t);
dea3101e
JB
2426
2427 /* Fill in ADISC payload */
2428 ap = (ADISC *) pcmd;
2429 ap->hardAL_PA = phba->fc_pref_ALPA;
92d7f7b0
JS
2430 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2431 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 2432 ap->DID = be32_to_cpu(vport->fc_myDID);
dea3101e 2433
858c9f6c
JS
2434 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2435 "Issue ADISC: did:x%x",
2436 ndlp->nlp_DID, 0, 0);
2437
dea3101e
JB
2438 phba->fc_stat.elsXmitADISC++;
2439 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2e0fef85 2440 spin_lock_irq(shost->host_lock);
dea3101e 2441 ndlp->nlp_flag |= NLP_ADISC_SND;
2e0fef85 2442 spin_unlock_irq(shost->host_lock);
3772a991
JS
2443 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2444 IOCB_ERROR) {
2e0fef85 2445 spin_lock_irq(shost->host_lock);
dea3101e 2446 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2e0fef85 2447 spin_unlock_irq(shost->host_lock);
dea3101e 2448 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2449 return 1;
dea3101e 2450 }
c9f8735b 2451 return 0;
dea3101e
JB
2452}
2453
e59058c4 2454/**
3621a710 2455 * lpfc_cmpl_els_logo - Completion callback function for logo
e59058c4
JS
2456 * @phba: pointer to lpfc hba data structure.
2457 * @cmdiocb: pointer to lpfc command iocb data structure.
2458 * @rspiocb: pointer to lpfc response iocb data structure.
2459 *
2460 * This routine is the completion function for issuing the ELS Logout (LOGO)
2461 * command. If no error status was reported from the LOGO response, the
2462 * state machine of the associated ndlp shall be invoked for transition with
2463 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2464 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2465 **/
dea3101e 2466static void
2e0fef85
JS
2467lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2468 struct lpfc_iocbq *rspiocb)
dea3101e 2469{
2e0fef85
JS
2470 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2471 struct lpfc_vport *vport = ndlp->vport;
2472 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 2473 IOCB_t *irsp;
92494144 2474 struct lpfcMboxq *mbox;
086a345f
JS
2475 unsigned long flags;
2476 uint32_t skip_recovery = 0;
dea3101e 2477
dea3101e
JB
2478 /* we pass cmdiocb to state machine which needs rspiocb as well */
2479 cmdiocb->context_un.rsp_iocb = rspiocb;
2480
2481 irsp = &(rspiocb->iocb);
2e0fef85 2482 spin_lock_irq(shost->host_lock);
dea3101e 2483 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2e0fef85 2484 spin_unlock_irq(shost->host_lock);
dea3101e 2485
858c9f6c
JS
2486 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2487 "LOGO cmpl: status:x%x/x%x did:x%x",
2488 irsp->ulpStatus, irsp->un.ulpWord[4],
2489 ndlp->nlp_DID);
086a345f 2490
dea3101e 2491 /* LOGO completes to NPort <nlp_DID> */
e8b62011
JS
2492 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2493 "0105 LOGO completes to NPort x%x "
2494 "Data: x%x x%x x%x x%x\n",
2495 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2496 irsp->ulpTimeout, vport->num_disc_nodes);
086a345f
JS
2497
2498 if (lpfc_els_chk_latt(vport)) {
2499 skip_recovery = 1;
dea3101e 2500 goto out;
086a345f 2501 }
dea3101e 2502
086a345f 2503 /* Check to see if link went down during discovery */
92d7f7b0
JS
2504 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2505 /* NLP_EVT_DEVICE_RM should unregister the RPI
2506 * which should abort all outstanding IOs.
2507 */
2508 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2509 NLP_EVT_DEVICE_RM);
086a345f 2510 skip_recovery = 1;
92d7f7b0
JS
2511 goto out;
2512 }
2513
dea3101e
JB
2514 if (irsp->ulpStatus) {
2515 /* Check for retry */
086a345f 2516 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
dea3101e 2517 /* ELS command is being retried */
086a345f 2518 skip_recovery = 1;
dea3101e 2519 goto out;
086a345f 2520 }
dea3101e 2521 /* LOGO failed */
e40a02c1
JS
2522 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2523 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2524 ndlp->nlp_DID, irsp->ulpStatus,
2525 irsp->un.ulpWord[4]);
dea3101e 2526 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
086a345f
JS
2527 if (lpfc_error_lost_link(irsp)) {
2528 skip_recovery = 1;
dea3101e 2529 goto out;
086a345f
JS
2530 }
2531 }
2532
2533 /* Call state machine. This will unregister the rpi if needed. */
2534 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
2535
dea3101e
JB
2536out:
2537 lpfc_els_free_iocb(phba, cmdiocb);
92494144
JS
2538 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
2539 if ((vport->fc_flag & FC_PT2PT) &&
2540 !(vport->fc_flag & FC_PT2PT_PLOGI)) {
2541 phba->pport->fc_myDID = 0;
2542 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2543 if (mbox) {
2544 lpfc_config_link(phba, mbox);
2545 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2546 mbox->vport = vport;
2547 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
2548 MBX_NOT_FINISHED) {
2549 mempool_free(mbox, phba->mbox_mem_pool);
086a345f 2550 skip_recovery = 1;
92494144
JS
2551 }
2552 }
2553 }
086a345f
JS
2554
2555 /*
2556 * If the node is a target, the handling attempts to recover the port.
2557 * For any other port type, the rpi is unregistered as an implicit
2558 * LOGO.
2559 */
2560 if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
2561 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2562 spin_lock_irqsave(shost->host_lock, flags);
2563 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2564 spin_unlock_irqrestore(shost->host_lock, flags);
2565
2566 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2567 "3187 LOGO completes to NPort x%x: Start "
2568 "Recovery Data: x%x x%x x%x x%x\n",
2569 ndlp->nlp_DID, irsp->ulpStatus,
2570 irsp->un.ulpWord[4], irsp->ulpTimeout,
2571 vport->num_disc_nodes);
2572 lpfc_disc_start(vport);
2573 }
dea3101e
JB
2574 return;
2575}
2576
e59058c4 2577/**
3621a710 2578 * lpfc_issue_els_logo - Issue a logo to a node on a vport
e59058c4
JS
2579 * @vport: pointer to a virtual N_Port data structure.
2580 * @ndlp: pointer to a node-list data structure.
2581 * @retry: number of retries to the command IOCB.
2582 *
2583 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2584 * to a remote node, referred by an @ndlp on a @vport. It constructs the
2585 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2586 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2587 *
2588 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2589 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2590 * will be stored into the context1 field of the IOCB for the completion
2591 * callback function to the LOGO ELS command.
2592 *
2593 * Return code
2594 * 0 - successfully issued logo
2595 * 1 - failed to issue logo
2596 **/
dea3101e 2597int
2e0fef85 2598lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e
JB
2599 uint8_t retry)
2600{
2e0fef85
JS
2601 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2602 struct lpfc_hba *phba = vport->phba;
dea3101e 2603 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2604 uint8_t *pcmd;
2605 uint16_t cmdsize;
92d7f7b0 2606 int rc;
dea3101e 2607
98c9ea5c
JS
2608 spin_lock_irq(shost->host_lock);
2609 if (ndlp->nlp_flag & NLP_LOGO_SND) {
2610 spin_unlock_irq(shost->host_lock);
2611 return 0;
2612 }
2613 spin_unlock_irq(shost->host_lock);
2614
92d7f7b0 2615 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2e0fef85
JS
2616 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2617 ndlp->nlp_DID, ELS_CMD_LOGO);
488d1469 2618 if (!elsiocb)
c9f8735b 2619 return 1;
dea3101e 2620
dea3101e
JB
2621 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2622 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
92d7f7b0 2623 pcmd += sizeof(uint32_t);
dea3101e
JB
2624
2625 /* Fill in LOGO payload */
2e0fef85 2626 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
92d7f7b0
JS
2627 pcmd += sizeof(uint32_t);
2628 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
dea3101e 2629
858c9f6c
JS
2630 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2631 "Issue LOGO: did:x%x",
2632 ndlp->nlp_DID, 0, 0);
2633
086a345f
JS
2634 /*
2635 * If we are issuing a LOGO, we may try to recover the remote NPort
2636 * by issuing a PLOGI later. Even though we issue ELS cmds by the
2637 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
 2638 * that ELS command is in-flight, the HBA returns an IOERR_INVALID_RPI
 2639 * for that ELS cmd. To avoid this situation, let's get rid of the
2640 * RPI right now, before any ELS cmds are sent.
2641 */
2642 spin_lock_irq(shost->host_lock);
2643 ndlp->nlp_flag |= NLP_ISSUE_LOGO;
2644 spin_unlock_irq(shost->host_lock);
2645 if (lpfc_unreg_rpi(vport, ndlp)) {
2646 lpfc_els_free_iocb(phba, elsiocb);
2647 return 0;
2648 }
2649
dea3101e
JB
2650 phba->fc_stat.elsXmitLOGO++;
2651 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2e0fef85 2652 spin_lock_irq(shost->host_lock);
dea3101e 2653 ndlp->nlp_flag |= NLP_LOGO_SND;
086a345f 2654 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
2e0fef85 2655 spin_unlock_irq(shost->host_lock);
3772a991 2656 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
92d7f7b0
JS
2657
2658 if (rc == IOCB_ERROR) {
2e0fef85 2659 spin_lock_irq(shost->host_lock);
dea3101e 2660 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2e0fef85 2661 spin_unlock_irq(shost->host_lock);
dea3101e 2662 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2663 return 1;
dea3101e 2664 }
c9f8735b 2665 return 0;
dea3101e
JB
2666}
2667
e59058c4 2668/**
3621a710 2669 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
e59058c4
JS
2670 * @phba: pointer to lpfc hba data structure.
2671 * @cmdiocb: pointer to lpfc command iocb data structure.
2672 * @rspiocb: pointer to lpfc response iocb data structure.
2673 *
2674 * This routine is a generic completion callback function for ELS commands.
2675 * Specifically, it is the callback function which does not need to perform
2676 * any command specific operations. It is currently used by the ELS command
2677 * issuing routines for the ELS State Change Request (SCR),
2678 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2679 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
 2680 * certain debug logging, this callback function simply invokes the
2681 * lpfc_els_chk_latt() routine to check whether link went down during the
2682 * discovery process.
2683 **/
dea3101e 2684static void
2e0fef85
JS
2685lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2686 struct lpfc_iocbq *rspiocb)
dea3101e 2687{
2e0fef85 2688 struct lpfc_vport *vport = cmdiocb->vport;
dea3101e
JB
2689 IOCB_t *irsp;
2690
2691 irsp = &rspiocb->iocb;
2692
858c9f6c
JS
2693 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2694 "ELS cmd cmpl: status:x%x/x%x did:x%x",
2695 irsp->ulpStatus, irsp->un.ulpWord[4],
2696 irsp->un.elsreq64.remoteID);
dea3101e 2697 /* ELS cmd tag <ulpIoTag> completes */
e8b62011
JS
2698 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2699 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2700 irsp->ulpIoTag, irsp->ulpStatus,
2701 irsp->un.ulpWord[4], irsp->ulpTimeout);
dea3101e 2702 /* Check to see if link went down during discovery */
2e0fef85 2703 lpfc_els_chk_latt(vport);
dea3101e
JB
2704 lpfc_els_free_iocb(phba, cmdiocb);
2705 return;
2706}
2707
e59058c4 2708/**
3621a710 2709 * lpfc_issue_els_scr - Issue a scr to a node on a vport
e59058c4
JS
2710 * @vport: pointer to a host virtual N_Port data structure.
2711 * @nportid: N_Port identifier to the remote node.
2712 * @retry: number of retries to the command IOCB.
2713 *
2714 * This routine issues a State Change Request (SCR) to a fabric node
2715 * on a @vport. The remote node @nportid is passed into the function. It
 2716 * first searches the @vport node list to find the matching ndlp. If no such
2717 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2718 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2719 * routine is invoked to send the SCR IOCB.
2720 *
2721 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2722 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2723 * will be stored into the context1 field of the IOCB for the completion
2724 * callback function to the SCR ELS command.
2725 *
2726 * Return code
2727 * 0 - Successfully issued scr command
2728 * 1 - Failed to issue scr command
2729 **/
dea3101e 2730int
2e0fef85 2731lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
dea3101e 2732{
2e0fef85 2733 struct lpfc_hba *phba = vport->phba;
dea3101e 2734 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2735 uint8_t *pcmd;
2736 uint16_t cmdsize;
2737 struct lpfc_nodelist *ndlp;
2738
92d7f7b0 2739 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
dea3101e 2740
e47c9093
JS
2741 ndlp = lpfc_findnode_did(vport, nportid);
2742 if (!ndlp) {
2743 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2744 if (!ndlp)
2745 return 1;
2746 lpfc_nlp_init(vport, ndlp, nportid);
2747 lpfc_enqueue_node(vport, ndlp);
2748 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2749 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2750 if (!ndlp)
2751 return 1;
2752 }
2e0fef85
JS
2753
2754 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2755 ndlp->nlp_DID, ELS_CMD_SCR);
dea3101e 2756
488d1469 2757 if (!elsiocb) {
fa4066b6
JS
2758 /* This will trigger the release of the node just
2759 * allocated
2760 */
329f9bc7 2761 lpfc_nlp_put(ndlp);
c9f8735b 2762 return 1;
dea3101e
JB
2763 }
2764
dea3101e
JB
2765 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2766
2767 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
92d7f7b0 2768 pcmd += sizeof(uint32_t);
dea3101e
JB
2769
2770 /* For SCR, remainder of payload is SCR parameter page */
92d7f7b0 2771 memset(pcmd, 0, sizeof(SCR));
dea3101e
JB
2772 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
2773
858c9f6c
JS
2774 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2775 "Issue SCR: did:x%x",
2776 ndlp->nlp_DID, 0, 0);
2777
dea3101e
JB
2778 phba->fc_stat.elsXmitSCR++;
2779 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3772a991
JS
2780 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2781 IOCB_ERROR) {
fa4066b6
JS
2782 /* The additional lpfc_nlp_put will cause the following
 2783 * lpfc_els_free_iocb routine to trigger the release of
2784 * the node.
2785 */
329f9bc7 2786 lpfc_nlp_put(ndlp);
dea3101e 2787 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2788 return 1;
dea3101e 2789 }
fa4066b6
JS
2790 /* This will cause the callback-function lpfc_cmpl_els_cmd to
 2791 * trigger the release of the node.
2792 */
cff261f6 2793
329f9bc7 2794 lpfc_nlp_put(ndlp);
c9f8735b 2795 return 0;
dea3101e
JB
2796}
2797
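/*
 * Note on the ndlp reference balance for the "fire and forget" ELS
 * commands that complete through lpfc_cmpl_els_cmd() (SCR above, FARPR
 * below): lpfc_prep_els_iocb() takes one ndlp reference for the IOCB,
 * and the completion handler never dereferences the ndlp.  The issuing
 * routine therefore drops its own reference straight away, either before
 * lpfc_els_free_iocb() on the error path or right after a successful
 * lpfc_sli_issue_iocb(), so a node created only for this request can be
 * released once the IOCB itself is freed.
 */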
e59058c4 2798/**
3621a710 2799 * lpfc_issue_els_farpr - Issue a farp to a node on a vport
e59058c4
JS
2800 * @vport: pointer to a host virtual N_Port data structure.
2801 * @nportid: N_Port identifier to the remote node.
2802 * @retry: number of retries to the command IOCB.
2803 *
2804 * This routine issues a Fibre Channel Address Resolution Response
2805 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
 2806 * is passed into the function. It first searches the @vport node list to find
2807 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
2808 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
2809 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
2810 *
2811 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2812 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2813 * will be stored into the context1 field of the IOCB for the completion
 2814 * callback function to the FARPR ELS command.
2815 *
2816 * Return code
2817 * 0 - Successfully issued farpr command
2818 * 1 - Failed to issue farpr command
2819 **/
dea3101e 2820static int
2e0fef85 2821lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
dea3101e 2822{
2e0fef85 2823 struct lpfc_hba *phba = vport->phba;
dea3101e 2824 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2825 FARP *fp;
2826 uint8_t *pcmd;
2827 uint32_t *lp;
2828 uint16_t cmdsize;
2829 struct lpfc_nodelist *ondlp;
2830 struct lpfc_nodelist *ndlp;
2831
92d7f7b0 2832 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
dea3101e 2833
e47c9093
JS
2834 ndlp = lpfc_findnode_did(vport, nportid);
2835 if (!ndlp) {
2836 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2837 if (!ndlp)
2838 return 1;
2839 lpfc_nlp_init(vport, ndlp, nportid);
2840 lpfc_enqueue_node(vport, ndlp);
2841 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2842 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2843 if (!ndlp)
2844 return 1;
2845 }
2e0fef85
JS
2846
2847 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2848 ndlp->nlp_DID, ELS_CMD_RNID);
488d1469 2849 if (!elsiocb) {
fa4066b6
JS
2850 /* This will trigger the release of the node just
2851 * allocated
2852 */
329f9bc7 2853 lpfc_nlp_put(ndlp);
c9f8735b 2854 return 1;
dea3101e
JB
2855 }
2856
dea3101e
JB
2857 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2858
2859 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
92d7f7b0 2860 pcmd += sizeof(uint32_t);
dea3101e
JB
2861
2862 /* Fill in FARPR payload */
2863 fp = (FARP *) (pcmd);
92d7f7b0 2864 memset(fp, 0, sizeof(FARP));
dea3101e
JB
2865 lp = (uint32_t *) pcmd;
2866 *lp++ = be32_to_cpu(nportid);
2e0fef85 2867 *lp++ = be32_to_cpu(vport->fc_myDID);
dea3101e
JB
2868 fp->Rflags = 0;
2869 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
2870
92d7f7b0
JS
2871 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
2872 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 2873 ondlp = lpfc_findnode_did(vport, nportid);
e47c9093 2874 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
dea3101e 2875 memcpy(&fp->OportName, &ondlp->nlp_portname,
92d7f7b0 2876 sizeof(struct lpfc_name));
dea3101e 2877 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
92d7f7b0 2878 sizeof(struct lpfc_name));
dea3101e
JB
2879 }
2880
858c9f6c
JS
2881 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2882 "Issue FARPR: did:x%x",
2883 ndlp->nlp_DID, 0, 0);
2884
dea3101e
JB
2885 phba->fc_stat.elsXmitFARPR++;
2886 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3772a991
JS
2887 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2888 IOCB_ERROR) {
fa4066b6
JS
2889 /* The additional lpfc_nlp_put will cause the following
2890 * lpfc_els_free_iocb routine to trigger the release of
2891 * the node.
2892 */
329f9bc7 2893 lpfc_nlp_put(ndlp);
dea3101e 2894 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2895 return 1;
dea3101e 2896 }
fa4066b6
JS
2897 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2898 * trigger the release of the node.
2899 */
329f9bc7 2900 lpfc_nlp_put(ndlp);
c9f8735b 2901 return 0;
dea3101e
JB
2902}
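
/*
 * Editor's sketch (standalone, not part of the driver): the FARPR payload
 * prepared above is a 32-bit ELS command word followed by the FARP
 * parameter block.  The structure below is a simplified stand-in for the
 * driver's FARP definition, used only to illustrate that layout and how
 * cmdsize is derived from it.
 */
#include <stdint.h>
#include <string.h>

struct farp_sketch {			/* simplified stand-in for FARP */
	uint32_t match_addr;		/* requester N_Port ID          */
	uint32_t resp_addr;		/* responder N_Port ID          */
	uint8_t  port_wwn[8];		/* requester port name          */
	uint8_t  node_wwn[8];		/* requester node name          */
};

static size_t farpr_payload_sketch(uint8_t *pcmd, uint32_t cmd_farpr,
				   uint32_t nportid, uint32_t my_did)
{
	struct farp_sketch *fp;

	*(uint32_t *)pcmd = cmd_farpr;		/* command word first       */
	pcmd += sizeof(uint32_t);

	fp = (struct farp_sketch *)pcmd;	/* then the parameter block */
	memset(fp, 0, sizeof(*fp));
	fp->match_addr = nportid;
	fp->resp_addr = my_did;
	/* the port and node world-wide names would be copied in here */

	return sizeof(uint32_t) + sizeof(*fp);	/* how cmdsize is formed    */
}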
2903
e59058c4 2904/**
3621a710 2905 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
e59058c4
JS
2906 * @vport: pointer to a host virtual N_Port data structure.
2907 * @nlp: pointer to a node-list data structure.
2908 *
2909 * This routine cancels the timer with a delayed IOCB-command retry for
 2910 * a @vport's @ndlp. It stops the timer for the delayed function retry and
 2911 * removes the ELS retry event if it is present. In addition, if the
2912 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
2913 * commands are sent for the @vport's nodes that require issuing discovery
2914 * ADISC.
2915 **/
fdcebe28 2916void
2e0fef85 2917lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
fdcebe28 2918{
2e0fef85 2919 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
e47c9093 2920 struct lpfc_work_evt *evtp;
2e0fef85 2921
0d2b6b83
JS
2922 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
2923 return;
2e0fef85 2924 spin_lock_irq(shost->host_lock);
fdcebe28 2925 nlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 2926 spin_unlock_irq(shost->host_lock);
fdcebe28
JS
2927 del_timer_sync(&nlp->nlp_delayfunc);
2928 nlp->nlp_last_elscmd = 0;
e47c9093 2929 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
fdcebe28 2930 list_del_init(&nlp->els_retry_evt.evt_listp);
e47c9093
JS
2931 /* Decrement nlp reference count held for the delayed retry */
2932 evtp = &nlp->els_retry_evt;
2933 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
2934 }
fdcebe28 2935 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
2e0fef85 2936 spin_lock_irq(shost->host_lock);
fdcebe28 2937 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2e0fef85
JS
2938 spin_unlock_irq(shost->host_lock);
2939 if (vport->num_disc_nodes) {
0d2b6b83
JS
2940 if (vport->port_state < LPFC_VPORT_READY) {
2941 /* Check if there are more ADISCs to be sent */
2942 lpfc_more_adisc(vport);
0d2b6b83
JS
2943 } else {
2944 /* Check if there are more PLOGIs to be sent */
2945 lpfc_more_plogi(vport);
90160e01
JS
2946 if (vport->num_disc_nodes == 0) {
2947 spin_lock_irq(shost->host_lock);
2948 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2949 spin_unlock_irq(shost->host_lock);
2950 lpfc_can_disctmo(vport);
2951 lpfc_end_rscn(vport);
2952 }
fdcebe28
JS
2953 }
2954 }
2955 }
2956 return;
2957}
2958
e59058c4 2959/**
3621a710 2960 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
e59058c4
JS
2961 * @ptr: holder for the pointer to the timer function associated data (ndlp).
2962 *
2963 * This routine is invoked by the ndlp delayed-function timer to check
2964 * whether there is any pending ELS retry event(s) with the node. If not, it
2965 * simply returns. Otherwise, if there is at least one ELS delayed event, it
2966 * adds the delayed events to the HBA work list and invokes the
2967 * lpfc_worker_wake_up() routine to wake up worker thread to process the
2968 * event. Note that lpfc_nlp_get() is called before posting the event to
2969 * the work list to hold reference count of ndlp so that it guarantees the
2970 * reference to ndlp will still be available when the worker thread gets
2971 * to the event associated with the ndlp.
2972 **/
dea3101e
JB
2973void
2974lpfc_els_retry_delay(unsigned long ptr)
2975{
2e0fef85
JS
2976 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2977 struct lpfc_vport *vport = ndlp->vport;
2e0fef85 2978 struct lpfc_hba *phba = vport->phba;
92d7f7b0 2979 unsigned long flags;
2e0fef85 2980 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
dea3101e 2981
92d7f7b0 2982 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 2983 if (!list_empty(&evtp->evt_listp)) {
92d7f7b0 2984 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e
JB
2985 return;
2986 }
2987
fa4066b6
JS
2988 /* We need to hold the node by incrementing the reference
2989 * count until the queued work is done
2990 */
2991 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
5e9d9b82
JS
2992 if (evtp->evt_arg1) {
2993 evtp->evt = LPFC_EVT_ELS_RETRY;
2994 list_add_tail(&evtp->evt_listp, &phba->work_list);
92d7f7b0 2995 lpfc_worker_wake_up(phba);
5e9d9b82 2996 }
92d7f7b0 2997 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e
JB
2998 return;
2999}
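
/*
 * Editor's sketch (standalone, not part of the driver): the timer callback
 * above follows a "hold before queue" discipline -- take a reference on the
 * node before the retry event is queued, and let the worker (or a failure
 * path) drop it once the event has been consumed.  Names are illustrative.
 */
#include <stdbool.h>

struct retry_node_sketch {
	int refcount;		/* stands in for the kref held on the ndlp */
	bool event_queued;	/* stands in for evt_listp being non-empty  */
};

static bool queue_delayed_retry_sketch(struct retry_node_sketch *node)
{
	if (node->event_queued)
		return false;	/* the event is already on the work list */

	node->refcount++;	/* hold the node for the worker thread   */
	node->event_queued = true;
	return true;
}

static void consume_delayed_retry_sketch(struct retry_node_sketch *node)
{
	node->event_queued = false;
	/* ... the saved ELS command would be re-issued here ... */
	node->refcount--;	/* the worker is done with the node      */
}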
3000
e59058c4 3001/**
3621a710 3002 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
e59058c4
JS
3003 * @ndlp: pointer to a node-list data structure.
3004 *
3005 * This routine is the worker-thread handler for processing the @ndlp delayed
3006 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
3007 * the last ELS command from the associated ndlp and invokes the proper ELS
3008 * function according to the delayed ELS command to retry the command.
3009 **/
dea3101e
JB
3010void
3011lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
3012{
2e0fef85
JS
3013 struct lpfc_vport *vport = ndlp->vport;
3014 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
eb016566 3015 uint32_t cmd, retry;
dea3101e 3016
2e0fef85 3017 spin_lock_irq(shost->host_lock);
5024ab17
JW
3018 cmd = ndlp->nlp_last_elscmd;
3019 ndlp->nlp_last_elscmd = 0;
dea3101e
JB
3020
3021 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2e0fef85 3022 spin_unlock_irq(shost->host_lock);
dea3101e
JB
3023 return;
3024 }
3025
3026 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 3027 spin_unlock_irq(shost->host_lock);
1a169689
JS
3028 /*
3029 * If a discovery event readded nlp_delayfunc after timer
3030 * firing and before processing the timer, cancel the
3031 * nlp_delayfunc.
3032 */
3033 del_timer_sync(&ndlp->nlp_delayfunc);
dea3101e 3034 retry = ndlp->nlp_retry;
4d9ab994 3035 ndlp->nlp_retry = 0;
dea3101e
JB
3036
3037 switch (cmd) {
3038 case ELS_CMD_FLOGI:
2e0fef85 3039 lpfc_issue_els_flogi(vport, ndlp, retry);
dea3101e
JB
3040 break;
3041 case ELS_CMD_PLOGI:
2e0fef85 3042 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
5024ab17 3043 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 3044 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6ad42535 3045 }
dea3101e
JB
3046 break;
3047 case ELS_CMD_ADISC:
2e0fef85 3048 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
5024ab17 3049 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 3050 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
6ad42535 3051 }
dea3101e
JB
3052 break;
3053 case ELS_CMD_PRLI:
2e0fef85 3054 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
5024ab17 3055 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 3056 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
6ad42535 3057 }
dea3101e
JB
3058 break;
3059 case ELS_CMD_LOGO:
2e0fef85 3060 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
5024ab17 3061 ndlp->nlp_prev_state = ndlp->nlp_state;
086a345f 3062 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
6ad42535 3063 }
dea3101e 3064 break;
92d7f7b0 3065 case ELS_CMD_FDISC:
fedd3b7b
JS
3066 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
3067 lpfc_issue_els_fdisc(vport, ndlp, retry);
92d7f7b0 3068 break;
dea3101e
JB
3069 }
3070 return;
3071}
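
/*
 * Editor's sketch (standalone, not part of the driver): the handler above
 * re-dispatches on the ELS command that was recorded when the delay timer
 * was armed.  The reduced switch below shows that idea; the command values
 * and context fields are illustrative only.
 */
enum saved_els_sketch {
	SAVED_NONE_SKETCH,
	SAVED_PLOGI_SKETCH,
	SAVED_PRLI_SKETCH
};

struct delayed_ctx_sketch {
	enum saved_els_sketch last_cmd;	/* stands in for nlp_last_elscmd */
	int retry;			/* stands in for nlp_retry       */
};

static void reissue_delayed_els_sketch(struct delayed_ctx_sketch *ctx)
{
	enum saved_els_sketch cmd = ctx->last_cmd;

	ctx->last_cmd = SAVED_NONE_SKETCH;	/* consume the saved command */

	switch (cmd) {
	case SAVED_PLOGI_SKETCH:
		/* re-issue PLOGI, carrying ctx->retry attempts already made */
		break;
	case SAVED_PRLI_SKETCH:
		/* re-issue PRLI, carrying ctx->retry attempts already made  */
		break;
	default:
		break;				/* nothing was pending        */
	}
}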
3072
e59058c4 3073/**
3621a710 3074 * lpfc_els_retry - Make retry decision on an els command iocb
e59058c4
JS
3075 * @phba: pointer to lpfc hba data structure.
3076 * @cmdiocb: pointer to lpfc command iocb data structure.
3077 * @rspiocb: pointer to lpfc response iocb data structure.
3078 *
3079 * This routine makes a retry decision on an ELS command IOCB, which has
3080 * failed. The following ELS IOCBs use this function for retrying the command
 3081 * when a previously issued command responded with an error status: FLOGI, PLOGI,
3082 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
3083 * returned error status, it makes the decision whether a retry shall be
3084 * issued for the command, and whether a retry shall be made immediately or
3085 * delayed. In the former case, the corresponding ELS command issuing-function
 3086 * is called to retry the command. In the latter case, the ELS command shall
3087 * be posted to the ndlp delayed event and delayed function timer set to the
 3088 * ndlp for the delayed command issuing.
3089 *
3090 * Return code
3091 * 0 - No retry of els command is made
3092 * 1 - Immediate or delayed retry of els command is made
3093 **/
dea3101e 3094static int
2e0fef85
JS
3095lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3096 struct lpfc_iocbq *rspiocb)
dea3101e 3097{
2e0fef85
JS
3098 struct lpfc_vport *vport = cmdiocb->vport;
3099 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3100 IOCB_t *irsp = &rspiocb->iocb;
3101 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3102 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
dea3101e
JB
3103 uint32_t *elscmd;
3104 struct ls_rjt stat;
2e0fef85 3105 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
98c9ea5c 3106 int logerr = 0;
2e0fef85 3107 uint32_t cmd = 0;
488d1469 3108 uint32_t did;
dea3101e 3109
488d1469 3110
dea3101e
JB
3111 /* Note: context2 may be 0 for internal driver abort
 3112 * of a delayed ELS command.
3113 */
3114
3115 if (pcmd && pcmd->virt) {
3116 elscmd = (uint32_t *) (pcmd->virt);
3117 cmd = *elscmd++;
3118 }
3119
e47c9093 3120 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
488d1469
JS
3121 did = ndlp->nlp_DID;
3122 else {
3123 /* We should only hit this case for retrying PLOGI */
3124 did = irsp->un.elsreq64.remoteID;
2e0fef85 3125 ndlp = lpfc_findnode_did(vport, did);
e47c9093
JS
3126 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
3127 && (cmd != ELS_CMD_PLOGI))
488d1469
JS
3128 return 1;
3129 }
3130
858c9f6c
JS
3131 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3132 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
3133 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
3134
dea3101e
JB
3135 switch (irsp->ulpStatus) {
3136 case IOSTAT_FCP_RSP_ERROR:
1151e3ec 3137 break;
dea3101e 3138 case IOSTAT_REMOTE_STOP:
1151e3ec
JS
3139 if (phba->sli_rev == LPFC_SLI_REV4) {
3140 /* This IO was aborted by the target, we don't
3141 * know the rxid and because we did not send the
 3142 * ABTS we cannot generate an RRQ.
3143 */
3144 lpfc_set_rrq_active(phba, ndlp,
ee0f4fe1 3145 cmdiocb->sli4_lxritag, 0, 0);
1151e3ec 3146 }
dea3101e 3147 break;
dea3101e 3148 case IOSTAT_LOCAL_REJECT:
e3d2b802 3149 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
dea3101e 3150 case IOERR_LOOP_OPEN_FAILURE:
eaf15d5b
JS
3151 if (cmd == ELS_CMD_FLOGI) {
3152 if (PCI_DEVICE_ID_HORNET ==
3153 phba->pcidev->device) {
76a95d75 3154 phba->fc_topology = LPFC_TOPOLOGY_LOOP;
eaf15d5b
JS
3155 phba->pport->fc_myDID = 0;
3156 phba->alpa_map[0] = 0;
3157 phba->alpa_map[1] = 0;
3158 }
3159 }
2e0fef85 3160 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
92d7f7b0 3161 delay = 1000;
dea3101e
JB
3162 retry = 1;
3163 break;
3164
92d7f7b0 3165 case IOERR_ILLEGAL_COMMAND:
7f5f3d0d
JS
3166 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3167 "0124 Retry illegal cmd x%x "
3168 "retry:x%x delay:x%x\n",
3169 cmd, cmdiocb->retry, delay);
3170 retry = 1;
3171 /* All command's retry policy */
3172 maxretry = 8;
3173 if (cmdiocb->retry > 2)
3174 delay = 1000;
92d7f7b0
JS
3175 break;
3176
dea3101e 3177 case IOERR_NO_RESOURCES:
98c9ea5c 3178 logerr = 1; /* HBA out of resources */
858c9f6c
JS
3179 retry = 1;
3180 if (cmdiocb->retry > 100)
3181 delay = 100;
3182 maxretry = 250;
3183 break;
3184
3185 case IOERR_ILLEGAL_FRAME:
92d7f7b0 3186 delay = 100;
dea3101e
JB
3187 retry = 1;
3188 break;
3189
858c9f6c 3190 case IOERR_SEQUENCE_TIMEOUT:
dea3101e 3191 case IOERR_INVALID_RPI:
5b5b36a9
JS
3192 if (cmd == ELS_CMD_PLOGI &&
3193 did == NameServer_DID) {
3194 /* Continue forever if plogi to */
3195 /* the nameserver fails */
3196 maxretry = 0;
3197 delay = 100;
3198 }
dea3101e
JB
3199 retry = 1;
3200 break;
3201 }
3202 break;
3203
3204 case IOSTAT_NPORT_RJT:
3205 case IOSTAT_FABRIC_RJT:
3206 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
3207 retry = 1;
3208 break;
3209 }
3210 break;
3211
3212 case IOSTAT_NPORT_BSY:
3213 case IOSTAT_FABRIC_BSY:
98c9ea5c 3214 logerr = 1; /* Fabric / Remote NPort out of resources */
dea3101e
JB
3215 retry = 1;
3216 break;
3217
3218 case IOSTAT_LS_RJT:
3219 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
 3220 /* Added for Vendor specific support
3221 * Just keep retrying for these Rsn / Exp codes
3222 */
3223 switch (stat.un.b.lsRjtRsnCode) {
3224 case LSRJT_UNABLE_TPC:
3225 if (stat.un.b.lsRjtRsnCodeExp ==
3226 LSEXP_CMD_IN_PROGRESS) {
3227 if (cmd == ELS_CMD_PLOGI) {
92d7f7b0 3228 delay = 1000;
dea3101e
JB
3229 maxretry = 48;
3230 }
3231 retry = 1;
3232 break;
3233 }
ffc95493
JS
3234 if (stat.un.b.lsRjtRsnCodeExp ==
3235 LSEXP_CANT_GIVE_DATA) {
3236 if (cmd == ELS_CMD_PLOGI) {
3237 delay = 1000;
3238 maxretry = 48;
3239 }
3240 retry = 1;
3241 break;
3242 }
4c1b64ba
JS
3243 if ((cmd == ELS_CMD_PLOGI) ||
3244 (cmd == ELS_CMD_PRLI)) {
92d7f7b0 3245 delay = 1000;
dea3101e
JB
3246 maxretry = lpfc_max_els_tries + 1;
3247 retry = 1;
3248 break;
3249 }
92d7f7b0
JS
3250 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3251 (cmd == ELS_CMD_FDISC) &&
3252 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
e8b62011
JS
3253 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3254 "0125 FDISC Failed (x%x). "
3255 "Fabric out of resources\n",
3256 stat.un.lsRjtError);
92d7f7b0
JS
3257 lpfc_vport_set_state(vport,
3258 FC_VPORT_NO_FABRIC_RSCS);
3259 }
dea3101e
JB
3260 break;
3261
3262 case LSRJT_LOGICAL_BSY:
858c9f6c
JS
3263 if ((cmd == ELS_CMD_PLOGI) ||
3264 (cmd == ELS_CMD_PRLI)) {
92d7f7b0 3265 delay = 1000;
dea3101e 3266 maxretry = 48;
92d7f7b0 3267 } else if (cmd == ELS_CMD_FDISC) {
51ef4c26
JS
3268 /* FDISC retry policy */
3269 maxretry = 48;
3270 if (cmdiocb->retry >= 32)
3271 delay = 1000;
dea3101e
JB
3272 }
3273 retry = 1;
3274 break;
92d7f7b0
JS
3275
3276 case LSRJT_LOGICAL_ERR:
7f5f3d0d
JS
3277 /* There are some cases where switches return this
3278 * error when they are not ready and should be returning
3279 * Logical Busy. We should delay every time.
3280 */
3281 if (cmd == ELS_CMD_FDISC &&
3282 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
3283 maxretry = 3;
3284 delay = 1000;
3285 retry = 1;
3286 break;
3287 }
92d7f7b0
JS
3288 case LSRJT_PROTOCOL_ERR:
3289 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3290 (cmd == ELS_CMD_FDISC) &&
3291 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
3292 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
3293 ) {
e8b62011 3294 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
d7c255b2 3295 "0122 FDISC Failed (x%x). "
e8b62011
JS
3296 "Fabric Detected Bad WWN\n",
3297 stat.un.lsRjtError);
92d7f7b0
JS
3298 lpfc_vport_set_state(vport,
3299 FC_VPORT_FABRIC_REJ_WWN);
3300 }
3301 break;
7bdedb34
JS
3302 case LSRJT_VENDOR_UNIQUE:
3303 if ((stat.un.b.vendorUnique == 0x45) &&
3304 (cmd == ELS_CMD_FLOGI)) {
3305 goto out_retry;
3306 }
3307 break;
dea3101e
JB
3308 }
3309 break;
3310
3311 case IOSTAT_INTERMED_RSP:
3312 case IOSTAT_BA_RJT:
3313 break;
3314
3315 default:
3316 break;
3317 }
3318
488d1469 3319 if (did == FDMI_DID)
dea3101e 3320 retry = 1;
dea3101e 3321
df9e1b59 3322 if ((cmd == ELS_CMD_FLOGI) &&
76a95d75 3323 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
1b32f6aa 3324 !lpfc_error_lost_link(irsp)) {
98c9ea5c
JS
3325 /* FLOGI retry policy */
3326 retry = 1;
df9e1b59 3327 /* retry FLOGI forever */
6eae4303
JS
3328 if (phba->link_flag != LS_LOOPBACK_MODE)
3329 maxretry = 0;
3330 else
3331 maxretry = 2;
3332
6669f9bb
JS
3333 if (cmdiocb->retry >= 100)
3334 delay = 5000;
3335 else if (cmdiocb->retry >= 32)
98c9ea5c 3336 delay = 1000;
df9e1b59
JS
3337 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
3338 /* retry FDISCs every second up to devloss */
3339 retry = 1;
3340 maxretry = vport->cfg_devloss_tmo;
3341 delay = 1000;
98c9ea5c
JS
3342 }
3343
6669f9bb
JS
3344 cmdiocb->retry++;
3345 if (maxretry && (cmdiocb->retry >= maxretry)) {
dea3101e
JB
3346 phba->fc_stat.elsRetryExceeded++;
3347 retry = 0;
3348 }
3349
ed957684
JS
3350 if ((vport->load_flag & FC_UNLOADING) != 0)
3351 retry = 0;
3352
7bdedb34 3353out_retry:
dea3101e 3354 if (retry) {
38b92ef8
JS
3355 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
3356 /* Stop retrying PLOGI and FDISC if in FCF discovery */
3357 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3358 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3359 "2849 Stop retry ELS command "
3360 "x%x to remote NPORT x%x, "
3361 "Data: x%x x%x\n", cmd, did,
3362 cmdiocb->retry, delay);
3363 return 0;
3364 }
3365 }
dea3101e
JB
3366
3367 /* Retry ELS command <elsCmd> to remote NPORT <did> */
e8b62011
JS
3368 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3369 "0107 Retry ELS command x%x to remote "
3370 "NPORT x%x Data: x%x x%x\n",
3371 cmd, did, cmdiocb->retry, delay);
dea3101e 3372
858c9f6c
JS
3373 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
3374 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
e3d2b802
JS
3375 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
3376 IOERR_NO_RESOURCES))) {
858c9f6c
JS
3377 /* Don't reset timer for no resources */
3378
dea3101e 3379 /* If discovery / RSCN timer is running, reset it */
2e0fef85 3380 if (timer_pending(&vport->fc_disctmo) ||
92d7f7b0 3381 (vport->fc_flag & FC_RSCN_MODE))
2e0fef85 3382 lpfc_set_disctmo(vport);
dea3101e
JB
3383 }
3384
3385 phba->fc_stat.elsXmitRetry++;
58da1ffb 3386 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
dea3101e
JB
3387 phba->fc_stat.elsDelayRetry++;
3388 ndlp->nlp_retry = cmdiocb->retry;
3389
92d7f7b0
JS
3390 /* delay is specified in milliseconds */
3391 mod_timer(&ndlp->nlp_delayfunc,
3392 jiffies + msecs_to_jiffies(delay));
2e0fef85 3393 spin_lock_irq(shost->host_lock);
dea3101e 3394 ndlp->nlp_flag |= NLP_DELAY_TMO;
2e0fef85 3395 spin_unlock_irq(shost->host_lock);
dea3101e 3396
5024ab17 3397 ndlp->nlp_prev_state = ndlp->nlp_state;
858c9f6c
JS
3398 if (cmd == ELS_CMD_PRLI)
3399 lpfc_nlp_set_state(vport, ndlp,
4c1b64ba 3400 NLP_STE_PRLI_ISSUE);
858c9f6c
JS
3401 else
3402 lpfc_nlp_set_state(vport, ndlp,
3403 NLP_STE_NPR_NODE);
dea3101e
JB
3404 ndlp->nlp_last_elscmd = cmd;
3405
c9f8735b 3406 return 1;
dea3101e
JB
3407 }
3408 switch (cmd) {
3409 case ELS_CMD_FLOGI:
2e0fef85 3410 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
c9f8735b 3411 return 1;
92d7f7b0
JS
3412 case ELS_CMD_FDISC:
3413 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
3414 return 1;
dea3101e 3415 case ELS_CMD_PLOGI:
58da1ffb 3416 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
488d1469 3417 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 3418 lpfc_nlp_set_state(vport, ndlp,
de0c5b32 3419 NLP_STE_PLOGI_ISSUE);
488d1469 3420 }
2e0fef85 3421 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
c9f8735b 3422 return 1;
dea3101e 3423 case ELS_CMD_ADISC:
5024ab17 3424 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
3425 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3426 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
c9f8735b 3427 return 1;
dea3101e 3428 case ELS_CMD_PRLI:
5024ab17 3429 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
3430 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3431 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
c9f8735b 3432 return 1;
dea3101e 3433 case ELS_CMD_LOGO:
5024ab17 3434 ndlp->nlp_prev_state = ndlp->nlp_state;
086a345f 3435 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
2e0fef85 3436 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
c9f8735b 3437 return 1;
dea3101e
JB
3438 }
3439 }
dea3101e 3440 /* No retry ELS command <elsCmd> to remote NPORT <did> */
98c9ea5c
JS
3441 if (logerr) {
3442 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3443 "0137 No retry ELS command x%x to remote "
3444 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
3445 cmd, did, irsp->ulpStatus,
3446 irsp->un.ulpWord[4]);
3447 }
3448 else {
3449 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
a58cbd52
JS
3450 "0108 No retry ELS command x%x to remote "
3451 "NPORT x%x Retried:%d Error:x%x/%x\n",
3452 cmd, did, cmdiocb->retry, irsp->ulpStatus,
3453 irsp->un.ulpWord[4]);
98c9ea5c 3454 }
c9f8735b 3455 return 0;
dea3101e
JB
3456}
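
/*
 * Editor's sketch (standalone, not part of the driver): the retry decision
 * above reduces to three outputs per failure class -- whether to retry, an
 * upper bound on attempts, and an optional delay before the next attempt.
 * The classes and limits below loosely mirror two of the cases handled
 * above and are illustrative only.
 */
enum els_fail_sketch {
	FAIL_NO_RESOURCES_SKETCH,	/* HBA or fabric out of resources */
	FAIL_LOGICAL_BUSY_SKETCH,	/* LS_RJT: logical busy           */
	FAIL_OTHER_SKETCH
};

struct els_retry_sketch {
	int retry;			/* 1 = issue the command again    */
	int maxretry;			/* 0 = no upper bound             */
	int delay_ms;			/* 0 = retry immediately          */
};

static struct els_retry_sketch
els_retry_decision_sketch(enum els_fail_sketch why, int attempts)
{
	struct els_retry_sketch d = { 0, 3, 0 };

	switch (why) {
	case FAIL_NO_RESOURCES_SKETCH:
		d.retry = 1;
		d.maxretry = 250;
		if (attempts > 100)
			d.delay_ms = 100;	/* back off once it drags on    */
		break;
	case FAIL_LOGICAL_BUSY_SKETCH:
		d.retry = 1;
		d.maxretry = 48;
		d.delay_ms = 1000;		/* give the other side a second */
		break;
	default:
		break;				/* no retry for this class      */
	}

	if (d.maxretry && attempts >= d.maxretry)
		d.retry = 0;			/* retry budget exhausted       */
	return d;
}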
3457
e59058c4 3458/**
3621a710 3459 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
e59058c4
JS
3460 * @phba: pointer to lpfc hba data structure.
3461 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
3462 *
3463 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
3464 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
3465 * checks to see whether there is a lpfc DMA buffer associated with the
3466 * response of the command IOCB. If so, it will be released before releasing
3467 * the lpfc DMA buffer associated with the IOCB itself.
3468 *
3469 * Return code
3470 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
3471 **/
09372820 3472static int
87af33fe
JS
3473lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
3474{
3475 struct lpfc_dmabuf *buf_ptr;
3476
e59058c4 3477 /* Free the response before processing the command. */
87af33fe
JS
3478 if (!list_empty(&buf_ptr1->list)) {
3479 list_remove_head(&buf_ptr1->list, buf_ptr,
3480 struct lpfc_dmabuf,
3481 list);
3482 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3483 kfree(buf_ptr);
3484 }
3485 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
3486 kfree(buf_ptr1);
3487 return 0;
3488}
3489
e59058c4 3490/**
3621a710 3491 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
e59058c4
JS
3492 * @phba: pointer to lpfc hba data structure.
3493 * @buf_ptr: pointer to the lpfc dma buffer data structure.
3494 *
3495 * This routine releases the lpfc Direct Memory Access (DMA) buffer
3496 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
3497 * pool.
3498 *
3499 * Return code
3500 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
3501 **/
09372820 3502static int
87af33fe
JS
3503lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
3504{
3505 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3506 kfree(buf_ptr);
3507 return 0;
3508}
3509
e59058c4 3510/**
3621a710 3511 * lpfc_els_free_iocb - Free a command iocb and its associated resources
e59058c4
JS
3512 * @phba: pointer to lpfc hba data structure.
3513 * @elsiocb: pointer to lpfc els command iocb data structure.
3514 *
3515 * This routine frees a command IOCB and its associated resources. The
 3516 * command IOCB data structure contains references to various associated
 3517 * resources; these fields must be set to NULL if the associated reference
 3518 * is not present:
3519 * context1 - reference to ndlp
3520 * context2 - reference to cmd
3521 * context2->next - reference to rsp
3522 * context3 - reference to bpl
3523 *
3524 * It first properly decrements the reference count held on ndlp for the
3525 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
3526 * set, it invokes the lpfc_els_free_data() routine to release the Direct
3527 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
 3528 * adds the DMA buffer to the @phba data structure for the delayed release.
3529 * If reference to the Buffer Pointer List (BPL) is present, the
3530 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
3531 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
3532 * invoked to release the IOCB data structure back to @phba IOCBQ list.
3533 *
3534 * Return code
3535 * 0 - Success (currently, always return 0)
3536 **/
dea3101e 3537int
329f9bc7 3538lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
dea3101e
JB
3539{
3540 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
a8adb832
JS
3541 struct lpfc_nodelist *ndlp;
3542
3543 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
3544 if (ndlp) {
3545 if (ndlp->nlp_flag & NLP_DEFER_RM) {
3546 lpfc_nlp_put(ndlp);
dea3101e 3547
a8adb832
JS
3548 /* If the ndlp is not being used by another discovery
3549 * thread, free it.
3550 */
3551 if (!lpfc_nlp_not_used(ndlp)) {
3552 /* If ndlp is being used by another discovery
3553 * thread, just clear NLP_DEFER_RM
3554 */
3555 ndlp->nlp_flag &= ~NLP_DEFER_RM;
3556 }
3557 }
3558 else
3559 lpfc_nlp_put(ndlp);
329f9bc7
JS
3560 elsiocb->context1 = NULL;
3561 }
dea3101e
JB
3562 /* context2 = cmd, context2->next = rsp, context3 = bpl */
3563 if (elsiocb->context2) {
0ff10d46
JS
3564 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
3565 /* Firmware could still be in progress of DMAing
3566 * payload, so don't free data buffer till after
3567 * a hbeat.
3568 */
3569 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
3570 buf_ptr = elsiocb->context2;
3571 elsiocb->context2 = NULL;
3572 if (buf_ptr) {
3573 buf_ptr1 = NULL;
3574 spin_lock_irq(&phba->hbalock);
3575 if (!list_empty(&buf_ptr->list)) {
3576 list_remove_head(&buf_ptr->list,
3577 buf_ptr1, struct lpfc_dmabuf,
3578 list);
3579 INIT_LIST_HEAD(&buf_ptr1->list);
3580 list_add_tail(&buf_ptr1->list,
3581 &phba->elsbuf);
3582 phba->elsbuf_cnt++;
3583 }
3584 INIT_LIST_HEAD(&buf_ptr->list);
3585 list_add_tail(&buf_ptr->list, &phba->elsbuf);
3586 phba->elsbuf_cnt++;
3587 spin_unlock_irq(&phba->hbalock);
3588 }
3589 } else {
3590 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3591 lpfc_els_free_data(phba, buf_ptr1);
3592 }
dea3101e
JB
3593 }
3594
3595 if (elsiocb->context3) {
3596 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
87af33fe 3597 lpfc_els_free_bpl(phba, buf_ptr);
dea3101e 3598 }
604a3e30 3599 lpfc_sli_release_iocbq(phba, elsiocb);
dea3101e
JB
3600 return 0;
3601}
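
/*
 * Editor's sketch (standalone, not part of the driver): the teardown order
 * used above -- drop the node reference, free the response buffer before
 * the command buffer, then the buffer pointer list, and finally the iocb
 * itself.  The structures are toy stand-ins for context1/2/3; real buffers
 * come from DMA pools, not malloc.
 */
#include <stdlib.h>

struct dmabuf_sketch {
	void *virt;
	struct dmabuf_sketch *next;	/* cmd buffer links to the rsp buffer */
};

struct els_iocb_sketch {
	int *node_refcount;		/* stands in for context1 (ndlp) */
	struct dmabuf_sketch *cmd;	/* stands in for context2        */
	struct dmabuf_sketch *bpl;	/* stands in for context3        */
};

static void free_els_iocb_sketch(struct els_iocb_sketch *iocb)
{
	if (iocb->node_refcount)
		(*iocb->node_refcount)--;	/* release the node hold        */

	if (iocb->cmd) {
		if (iocb->cmd->next) {		/* response buffer goes first   */
			free(iocb->cmd->next->virt);
			free(iocb->cmd->next);
		}
		free(iocb->cmd->virt);		/* then the command buffer      */
		free(iocb->cmd);
	}

	if (iocb->bpl) {			/* then the buffer pointer list */
		free(iocb->bpl->virt);
		free(iocb->bpl);
	}

	free(iocb);				/* finally the iocb itself      */
}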
3602
e59058c4 3603/**
3621a710 3604 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
e59058c4
JS
3605 * @phba: pointer to lpfc hba data structure.
3606 * @cmdiocb: pointer to lpfc command iocb data structure.
3607 * @rspiocb: pointer to lpfc response iocb data structure.
3608 *
3609 * This routine is the completion callback function to the Logout (LOGO)
3610 * Accept (ACC) Response ELS command. This routine is invoked to indicate
3611 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
3612 * release the ndlp if it has the last reference remaining (reference count
 3613 * is 1). If that succeeds (meaning the ndlp was released), it sets the IOCB context1
3614 * field to NULL to inform the following lpfc_els_free_iocb() routine no
3615 * ndlp reference count needs to be decremented. Otherwise, the ndlp
3616 * reference use-count shall be decremented by the lpfc_els_free_iocb()
3617 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
3618 * IOCB data structure.
3619 **/
dea3101e 3620static void
2e0fef85
JS
3621lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3622 struct lpfc_iocbq *rspiocb)
dea3101e 3623{
2e0fef85
JS
3624 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3625 struct lpfc_vport *vport = cmdiocb->vport;
858c9f6c
JS
3626 IOCB_t *irsp;
3627
3628 irsp = &rspiocb->iocb;
3629 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3630 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
3631 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
dea3101e 3632 /* ACC to LOGO completes to NPort <nlp_DID> */
e8b62011
JS
3633 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3634 "0109 ACC to LOGO completes to NPort x%x "
3635 "Data: x%x x%x x%x\n",
3636 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3637 ndlp->nlp_rpi);
87af33fe
JS
3638
3639 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
3640 /* NPort Recovery mode or node is just allocated */
3641 if (!lpfc_nlp_not_used(ndlp)) {
3642 /* If the ndlp is being used by another discovery
3643 * thread, just unregister the RPI.
3644 */
3645 lpfc_unreg_rpi(vport, ndlp);
fa4066b6
JS
3646 } else {
 3647 /* Indicate the node has already been released; do
 3648 * not reference it from within lpfc_els_free_iocb.
3649 */
3650 cmdiocb->context1 = NULL;
87af33fe 3651 }
dea3101e 3652 }
73d91e50
JS
3653
3654 /*
3655 * The driver received a LOGO from the rport and has ACK'd it.
df9e1b59 3656 * At this point, the driver is done so release the IOCB
73d91e50 3657 */
dea3101e 3658 lpfc_els_free_iocb(phba, cmdiocb);
dea3101e
JB
3659}
3660
e59058c4 3661/**
3621a710 3662 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
e59058c4
JS
3663 * @phba: pointer to lpfc hba data structure.
3664 * @pmb: pointer to the driver internal queue element for mailbox command.
3665 *
3666 * This routine is the completion callback function for unregister default
3667 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
3668 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
3669 * decrements the ndlp reference count held for this completion callback
3670 * function. After that, it invokes the lpfc_nlp_not_used() to check
3671 * whether there is only one reference left on the ndlp. If so, it will
3672 * perform one more decrement and trigger the release of the ndlp.
3673 **/
858c9f6c
JS
3674void
3675lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3676{
3677 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3678 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3679
3680 pmb->context1 = NULL;
d439d286
JS
3681 pmb->context2 = NULL;
3682
858c9f6c
JS
3683 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3684 kfree(mp);
3685 mempool_free(pmb, phba->mbox_mem_pool);
086a345f 3686 if (ndlp) {
be6bb941
JS
3687 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3688 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
3689 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3690 atomic_read(&ndlp->kref.refcount),
3691 ndlp->nlp_usg_map, ndlp);
086a345f
JS
3692 if (NLP_CHK_NODE_ACT(ndlp)) {
3693 lpfc_nlp_put(ndlp);
3694 /* This is the end of the default RPI cleanup logic for
3695 * this ndlp. If no other discovery threads are using
3696 * this ndlp, free all resources associated with it.
3697 */
3698 lpfc_nlp_not_used(ndlp);
3699 } else {
3700 lpfc_drop_node(ndlp->vport, ndlp);
3701 }
a8adb832 3702 }
3772a991 3703
858c9f6c
JS
3704 return;
3705}
3706
e59058c4 3707/**
3621a710 3708 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
e59058c4
JS
3709 * @phba: pointer to lpfc hba data structure.
3710 * @cmdiocb: pointer to lpfc command iocb data structure.
3711 * @rspiocb: pointer to lpfc response iocb data structure.
3712 *
3713 * This routine is the completion callback function for ELS Response IOCB
 3714 * command. In the normal case, this callback function just properly sets the
 3715 * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
 3716 * field in the command IOCB is not NULL, the referred mailbox command will
 3717 * be sent out; it then invokes the lpfc_els_free_iocb() routine to release
3718 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
3719 * link down event occurred during the discovery, the lpfc_nlp_not_used()
3720 * routine shall be invoked trying to release the ndlp if no other threads
3721 * are currently referring it.
3722 **/
dea3101e 3723static void
858c9f6c 3724lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
329f9bc7 3725 struct lpfc_iocbq *rspiocb)
dea3101e 3726{
2e0fef85
JS
3727 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3728 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3729 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
87af33fe
JS
3730 IOCB_t *irsp;
3731 uint8_t *pcmd;
dea3101e 3732 LPFC_MBOXQ_t *mbox = NULL;
2e0fef85 3733 struct lpfc_dmabuf *mp = NULL;
87af33fe 3734 uint32_t ls_rjt = 0;
dea3101e 3735
33ccf8d1
JS
3736 irsp = &rspiocb->iocb;
3737
dea3101e
JB
3738 if (cmdiocb->context_un.mbox)
3739 mbox = cmdiocb->context_un.mbox;
3740
fa4066b6
JS
3741 /* First determine if this is a LS_RJT cmpl. Note, this callback
 3742 * function can have cmdiocb->context1 (ndlp) field set to NULL.
3743 */
87af33fe 3744 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
58da1ffb
JS
3745 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3746 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
fa4066b6 3747 /* A LS_RJT associated with Default RPI cleanup has its own
3ad2f3fb 3748 * separate code path.
87af33fe
JS
3749 */
3750 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3751 ls_rjt = 1;
3752 }
3753
dea3101e 3754 /* Check to see if link went down during discovery */
58da1ffb 3755 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
dea3101e 3756 if (mbox) {
14691150
JS
3757 mp = (struct lpfc_dmabuf *) mbox->context1;
3758 if (mp) {
3759 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3760 kfree(mp);
3761 }
329f9bc7 3762 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e 3763 }
58da1ffb
JS
3764 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3765 (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
fa4066b6 3766 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3767 ndlp = NULL;
fa4066b6
JS
 3768 /* Indicate the node has already been released,
 3769 * should not reference it from within
 3770 * the routine lpfc_els_free_iocb.
3771 */
3772 cmdiocb->context1 = NULL;
3773 }
dea3101e
JB
3774 goto out;
3775 }
3776
858c9f6c 3777 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
51ef4c26 3778 "ELS rsp cmpl: status:x%x/x%x did:x%x",
858c9f6c 3779 irsp->ulpStatus, irsp->un.ulpWord[4],
51ef4c26 3780 cmdiocb->iocb.un.elsreq64.remoteID);
dea3101e 3781 /* ELS response tag <ulpIoTag> completes */
e8b62011
JS
3782 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3783 "0110 ELS response tag x%x completes "
3784 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3785 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3786 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3787 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3788 ndlp->nlp_rpi);
dea3101e
JB
3789 if (mbox) {
3790 if ((rspiocb->iocb.ulpStatus == 0)
3791 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
2e0fef85 3792 lpfc_unreg_rpi(vport, ndlp);
e47c9093
JS
3793 /* Increment reference count to ndlp to hold the
3794 * reference to ndlp for the callback function.
3795 */
329f9bc7 3796 mbox->context2 = lpfc_nlp_get(ndlp);
2e0fef85 3797 mbox->vport = vport;
858c9f6c
JS
3798 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
3799 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3800 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3801 }
3802 else {
3803 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
3804 ndlp->nlp_prev_state = ndlp->nlp_state;
3805 lpfc_nlp_set_state(vport, ndlp,
2e0fef85 3806 NLP_STE_REG_LOGIN_ISSUE);
858c9f6c 3807 }
4b7789b7
JS
3808
3809 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
0b727fea 3810 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
e47c9093 3811 != MBX_NOT_FINISHED)
dea3101e 3812 goto out;
4b7789b7
JS
3813
3814 /* Decrement the ndlp reference count we
3815 * set for this failed mailbox command.
3816 */
3817 lpfc_nlp_put(ndlp);
3818 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
98c9ea5c
JS
3819
3820 /* ELS rsp: Cannot issue reg_login for <NPortid> */
3821 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3822 "0138 ELS rsp: Cannot issue reg_login for x%x "
3823 "Data: x%x x%x x%x\n",
3824 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3825 ndlp->nlp_rpi);
3826
fa4066b6 3827 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3828 ndlp = NULL;
fa4066b6
JS
3829 /* Indicate node has already been released,
3830 * should not reference to it from within
3831 * the routine lpfc_els_free_iocb.
3832 */
3833 cmdiocb->context1 = NULL;
3834 }
dea3101e 3835 } else {
858c9f6c
JS
3836 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
3837 if (!lpfc_error_lost_link(irsp) &&
3838 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
fa4066b6 3839 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3840 ndlp = NULL;
fa4066b6
JS
3841 /* Indicate node has already been
3842 * released, should not reference
3843 * to it from within the routine
3844 * lpfc_els_free_iocb.
3845 */
3846 cmdiocb->context1 = NULL;
3847 }
dea3101e
JB
3848 }
3849 }
14691150
JS
3850 mp = (struct lpfc_dmabuf *) mbox->context1;
3851 if (mp) {
3852 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3853 kfree(mp);
3854 }
3855 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e
JB
3856 }
3857out:
58da1ffb 3858 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2e0fef85 3859 spin_lock_irq(shost->host_lock);
858c9f6c 3860 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
2e0fef85 3861 spin_unlock_irq(shost->host_lock);
87af33fe
JS
3862
3863 /* If the node is not being used by another discovery thread,
3864 * and we are sending a reject, we are done with it.
3865 * Release driver reference count here and free associated
3866 * resources.
3867 */
3868 if (ls_rjt)
fa4066b6
JS
3869 if (lpfc_nlp_not_used(ndlp))
3870 /* Indicate node has already been released,
3871 * should not reference to it from within
3872 * the routine lpfc_els_free_iocb.
3873 */
3874 cmdiocb->context1 = NULL;
4b7789b7 3875
dea3101e 3876 }
87af33fe 3877
dea3101e
JB
3878 lpfc_els_free_iocb(phba, cmdiocb);
3879 return;
3880}
3881
e59058c4 3882/**
3621a710 3883 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
e59058c4
JS
3884 * @vport: pointer to a host virtual N_Port data structure.
3885 * @flag: the els command code to be accepted.
3886 * @oldiocb: pointer to the original lpfc command iocb data structure.
3887 * @ndlp: pointer to a node-list data structure.
3888 * @mbox: pointer to the driver internal queue element for mailbox command.
3889 *
3890 * This routine prepares and issues an Accept (ACC) response IOCB
3891 * command. It uses the @flag to properly set up the IOCB field for the
3892 * specific ACC response command to be issued and invokes the
3893 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
3894 * @mbox pointer is passed in, it will be put into the context_un.mbox
3895 * field of the IOCB for the completion callback function to issue the
3896 * mailbox command to the HBA later when callback is invoked.
3897 *
3898 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3899 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3900 * will be stored into the context1 field of the IOCB for the completion
3901 * callback function to the corresponding response ELS IOCB command.
3902 *
3903 * Return code
3904 * 0 - Successfully issued acc response
3905 * 1 - Failed to issue acc response
3906 **/
dea3101e 3907int
2e0fef85
JS
3908lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3909 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
51ef4c26 3910 LPFC_MBOXQ_t *mbox)
dea3101e 3911{
2e0fef85
JS
3912 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3913 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
3914 IOCB_t *icmd;
3915 IOCB_t *oldcmd;
3916 struct lpfc_iocbq *elsiocb;
dea3101e 3917 uint8_t *pcmd;
d6de08cc 3918 struct serv_parm *sp;
dea3101e
JB
3919 uint16_t cmdsize;
3920 int rc;
82d9a2a2 3921 ELS_PKT *els_pkt_ptr;
dea3101e 3922
dea3101e
JB
3923 oldcmd = &oldiocb->iocb;
3924
3925 switch (flag) {
3926 case ELS_CMD_ACC:
92d7f7b0 3927 cmdsize = sizeof(uint32_t);
2e0fef85
JS
3928 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3929 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3930 if (!elsiocb) {
2e0fef85 3931 spin_lock_irq(shost->host_lock);
5024ab17 3932 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2e0fef85 3933 spin_unlock_irq(shost->host_lock);
c9f8735b 3934 return 1;
dea3101e 3935 }
2e0fef85 3936
dea3101e 3937 icmd = &elsiocb->iocb;
7851fe2c
JS
3938 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3939 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
dea3101e
JB
3940 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3941 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 3942 pcmd += sizeof(uint32_t);
858c9f6c
JS
3943
3944 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3945 "Issue ACC: did:x%x flg:x%x",
3946 ndlp->nlp_DID, ndlp->nlp_flag, 0);
dea3101e 3947 break;
d6de08cc 3948 case ELS_CMD_FLOGI:
dea3101e 3949 case ELS_CMD_PLOGI:
92d7f7b0 3950 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
2e0fef85
JS
3951 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3952 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3953 if (!elsiocb)
c9f8735b 3954 return 1;
488d1469 3955
dea3101e 3956 icmd = &elsiocb->iocb;
7851fe2c
JS
3957 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3958 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
dea3101e
JB
3959 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3960
3961 if (mbox)
3962 elsiocb->context_un.mbox = mbox;
3963
3964 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 3965 pcmd += sizeof(uint32_t);
d6de08cc
JS
3966 sp = (struct serv_parm *)pcmd;
3967
3968 if (flag == ELS_CMD_FLOGI) {
3969 /* Copy the received service parameters back */
3970 memcpy(sp, &phba->fc_fabparam,
3971 sizeof(struct serv_parm));
3972
3973 /* Clear the F_Port bit */
3974 sp->cmn.fPort = 0;
3975
3976 /* Mark all class service parameters as invalid */
3977 sp->cls1.classValid = 0;
3978 sp->cls2.classValid = 0;
3979 sp->cls3.classValid = 0;
3980 sp->cls4.classValid = 0;
3981
3982 /* Copy our worldwide names */
3983 memcpy(&sp->portName, &vport->fc_sparam.portName,
3984 sizeof(struct lpfc_name));
3985 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
3986 sizeof(struct lpfc_name));
3987 } else {
3988 memcpy(pcmd, &vport->fc_sparam,
3989 sizeof(struct serv_parm));
3990 }
858c9f6c
JS
3991
3992 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
d6de08cc 3993 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
858c9f6c 3994 ndlp->nlp_DID, ndlp->nlp_flag, 0);
dea3101e 3995 break;
82d9a2a2 3996 case ELS_CMD_PRLO:
92d7f7b0 3997 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
2e0fef85 3998 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
82d9a2a2
JS
3999 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
4000 if (!elsiocb)
4001 return 1;
4002
4003 icmd = &elsiocb->iocb;
7851fe2c
JS
4004 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4005 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
82d9a2a2
JS
4006 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4007
4008 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
92d7f7b0 4009 sizeof(uint32_t) + sizeof(PRLO));
82d9a2a2
JS
4010 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
4011 els_pkt_ptr = (ELS_PKT *) pcmd;
4012 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
858c9f6c
JS
4013
4014 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4015 "Issue ACC PRLO: did:x%x flg:x%x",
4016 ndlp->nlp_DID, ndlp->nlp_flag, 0);
82d9a2a2 4017 break;
dea3101e 4018 default:
c9f8735b 4019 return 1;
dea3101e 4020 }
dea3101e 4021 /* Xmit ELS ACC response tag <ulpIoTag> */
e8b62011
JS
4022 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4023 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
e6446439
JS
4024 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
4025 "fc_flag x%x\n",
e8b62011
JS
4026 elsiocb->iotag, elsiocb->iocb.ulpContext,
4027 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
e6446439 4028 ndlp->nlp_rpi, vport->fc_flag);
dea3101e 4029 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2e0fef85 4030 spin_lock_irq(shost->host_lock);
7c5e518c
JS
4031 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4032 ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
4033 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2e0fef85 4034 spin_unlock_irq(shost->host_lock);
dea3101e
JB
4035 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
4036 } else {
858c9f6c 4037 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e
JB
4038 }
4039
4040 phba->fc_stat.elsXmitACC++;
3772a991 4041 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
4042 if (rc == IOCB_ERROR) {
4043 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 4044 return 1;
dea3101e 4045 }
c9f8735b 4046 return 0;
dea3101e
JB
4047}
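
/*
 * Editor's sketch (standalone, not part of the driver): every ACC built
 * above is tied back to the exchange of the original request by copying
 * its XRI/rx_id and OX_ID into the response IOCB.  The structure below is
 * a toy stand-in for the relevant IOCB fields.
 */
#include <stdint.h>

struct exch_ids_sketch {
	uint16_t xri_rx_id;	/* stands in for iocb.ulpContext      */
	uint16_t ox_id;		/* stands in for unsli3.rcvsli3.ox_id */
};

static void acc_reuse_exchange_sketch(struct exch_ids_sketch *rsp,
				      const struct exch_ids_sketch *req)
{
	/* the accept must travel on the same exchange as the request */
	rsp->xri_rx_id = req->xri_rx_id;
	rsp->ox_id = req->ox_id;
}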
4048
e59058c4 4049/**
3621a710 4050 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
e59058c4
JS
4051 * @vport: pointer to a virtual N_Port data structure.
 4052 * @rejectError: reject (LS_RJT) reason and explanation codes to be returned.
4053 * @oldiocb: pointer to the original lpfc command iocb data structure.
4054 * @ndlp: pointer to a node-list data structure.
4055 * @mbox: pointer to the driver internal queue element for mailbox command.
4056 *
 4057 * This routine prepares and issues a Reject (RJT) response IOCB
4058 * command. If a @mbox pointer is passed in, it will be put into the
4059 * context_un.mbox field of the IOCB for the completion callback function
4060 * to issue to the HBA later.
4061 *
4062 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4063 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4064 * will be stored into the context1 field of the IOCB for the completion
4065 * callback function to the reject response ELS IOCB command.
4066 *
4067 * Return code
4068 * 0 - Successfully issued reject response
4069 * 1 - Failed to issue reject response
4070 **/
dea3101e 4071int
2e0fef85 4072lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
858c9f6c
JS
4073 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
4074 LPFC_MBOXQ_t *mbox)
dea3101e 4075{
2e0fef85 4076 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
4077 IOCB_t *icmd;
4078 IOCB_t *oldcmd;
4079 struct lpfc_iocbq *elsiocb;
dea3101e
JB
4080 uint8_t *pcmd;
4081 uint16_t cmdsize;
4082 int rc;
4083
92d7f7b0 4084 cmdsize = 2 * sizeof(uint32_t);
2e0fef85
JS
4085 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4086 ndlp->nlp_DID, ELS_CMD_LS_RJT);
488d1469 4087 if (!elsiocb)
c9f8735b 4088 return 1;
dea3101e
JB
4089
4090 icmd = &elsiocb->iocb;
4091 oldcmd = &oldiocb->iocb;
7851fe2c
JS
4092 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4093 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
dea3101e
JB
4094 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4095
4096 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
92d7f7b0 4097 pcmd += sizeof(uint32_t);
dea3101e
JB
4098 *((uint32_t *) (pcmd)) = rejectError;
4099
51ef4c26 4100 if (mbox)
858c9f6c 4101 elsiocb->context_un.mbox = mbox;
858c9f6c 4102
dea3101e 4103 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
e8b62011
JS
4104 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4105 "0129 Xmit ELS RJT x%x response tag x%x "
4106 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
4107 "rpi x%x\n",
4108 rejectError, elsiocb->iotag,
4109 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
4110 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
858c9f6c
JS
4111 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4112 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
4113 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
4114
dea3101e 4115 phba->fc_stat.elsXmitLSRJT++;
858c9f6c 4116 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3772a991 4117 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
51ef4c26 4118
dea3101e
JB
4119 if (rc == IOCB_ERROR) {
4120 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 4121 return 1;
dea3101e 4122 }
c9f8735b 4123 return 0;
dea3101e
JB
4124}
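
/*
 * Editor's sketch (standalone, not part of the driver): the LS_RJT payload
 * built above is just two 32-bit words -- the LS_RJT command code followed
 * by the reject error word (reason/explanation) supplied by the caller.
 */
#include <stdint.h>

static void build_ls_rjt_sketch(uint32_t *pcmd, uint32_t els_cmd_ls_rjt,
				uint32_t reject_error)
{
	pcmd[0] = els_cmd_ls_rjt;	/* first word: LS_RJT command code */
	pcmd[1] = reject_error;		/* second word: reason/explanation */
}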
4125
e59058c4 4126/**
3621a710 4127 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
e59058c4
JS
4128 * @vport: pointer to a virtual N_Port data structure.
4129 * @oldiocb: pointer to the original lpfc command iocb data structure.
4130 * @ndlp: pointer to a node-list data structure.
4131 *
4132 * This routine prepares and issues an Accept (ACC) response to Address
4133 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
4134 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4135 *
4136 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4137 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4138 * will be stored into the context1 field of the IOCB for the completion
4139 * callback function to the ADISC Accept response ELS IOCB command.
4140 *
4141 * Return code
4142 * 0 - Successfully issued acc adisc response
4143 * 1 - Failed to issue adisc acc response
4144 **/
dea3101e 4145int
2e0fef85
JS
4146lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
4147 struct lpfc_nodelist *ndlp)
dea3101e 4148{
2e0fef85 4149 struct lpfc_hba *phba = vport->phba;
dea3101e 4150 ADISC *ap;
2e0fef85 4151 IOCB_t *icmd, *oldcmd;
dea3101e 4152 struct lpfc_iocbq *elsiocb;
dea3101e
JB
4153 uint8_t *pcmd;
4154 uint16_t cmdsize;
4155 int rc;
4156
92d7f7b0 4157 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
2e0fef85
JS
4158 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4159 ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 4160 if (!elsiocb)
c9f8735b 4161 return 1;
dea3101e 4162
5b8bd0c9
JS
4163 icmd = &elsiocb->iocb;
4164 oldcmd = &oldiocb->iocb;
7851fe2c
JS
4165 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4166 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5b8bd0c9 4167
dea3101e 4168 /* Xmit ADISC ACC response tag <ulpIoTag> */
e8b62011
JS
4169 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4170 "0130 Xmit ADISC ACC response iotag x%x xri: "
4171 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
4172 elsiocb->iotag, elsiocb->iocb.ulpContext,
4173 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4174 ndlp->nlp_rpi);
dea3101e
JB
4175 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4176
4177 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 4178 pcmd += sizeof(uint32_t);
dea3101e
JB
4179
4180 ap = (ADISC *) (pcmd);
4181 ap->hardAL_PA = phba->fc_pref_ALPA;
92d7f7b0
JS
4182 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4183 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 4184 ap->DID = be32_to_cpu(vport->fc_myDID);
dea3101e 4185
858c9f6c
JS
4186 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4187 "Issue ACC ADISC: did:x%x flg:x%x",
4188 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4189
dea3101e 4190 phba->fc_stat.elsXmitACC++;
858c9f6c 4191 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3772a991 4192 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
4193 if (rc == IOCB_ERROR) {
4194 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 4195 return 1;
dea3101e 4196 }
c9f8735b 4197 return 0;
dea3101e
JB
4198}
4199
e59058c4 4200/**
3621a710 4201 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
e59058c4
JS
4202 * @vport: pointer to a virtual N_Port data structure.
4203 * @oldiocb: pointer to the original lpfc command iocb data structure.
4204 * @ndlp: pointer to a node-list data structure.
4205 *
4206 * This routine prepares and issues an Accept (ACC) response to Process
4207 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
4208 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4209 *
4210 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4211 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4212 * will be stored into the context1 field of the IOCB for the completion
4213 * callback function to the PRLI Accept response ELS IOCB command.
4214 *
4215 * Return code
4216 * 0 - Successfully issued acc prli response
4217 * 1 - Failed to issue acc prli response
4218 **/
dea3101e 4219int
2e0fef85 4220lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
5b8bd0c9 4221 struct lpfc_nodelist *ndlp)
dea3101e 4222{
2e0fef85 4223 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
4224 PRLI *npr;
4225 lpfc_vpd_t *vpd;
4226 IOCB_t *icmd;
4227 IOCB_t *oldcmd;
4228 struct lpfc_iocbq *elsiocb;
dea3101e
JB
4229 uint8_t *pcmd;
4230 uint16_t cmdsize;
4231 int rc;
4232
92d7f7b0 4233 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
2e0fef85 4234 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
92d7f7b0 4235 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
c9f8735b
JW
4236 if (!elsiocb)
4237 return 1;
dea3101e 4238
5b8bd0c9
JS
4239 icmd = &elsiocb->iocb;
4240 oldcmd = &oldiocb->iocb;
7851fe2c
JS
4241 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4242 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4243
dea3101e 4244 /* Xmit PRLI ACC response tag <ulpIoTag> */
e8b62011
JS
4245 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4246 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
4247 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
4248 elsiocb->iotag, elsiocb->iocb.ulpContext,
4249 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4250 ndlp->nlp_rpi);
dea3101e
JB
4251 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4252
4253 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
92d7f7b0 4254 pcmd += sizeof(uint32_t);
dea3101e
JB
4255
4256 /* For PRLI, remainder of payload is PRLI parameter page */
92d7f7b0 4257 memset(pcmd, 0, sizeof(PRLI));
dea3101e
JB
4258
4259 npr = (PRLI *) pcmd;
4260 vpd = &phba->vpd;
4261 /*
0d2b6b83
JS
4262 * If the remote port is a target and our firmware version is 3.20 or
4263 * later, set the following bits for FC-TAPE support.
dea3101e 4264 */
0d2b6b83
JS
4265 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4266 (vpd->rev.feaLevelHigh >= 0x02)) {
dea3101e
JB
4267 npr->ConfmComplAllowed = 1;
4268 npr->Retry = 1;
4269 npr->TaskRetryIdReq = 1;
4270 }
4271
4272 npr->acceptRspCode = PRLI_REQ_EXECUTED;
4273 npr->estabImagePair = 1;
4274 npr->readXferRdyDis = 1;
4275 npr->ConfmComplAllowed = 1;
4276
4277 npr->prliType = PRLI_FCP_TYPE;
4278 npr->initiatorFunc = 1;
4279
858c9f6c
JS
4280 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4281 "Issue ACC PRLI: did:x%x flg:x%x",
4282 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4283
dea3101e 4284 phba->fc_stat.elsXmitACC++;
858c9f6c 4285 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e 4286
3772a991 4287 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
4288 if (rc == IOCB_ERROR) {
4289 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 4290 return 1;
dea3101e 4291 }
c9f8735b 4292 return 0;
dea3101e
JB
4293}
4294
e59058c4 4295/**
3621a710 4296 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
e59058c4
JS
4297 * @vport: pointer to a virtual N_Port data structure.
4298 * @format: rnid command format.
4299 * @oldiocb: pointer to the original lpfc command iocb data structure.
4300 * @ndlp: pointer to a node-list data structure.
4301 *
4302 * This routine issues a Request Node Identification Data (RNID) Accept
4303 * (ACC) response. It constructs the RNID ACC response command according to
4304 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
4305 * issue the response. Note that this command does not need to hold the ndlp
4306 * reference count for the callback. So, the ndlp reference count taken by
4307 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
4308 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
4309 * there is no ndlp reference available.
4310 *
4311 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4312 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4313 * will be stored into the context1 field of the IOCB for the completion
4314 * callback function. However, for the RNID Accept Response ELS command,
4315 * this is undone later by this routine after the IOCB is allocated.
4316 *
4317 * Return code
4318 * 0 - Successfully issued acc rnid response
4319 * 1 - Failed to issue acc rnid response
4320 **/
dea3101e 4321static int
2e0fef85 4322lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
329f9bc7 4323 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
dea3101e 4324{
2e0fef85 4325 struct lpfc_hba *phba = vport->phba;
dea3101e 4326 RNID *rn;
2e0fef85 4327 IOCB_t *icmd, *oldcmd;
dea3101e 4328 struct lpfc_iocbq *elsiocb;
dea3101e
JB
4329 uint8_t *pcmd;
4330 uint16_t cmdsize;
4331 int rc;
4332
92d7f7b0
JS
4333 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
4334 + (2 * sizeof(struct lpfc_name));
dea3101e 4335 if (format)
92d7f7b0 4336 cmdsize += sizeof(RNID_TOP_DISC);
dea3101e 4337
2e0fef85
JS
4338 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4339 ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 4340 if (!elsiocb)
c9f8735b 4341 return 1;
dea3101e 4342
5b8bd0c9
JS
4343 icmd = &elsiocb->iocb;
4344 oldcmd = &oldiocb->iocb;
7851fe2c
JS
4345 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4346 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4347
dea3101e 4348 /* Xmit RNID ACC response tag <ulpIoTag> */
e8b62011
JS
4349 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4350 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
4351 elsiocb->iotag, elsiocb->iocb.ulpContext);
dea3101e 4352 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
dea3101e 4353 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 4354 pcmd += sizeof(uint32_t);
dea3101e 4355
92d7f7b0 4356 memset(pcmd, 0, sizeof(RNID));
dea3101e
JB
4357 rn = (RNID *) (pcmd);
4358 rn->Format = format;
92d7f7b0
JS
4359 rn->CommonLen = (2 * sizeof(struct lpfc_name));
4360 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4361 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
dea3101e
JB
4362 switch (format) {
4363 case 0:
4364 rn->SpecificLen = 0;
4365 break;
4366 case RNID_TOPOLOGY_DISC:
92d7f7b0 4367 rn->SpecificLen = sizeof(RNID_TOP_DISC);
dea3101e 4368 memcpy(&rn->un.topologyDisc.portName,
92d7f7b0 4369 &vport->fc_portname, sizeof(struct lpfc_name));
dea3101e
JB
4370 rn->un.topologyDisc.unitType = RNID_HBA;
4371 rn->un.topologyDisc.physPort = 0;
4372 rn->un.topologyDisc.attachedNodes = 0;
4373 break;
4374 default:
4375 rn->CommonLen = 0;
4376 rn->SpecificLen = 0;
4377 break;
4378 }
4379
858c9f6c
JS
4380 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4381 "Issue ACC RNID: did:x%x flg:x%x",
4382 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4383
dea3101e 4384 phba->fc_stat.elsXmitACC++;
858c9f6c 4385 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e 4386
3772a991 4387 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
4388 if (rc == IOCB_ERROR) {
4389 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 4390 return 1;
dea3101e 4391 }
c9f8735b 4392 return 0;
dea3101e
JB
4393}
4394
19ca7609
JS
4395/**
4396 * lpfc_els_clear_rrq - Clear the rrq that this RRQ ELS command describes.
4397 * @vport: pointer to a virtual N_Port data structure.
4398 * @iocb: pointer to the lpfc command iocb data structure.
4399 * @ndlp: pointer to a node-list data structure.
4400 *
4401 * Return: None
4402 **/
4403static void
4404lpfc_els_clear_rrq(struct lpfc_vport *vport,
4405 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
4406{
4407 struct lpfc_hba *phba = vport->phba;
4408 uint8_t *pcmd;
4409 struct RRQ *rrq;
4410 uint16_t rxid;
1151e3ec 4411 uint16_t xri;
19ca7609
JS
4412 struct lpfc_node_rrq *prrq;
4413
4414
4415 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
4416 pcmd += sizeof(uint32_t);
4417 rrq = (struct RRQ *)pcmd;
1151e3ec 4418 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
9589b062 4419 rxid = bf_get(rrq_rxid, rrq);
19ca7609
JS
4420
4421 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4422 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
4423 " x%x x%x\n",
1151e3ec 4424 be32_to_cpu(bf_get(rrq_did, rrq)),
9589b062 4425 bf_get(rrq_oxid, rrq),
19ca7609
JS
4426 rxid,
4427 iocb->iotag, iocb->iocb.ulpContext);
4428
4429 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4430 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
4431 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
1151e3ec 4432 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
9589b062 4433 xri = bf_get(rrq_oxid, rrq);
1151e3ec
JS
4434 else
4435 xri = rxid;
4436 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
19ca7609 4437 if (prrq)
1151e3ec 4438 lpfc_clr_rrq_active(phba, xri, prrq);
19ca7609
JS
4439 return;
4440}
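/*
 * Illustrative sketch, not part of the driver: the XRI selection made by
 * lpfc_els_clear_rrq() above, written against plain integers. When the
 * local port was the exchange originator (our DID equals the RRQ
 * originator DID) the exchange is identified by its OX_ID, otherwise the
 * responder RX_ID is used. The helper name is hypothetical.
 */
#include <stdint.h>

static uint16_t rrq_pick_xri(uint32_t my_did, uint32_t rrq_did,
			     uint16_t oxid, uint16_t rxid)
{
	return (my_did == rrq_did) ? oxid : rxid;
}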
4441
12265f68
JS
4442/**
4443 * lpfc_els_rsp_echo_acc - Issue echo acc response
4444 * @vport: pointer to a virtual N_Port data structure.
4445 * @data: pointer to echo data to return in the accept.
4446 * @oldiocb: pointer to the original lpfc command iocb data structure.
4447 * @ndlp: pointer to a node-list data structure.
4448 *
4449 * Return code
4450 * 0 - Successfully issued acc echo response
4451 * 1 - Failed to issue acc echo response
4452 **/
4453static int
4454lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4455 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4456{
4457 struct lpfc_hba *phba = vport->phba;
4458 struct lpfc_iocbq *elsiocb;
12265f68
JS
4459 uint8_t *pcmd;
4460 uint16_t cmdsize;
4461 int rc;
4462
12265f68
JS
4463 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
4464
bf08611b
JS
4465 /* The accumulated length can exceed the BPL_SIZE. For
4466 * now, use this as the limit
4467 */
4468 if (cmdsize > LPFC_BPL_SIZE)
4469 cmdsize = LPFC_BPL_SIZE;
12265f68
JS
4470 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4471 ndlp->nlp_DID, ELS_CMD_ACC);
4472 if (!elsiocb)
4473 return 1;
4474
7851fe2c
JS
4475 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
4476 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
4477
12265f68
JS
4478 /* Xmit ECHO ACC response tag <ulpIoTag> */
4479 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4480 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
4481 elsiocb->iotag, elsiocb->iocb.ulpContext);
4482 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4483 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4484 pcmd += sizeof(uint32_t);
4485 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
4486
4487 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4488 "Issue ACC ECHO: did:x%x flg:x%x",
4489 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4490
4491 phba->fc_stat.elsXmitACC++;
4492 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
12265f68
JS
4493
4494 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4495 if (rc == IOCB_ERROR) {
4496 lpfc_els_free_iocb(phba, elsiocb);
4497 return 1;
4498 }
4499 return 0;
4500}
4501
e59058c4 4502/**
3621a710 4503 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
e59058c4
JS
4504 * @vport: pointer to a host virtual N_Port data structure.
4505 *
4506 * This routine issues Address Discovery (ADISC) ELS commands to those
4507 * N_Ports of the @vport which are in node port recovery state and for
4508 * which an ADISC has not yet been issued. Each time an ELS ADISC IOCB is
4509 * issued by invoking the lpfc_issue_els_adisc() routine, the per-@vport
4510 * discovery count (num_disc_nodes) is incremented. If num_disc_nodes
4511 * reaches the pre-configured threshold (cfg_discovery_threads), the
4512 * FC_NLP_MORE bit is set in the @vport fc_flag and issuing of the
4513 * remaining ADISC IOCBs is deferred to a later pass. Otherwise, if the
4514 * walk of all the ndlps on the @vport issues no ADISC IOCB, the
4515 * FC_NLP_MORE bit is cleared from the @vport fc_flag to indicate that
4516 * no more ADISCs need to be sent.
4517 *
4518 * Return code
4519 * The number of N_Ports with adisc issued.
4520 **/
dea3101e 4521int
2e0fef85 4522lpfc_els_disc_adisc(struct lpfc_vport *vport)
dea3101e 4523{
2e0fef85 4524 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 4525 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 4526 int sentadisc = 0;
dea3101e 4527
685f0bf7 4528 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2e0fef85 4529 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
4530 if (!NLP_CHK_NODE_ACT(ndlp))
4531 continue;
685f0bf7
JS
4532 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4533 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4534 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
2e0fef85 4535 spin_lock_irq(shost->host_lock);
685f0bf7 4536 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2e0fef85 4537 spin_unlock_irq(shost->host_lock);
685f0bf7 4538 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
4539 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4540 lpfc_issue_els_adisc(vport, ndlp, 0);
685f0bf7 4541 sentadisc++;
2e0fef85
JS
4542 vport->num_disc_nodes++;
4543 if (vport->num_disc_nodes >=
3de2a653 4544 vport->cfg_discovery_threads) {
2e0fef85
JS
4545 spin_lock_irq(shost->host_lock);
4546 vport->fc_flag |= FC_NLP_MORE;
4547 spin_unlock_irq(shost->host_lock);
685f0bf7 4548 break;
dea3101e
JB
4549 }
4550 }
4551 }
4552 if (sentadisc == 0) {
2e0fef85
JS
4553 spin_lock_irq(shost->host_lock);
4554 vport->fc_flag &= ~FC_NLP_MORE;
4555 spin_unlock_irq(shost->host_lock);
dea3101e 4556 }
2fe165b6 4557 return sentadisc;
dea3101e
JB
4558}
4559
e59058c4 4560/**
3621a710 4561 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
e59058c4
JS
4562 * @vport: pointer to a host virtual N_Port data structure.
4563 *
4564 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
4565 * of the @vport which are in node port recovery state and will not be
4566 * sent an ADISC. Each time an ELS PLOGI IOCB is issued by invoking the
4567 * lpfc_issue_els_plogi() routine, the per-@vport discovery count
4568 * (num_disc_nodes) is incremented. If num_disc_nodes reaches the
4569 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit
4570 * is set in the @vport fc_flag and issuing of the remaining PLOGI IOCBs
4571 * is deferred to a later pass. Otherwise, if the walk of all the ndlps
4572 * on the @vport issues no PLOGI IOCB, the FC_NLP_MORE bit is cleared
4573 * from the @vport fc_flag to indicate that no more PLOGIs need to be
4574 * sent.
4575 *
4576 * Return code
4577 * The number of N_Ports with plogi issued.
4578 **/
dea3101e 4579int
2e0fef85 4580lpfc_els_disc_plogi(struct lpfc_vport *vport)
dea3101e 4581{
2e0fef85 4582 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 4583 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 4584 int sentplogi = 0;
dea3101e 4585
2e0fef85
JS
4586 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
4587 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
4588 if (!NLP_CHK_NODE_ACT(ndlp))
4589 continue;
685f0bf7 4590 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
8b017a30
JS
4591 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4592 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
4593 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
685f0bf7 4594 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
4595 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4596 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
685f0bf7 4597 sentplogi++;
2e0fef85
JS
4598 vport->num_disc_nodes++;
4599 if (vport->num_disc_nodes >=
8b017a30 4600 vport->cfg_discovery_threads) {
2e0fef85
JS
4601 spin_lock_irq(shost->host_lock);
4602 vport->fc_flag |= FC_NLP_MORE;
4603 spin_unlock_irq(shost->host_lock);
685f0bf7 4604 break;
dea3101e
JB
4605 }
4606 }
4607 }
87af33fe
JS
4608 if (sentplogi) {
4609 lpfc_set_disctmo(vport);
4610 }
4611 else {
2e0fef85
JS
4612 spin_lock_irq(shost->host_lock);
4613 vport->fc_flag &= ~FC_NLP_MORE;
4614 spin_unlock_irq(shost->host_lock);
dea3101e 4615 }
2fe165b6 4616 return sentplogi;
dea3101e
JB
4617}
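/*
 * Illustrative sketch, not part of the driver: the per-pass throttling
 * pattern shared by lpfc_els_disc_adisc() and lpfc_els_disc_plogi() above.
 * After each command is issued the discovery count is bumped; once the
 * configured limit is reached, a "more pending" flag (the analogue of
 * FC_NLP_MORE) is set and the walk stops so the remainder can be picked up
 * on a later pass. The struct and names below are hypothetical stand-ins
 * for the vport fields.
 */
#include <stdbool.h>

struct disc_throttle {
	int num_disc_nodes;		/* commands issued this pass */
	int discovery_threads;		/* configured per-pass limit */
	bool more_pending;		/* analogue of FC_NLP_MORE   */
};

/* Call after issuing one ADISC/PLOGI; returns false when the walk of the
 * node list should stop for this pass.
 */
static bool disc_throttle_after_issue(struct disc_throttle *t)
{
	t->num_disc_nodes++;
	if (t->num_disc_nodes >= t->discovery_threads) {
		t->more_pending = true;	/* defer the rest to a later pass */
		return false;
	}
	return true;
}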
4618
6c92d1d0 4619uint32_t
86478875
JS
4620lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
4621 uint32_t word0)
4622{
4623
4624 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
4625 desc->payload.els_req = word0;
4626 desc->length = cpu_to_be32(sizeof(desc->payload));
6c92d1d0
JS
4627
4628 return sizeof(struct fc_rdp_link_service_desc);
86478875
JS
4629}
4630
6c92d1d0 4631uint32_t
86478875
JS
4632lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
4633 uint8_t *page_a0, uint8_t *page_a2)
4634{
4635 uint16_t wavelength;
4636 uint16_t temperature;
4637 uint16_t rx_power;
4638 uint16_t tx_bias;
4639 uint16_t tx_power;
4640 uint16_t vcc;
4641 uint16_t flag = 0;
4642 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
4643 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
4644
4645 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
4646
4647 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
4648 &page_a0[SSF_TRANSCEIVER_CODE_B4];
4649 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
4650 &page_a0[SSF_TRANSCEIVER_CODE_B5];
4651
4652 if ((trasn_code_byte4->fc_sw_laser) ||
4653 (trasn_code_byte5->fc_sw_laser_sl) ||
4654 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if it's short WL */
4655 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
4656 } else if (trasn_code_byte4->fc_lw_laser) {
4657 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
4658 page_a0[SSF_WAVELENGTH_B0];
4659 if (wavelength == SFP_WAVELENGTH_LC1310)
4660 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
4661 if (wavelength == SFP_WAVELENGTH_LL1550)
4662 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
4663 }
4664 /* check if it's SFP+ */
4665 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
4666 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
4667 << SFP_FLAG_CT_SHIFT;
4668
4669 /* check if it's OPTICAL */
4670 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
4671 SFP_FLAG_IS_OPTICAL_PORT : 0)
4672 << SFP_FLAG_IS_OPTICAL_SHIFT;
4673
4674 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
4675 page_a2[SFF_TEMPERATURE_B0]);
4676 vcc = (page_a2[SFF_VCC_B1] << 8 |
4677 page_a2[SFF_VCC_B0]);
4678 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
4679 page_a2[SFF_TXPOWER_B0]);
4680 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
4681 page_a2[SFF_TX_BIAS_CURRENT_B0]);
4682 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
4683 page_a2[SFF_RXPOWER_B0]);
4684 desc->sfp_info.temperature = cpu_to_be16(temperature);
4685 desc->sfp_info.rx_power = cpu_to_be16(rx_power);
4686 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
4687 desc->sfp_info.tx_power = cpu_to_be16(tx_power);
4688 desc->sfp_info.vcc = cpu_to_be16(vcc);
4689
4690 desc->sfp_info.flags = cpu_to_be16(flag);
4691 desc->length = cpu_to_be32(sizeof(desc->sfp_info));
6c92d1d0
JS
4692
4693 return sizeof(struct fc_rdp_sfp_desc);
86478875
JS
4694}
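/*
 * Illustrative sketch, not part of the driver: each 16-bit diagnostic value
 * above (temperature, vcc, tx bias, tx/rx power) is rebuilt from two
 * consecutive bytes of the transceiver's page A2, most-significant byte
 * first as the driver reads it, before being stored big-endian in the
 * descriptor. A plain-C equivalent of that assembly step:
 */
#include <stdint.h>

static uint16_t sfp_diag_word(const uint8_t *page_a2,
			      unsigned int msb_off, unsigned int lsb_off)
{
	return (uint16_t)((page_a2[msb_off] << 8) | page_a2[lsb_off]);
}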
4695
6c92d1d0 4696uint32_t
86478875
JS
4697lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
4698 READ_LNK_VAR *stat)
4699{
4700 uint32_t type;
4701
4702 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
4703
4704 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
4705
4706 desc->info.port_type = cpu_to_be32(type);
4707
4708 desc->info.link_status.link_failure_cnt =
4709 cpu_to_be32(stat->linkFailureCnt);
4710 desc->info.link_status.loss_of_synch_cnt =
4711 cpu_to_be32(stat->lossSyncCnt);
4712 desc->info.link_status.loss_of_signal_cnt =
4713 cpu_to_be32(stat->lossSignalCnt);
4714 desc->info.link_status.primitive_seq_proto_err =
4715 cpu_to_be32(stat->primSeqErrCnt);
4716 desc->info.link_status.invalid_trans_word =
4717 cpu_to_be32(stat->invalidXmitWord);
4718 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
4719
4720 desc->length = cpu_to_be32(sizeof(desc->info));
6c92d1d0
JS
4721
4722 return sizeof(struct fc_rdp_link_error_status_desc);
86478875
JS
4723}
4724
6c92d1d0 4725uint32_t
56204984
JS
4726lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
4727 struct lpfc_vport *vport)
4728{
3aaaa314
JS
4729 uint32_t bbCredit;
4730
56204984
JS
4731 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
4732
3aaaa314
JS
4733 bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
4734 (vport->fc_sparam.cmn.bbCreditMsb << 8);
4735 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
4736 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
4737 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
4738 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
4739 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
4740 } else {
56204984 4741 desc->bbc_info.attached_port_bbc = 0;
3aaaa314 4742 }
56204984
JS
4743
4744 desc->bbc_info.rtt = 0;
4745 desc->length = cpu_to_be32(sizeof(desc->bbc_info));
6c92d1d0
JS
4746
4747 return sizeof(struct fc_rdp_bbc_desc);
56204984
JS
4748}
4749
6c92d1d0 4750uint32_t
310429ef
JS
4751lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
4752 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
56204984 4753{
310429ef 4754 uint32_t flags = 0;
56204984
JS
4755
4756 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
4757
3aaaa314
JS
4758 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
4759 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
4760 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
4761 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
310429ef
JS
4762
4763 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
4764 flags |= RDP_OET_HIGH_ALARM;
4765 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
4766 flags |= RDP_OET_LOW_ALARM;
4767 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
4768 flags |= RDP_OET_HIGH_WARNING;
4769 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
4770 flags |= RDP_OET_LOW_WARNING;
4771
56204984
JS
4772 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
4773 desc->oed_info.function_flags = cpu_to_be32(flags);
4774 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6c92d1d0 4775 return sizeof(struct fc_rdp_oed_sfp_desc);
56204984
JS
4776}
4777
6c92d1d0 4778uint32_t
310429ef
JS
4779lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
4780 struct fc_rdp_oed_sfp_desc *desc,
56204984
JS
4781 uint8_t *page_a2)
4782{
310429ef 4783 uint32_t flags = 0;
56204984
JS
4784
4785 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
4786
3aaaa314
JS
4787 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
4788 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
4789 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
4790 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
310429ef
JS
4791
4792 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
4793 flags |= RDP_OET_HIGH_ALARM;
4794 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
4795 flags |= RDP_OET_LOW_ALARM;
4796 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
4797 flags |= RDP_OET_HIGH_WARNING;
4798 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
4799 flags |= RDP_OET_LOW_WARNING;
4800
56204984
JS
4801 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
4802 desc->oed_info.function_flags = cpu_to_be32(flags);
4803 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6c92d1d0 4804 return sizeof(struct fc_rdp_oed_sfp_desc);
56204984
JS
4805}
4806
6c92d1d0 4807uint32_t
310429ef
JS
4808lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
4809 struct fc_rdp_oed_sfp_desc *desc,
56204984
JS
4810 uint8_t *page_a2)
4811{
310429ef 4812 uint32_t flags = 0;
56204984
JS
4813
4814 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
4815
3aaaa314
JS
4816 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
4817 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
4818 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
4819 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
310429ef
JS
4820
4821 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
4822 flags |= RDP_OET_HIGH_ALARM;
4823 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
4824 flags |= RDP_OET_LOW_ALARM;
4825 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
4826 flags |= RDP_OET_HIGH_WARNING;
4827 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
4828 flags |= RDP_OET_LOW_WARNING;
4829
56204984
JS
4830 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
4831 desc->oed_info.function_flags = cpu_to_be32(flags);
4832 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6c92d1d0 4833 return sizeof(struct fc_rdp_oed_sfp_desc);
56204984
JS
4834}
4835
6c92d1d0 4836uint32_t
310429ef
JS
4837lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
4838 struct fc_rdp_oed_sfp_desc *desc,
56204984
JS
4839 uint8_t *page_a2)
4840{
310429ef 4841 uint32_t flags = 0;
56204984
JS
4842
4843 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
4844
3aaaa314
JS
4845 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
4846 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
4847 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
4848 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
310429ef
JS
4849
4850 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
4851 flags |= RDP_OET_HIGH_ALARM;
4852 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
4853 flags |= RDP_OET_LOW_ALARM;
4854 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
4855 flags |= RDP_OET_HIGH_WARNING;
4856 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
4857 flags |= RDP_OET_LOW_WARNING;
4858
56204984
JS
4859 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
4860 desc->oed_info.function_flags = cpu_to_be32(flags);
4861 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6c92d1d0 4862 return sizeof(struct fc_rdp_oed_sfp_desc);
56204984
JS
4863}
4864
4865
6c92d1d0 4866uint32_t
310429ef
JS
4867lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
4868 struct fc_rdp_oed_sfp_desc *desc,
56204984
JS
4869 uint8_t *page_a2)
4870{
310429ef 4871 uint32_t flags = 0;
56204984
JS
4872
4873 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
4874
3aaaa314
JS
4875 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
4876 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
4877 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
4878 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
310429ef
JS
4879
4880 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
4881 flags |= RDP_OET_HIGH_ALARM;
4882 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
4883 flags |= RDP_OET_LOW_ALARM;
4884 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
4885 flags |= RDP_OET_HIGH_WARNING;
4886 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
4887 flags |= RDP_OET_LOW_WARNING;
4888
56204984
JS
4889 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
4890 desc->oed_info.function_flags = cpu_to_be32(flags);
4891 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6c92d1d0 4892 return sizeof(struct fc_rdp_oed_sfp_desc);
56204984
JS
4893}
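/*
 * Illustrative sketch, not part of the driver: the five OED builders above
 * (temperature, voltage, tx bias, tx power, rx power) differ only in which
 * page-A2 threshold bytes they copy, which transgression bits they test in
 * sfp_alarm/sfp_warning, and which 4-bit type code they encode. A
 * table-driven equivalent in plain C; the struct layout, flag values and
 * names here are hypothetical stand-ins, not the driver's definitions.
 */
#include <stdint.h>
#include <arpa/inet.h>		/* htonl() for the big-endian flags word */

#define EX_OET_HIGH_ALARM	0x8
#define EX_OET_LOW_ALARM	0x4
#define EX_OET_HIGH_WARNING	0x2
#define EX_OET_LOW_WARNING	0x1
#define EX_OED_TYPE_SHIFT	28

struct ex_oed_info {
	uint8_t  hi_alarm, lo_alarm, hi_warning, lo_warning;
	uint32_t function_flags;
};

struct ex_oed_param {
	unsigned int a2_off[4];	/* hi alarm, lo alarm, hi warning, lo warning */
	uint32_t high_bit;	/* transgression bit for the "high" threshold */
	uint32_t low_bit;	/* transgression bit for the "low" threshold  */
	uint32_t type_code;	/* 4-bit OED type (temperature, voltage, ...) */
};

static void ex_build_oed(struct ex_oed_info *oed, const uint8_t *page_a2,
			 uint32_t alarm_bits, uint32_t warning_bits,
			 const struct ex_oed_param *p)
{
	uint32_t flags = 0;

	oed->hi_alarm   = page_a2[p->a2_off[0]];
	oed->lo_alarm   = page_a2[p->a2_off[1]];
	oed->hi_warning = page_a2[p->a2_off[2]];
	oed->lo_warning = page_a2[p->a2_off[3]];

	if (alarm_bits & p->high_bit)
		flags |= EX_OET_HIGH_ALARM;
	if (alarm_bits & p->low_bit)
		flags |= EX_OET_LOW_ALARM;
	if (warning_bits & p->high_bit)
		flags |= EX_OET_HIGH_WARNING;
	if (warning_bits & p->low_bit)
		flags |= EX_OET_LOW_WARNING;

	flags |= (p->type_code & 0xf) << EX_OED_TYPE_SHIFT;
	oed->function_flags = htonl(flags);
}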
4894
6c92d1d0 4895uint32_t
56204984
JS
4896lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
4897 uint8_t *page_a0, struct lpfc_vport *vport)
4898{
4899 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
4900 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
4901 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
4902 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
4903 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2);
4904 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
4905 desc->length = cpu_to_be32(sizeof(desc->opd_info));
6c92d1d0 4906 return sizeof(struct fc_rdp_opd_sfp_desc);
56204984
JS
4907}
4908
6c92d1d0 4909uint32_t
4258e98e
JS
4910lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
4911{
4912 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
4913 return 0;
4914 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG);
4915
4916 desc->info.CorrectedBlocks =
4917 cpu_to_be32(stat->fecCorrBlkCount);
4918 desc->info.UncorrectableBlocks =
4919 cpu_to_be32(stat->fecUncorrBlkCount);
4920
4921 desc->length = cpu_to_be32(sizeof(desc->info));
4922
4923 return sizeof(struct fc_fec_rdp_desc);
4924}
4925
6c92d1d0 4926uint32_t
86478875
JS
4927lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
4928{
4929 uint16_t rdp_cap = 0;
4930 uint16_t rdp_speed;
4931
4932 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
4933
81e75177
JS
4934 switch (phba->fc_linkspeed) {
4935 case LPFC_LINK_SPEED_1GHZ:
86478875
JS
4936 rdp_speed = RDP_PS_1GB;
4937 break;
81e75177 4938 case LPFC_LINK_SPEED_2GHZ:
86478875
JS
4939 rdp_speed = RDP_PS_2GB;
4940 break;
81e75177 4941 case LPFC_LINK_SPEED_4GHZ:
86478875
JS
4942 rdp_speed = RDP_PS_4GB;
4943 break;
81e75177 4944 case LPFC_LINK_SPEED_8GHZ:
86478875
JS
4945 rdp_speed = RDP_PS_8GB;
4946 break;
81e75177 4947 case LPFC_LINK_SPEED_10GHZ:
86478875
JS
4948 rdp_speed = RDP_PS_10GB;
4949 break;
81e75177 4950 case LPFC_LINK_SPEED_16GHZ:
86478875
JS
4951 rdp_speed = RDP_PS_16GB;
4952 break;
a085e87c
JS
4953 case LPFC_LINK_SPEED_32GHZ:
4954 rdp_speed = RDP_PS_32GB;
4955 break;
86478875
JS
4956 default:
4957 rdp_speed = RDP_PS_UNKNOWN;
4958 break;
4959 }
4960
4961 desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
4962
d38dd52c
JS
4963 if (phba->lmt & LMT_32Gb)
4964 rdp_cap |= RDP_PS_32GB;
86478875
JS
4965 if (phba->lmt & LMT_16Gb)
4966 rdp_cap |= RDP_PS_16GB;
4967 if (phba->lmt & LMT_10Gb)
4968 rdp_cap |= RDP_PS_10GB;
4969 if (phba->lmt & LMT_8Gb)
4970 rdp_cap |= RDP_PS_8GB;
4971 if (phba->lmt & LMT_4Gb)
4972 rdp_cap |= RDP_PS_4GB;
4973 if (phba->lmt & LMT_2Gb)
4974 rdp_cap |= RDP_PS_2GB;
4975 if (phba->lmt & LMT_1Gb)
4976 rdp_cap |= RDP_PS_1GB;
4977
4978 if (rdp_cap == 0)
4979 rdp_cap = RDP_CAP_UNKNOWN;
56204984
JS
4980 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
4981 rdp_cap |= RDP_CAP_USER_CONFIGURED;
86478875
JS
4982
4983 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
4984 desc->length = cpu_to_be32(sizeof(desc->info));
6c92d1d0 4985 return sizeof(struct fc_rdp_port_speed_desc);
86478875
JS
4986}
4987
6c92d1d0 4988uint32_t
86478875
JS
4989lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
4990 struct lpfc_hba *phba)
4991{
4992
4993 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
4994
4995 memcpy(desc->port_names.wwnn, phba->wwnn,
4996 sizeof(desc->port_names.wwnn));
4997
4998 memcpy(desc->port_names.wwpn, &phba->wwpn,
4999 sizeof(desc->port_names.wwpn));
5000
5001 desc->length = cpu_to_be32(sizeof(desc->port_names));
6c92d1d0 5002 return sizeof(struct fc_rdp_port_name_desc);
86478875
JS
5003}
5004
6c92d1d0 5005uint32_t
86478875
JS
5006lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
5007 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5008{
5009
5010 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
5011 if (vport->fc_flag & FC_FABRIC) {
5012 memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
5013 sizeof(desc->port_names.wwnn));
5014
5015 memcpy(desc->port_names.wwpn, &vport->fabric_portname,
5016 sizeof(desc->port_names.wwpn));
5017 } else { /* Point to Point */
5018 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
5019 sizeof(desc->port_names.wwnn));
5020
5021 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
5022 sizeof(desc->port_names.wwpn));
5023 }
5024
5025 desc->length = cpu_to_be32(sizeof(desc->port_names));
6c92d1d0 5026 return sizeof(struct fc_rdp_port_name_desc);
86478875
JS
5027}
5028
5029void
5030lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
5031 int status)
5032{
5033 struct lpfc_nodelist *ndlp = rdp_context->ndlp;
5034 struct lpfc_vport *vport = ndlp->vport;
5035 struct lpfc_iocbq *elsiocb;
eb8d68c9 5036 struct ulp_bde64 *bpl;
86478875
JS
5037 IOCB_t *icmd;
5038 uint8_t *pcmd;
5039 struct ls_rjt *stat;
5040 struct fc_rdp_res_frame *rdp_res;
6c92d1d0 5041 uint32_t cmdsize, len;
310429ef 5042 uint16_t *flag_ptr;
6c92d1d0 5043 int rc;
86478875
JS
5044
5045 if (status != SUCCESS)
5046 goto error;
eb8d68c9
JS
5047
5048 /* This will change once we know the true size of the RDP payload */
86478875
JS
5049 cmdsize = sizeof(struct fc_rdp_res_frame);
5050
5051 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
5052 lpfc_max_els_tries, rdp_context->ndlp,
5053 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
5054 lpfc_nlp_put(ndlp);
5055 if (!elsiocb)
5056 goto free_rdp_context;
5057
5058 icmd = &elsiocb->iocb;
5059 icmd->ulpContext = rdp_context->rx_id;
5060 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
5061
5062 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5063 "2171 Xmit RDP response tag x%x xri x%x, "
5064 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5065 elsiocb->iotag, elsiocb->iocb.ulpContext,
5066 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5067 ndlp->nlp_rpi);
5068 rdp_res = (struct fc_rdp_res_frame *)
5069 (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5070 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5071 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
5072 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5073
310429ef
JS
5074 /* Update Alarm and Warning */
5075 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
5076 phba->sfp_alarm |= *flag_ptr;
5077 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
5078 phba->sfp_warning |= *flag_ptr;
5079
86478875 5080 /* For RDP payload */
6c92d1d0
JS
5081 len = 8;
5082 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
5083 (len + pcmd), ELS_CMD_RDP);
86478875 5084
6c92d1d0 5085 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
86478875 5086 rdp_context->page_a0, rdp_context->page_a2);
6c92d1d0
JS
5087 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
5088 phba);
5089 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
5090 (len + pcmd), &rdp_context->link_stat);
5091 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
5092 (len + pcmd), phba);
5093 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
5094 (len + pcmd), vport, ndlp);
5095 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
4258e98e 5096 &rdp_context->link_stat);
6c92d1d0
JS
5097 /* Check if nport is logged in, BZ190632 */
5098 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
5099 goto lpfc_skip_descriptor;
5100
5101 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
5102 &rdp_context->link_stat, vport);
5103 len += lpfc_rdp_res_oed_temp_desc(phba,
5104 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5105 rdp_context->page_a2);
5106 len += lpfc_rdp_res_oed_voltage_desc(phba,
5107 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5108 rdp_context->page_a2);
5109 len += lpfc_rdp_res_oed_txbias_desc(phba,
5110 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5111 rdp_context->page_a2);
5112 len += lpfc_rdp_res_oed_txpower_desc(phba,
5113 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5114 rdp_context->page_a2);
5115 len += lpfc_rdp_res_oed_rxpower_desc(phba,
5116 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5117 rdp_context->page_a2);
5118 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
5119 rdp_context->page_a0, vport);
5120
5121lpfc_skip_descriptor:
5122 rdp_res->length = cpu_to_be32(len - 8);
86478875
JS
5123 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5124
eb8d68c9
JS
5125 /* Now that we know the true size of the payload, update the BPL */
5126 bpl = (struct ulp_bde64 *)
5127 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
6c92d1d0 5128 bpl->tus.f.bdeSize = len;
eb8d68c9
JS
5129 bpl->tus.f.bdeFlags = 0;
5130 bpl->tus.w = le32_to_cpu(bpl->tus.w);
5131
86478875
JS
5132 phba->fc_stat.elsXmitACC++;
5133 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5134 if (rc == IOCB_ERROR)
5135 lpfc_els_free_iocb(phba, elsiocb);
5136
5137 kfree(rdp_context);
5138
5139 return;
5140error:
5141 cmdsize = 2 * sizeof(uint32_t);
5142 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
5143 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
5144 lpfc_nlp_put(ndlp);
5145 if (!elsiocb)
5146 goto free_rdp_context;
5147
5148 icmd = &elsiocb->iocb;
5149 icmd->ulpContext = rdp_context->rx_id;
5150 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
5151 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5152
5153 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
5154 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
5155 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5156
5157 phba->fc_stat.elsXmitLSRJT++;
5158 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5159 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5160
5161 if (rc == IOCB_ERROR)
5162 lpfc_els_free_iocb(phba, elsiocb);
5163free_rdp_context:
5164 kfree(rdp_context);
5165}
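/*
 * Illustrative sketch, not part of the driver: the accumulation pattern used
 * by lpfc_els_rdp_cmpl() above. The response starts with an 8-byte header
 * (the ACC command word plus the descriptor-list length word); each builder
 * writes one descriptor at the current offset and returns its size, and the
 * length word finally records the payload size minus that 8-byte header.
 * The function and type names below are hypothetical stand-ins.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* htonl() for the big-endian length word */

typedef uint32_t (*ex_rdp_desc_builder)(uint8_t *dest);

static uint32_t ex_build_rdp_payload(uint8_t *pcmd, uint32_t bufsz,
				     const ex_rdp_desc_builder *builders,
				     unsigned int nbuilders)
{
	uint32_t len = 8;	/* command word + descriptor list length word */
	uint32_t be_len;
	unsigned int i;

	memset(pcmd, 0, bufsz);
	for (i = 0; i < nbuilders; i++)
		len += builders[i](pcmd + len);

	be_len = htonl(len - 8);	/* descriptor list length only */
	memcpy(pcmd + 4, &be_len, sizeof(be_len));
	return len;			/* total payload, used to size the BPL */
}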
5166
5167int
5168lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
5169{
5170 LPFC_MBOXQ_t *mbox = NULL;
5171 int rc;
5172
5173 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5174 if (!mbox) {
5175 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
5176 "7105 failed to allocate mailbox memory\n");
5177 return 1;
5178 }
5179
5180 if (lpfc_sli4_dump_page_a0(phba, mbox))
5181 goto prep_mbox_fail;
5182 mbox->vport = rdp_context->ndlp->vport;
5183 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
5184 mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
5185 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5186 if (rc == MBX_NOT_FINISHED)
5187 goto issue_mbox_fail;
5188
5189 return 0;
5190
5191prep_mbox_fail:
5192issue_mbox_fail:
5193 mempool_free(mbox, phba->mbox_mem_pool);
5194 return 1;
5195}
5196
5197/*
5198 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
5199 * @vport: pointer to a host virtual N_Port data structure.
5200 * @cmdiocb: pointer to lpfc command iocb data structure.
5201 * @ndlp: pointer to a node-list data structure.
5202 *
5203 * This routine processes an unsolicited RDP (Read Diagnostic Parameters)
5204 * IOCB. First, the payload of the unsolicited RDP is checked.
5205 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3
5206 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
5207 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl
5208 * to gather all the data and send the RDP response.
5209 *
5210 * Return code
5211 * 0 - Sent the acc response
5212 * 1 - Sent the reject response.
5213 */
5214static int
5215lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5216 struct lpfc_nodelist *ndlp)
5217{
5218 struct lpfc_hba *phba = vport->phba;
5219 struct lpfc_dmabuf *pcmd;
5220 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
5221 struct fc_rdp_req_frame *rdp_req;
5222 struct lpfc_rdp_context *rdp_context;
5223 IOCB_t *cmd = NULL;
5224 struct ls_rjt stat;
5225
5226 if (phba->sli_rev < LPFC_SLI_REV4 ||
5227 (bf_get(lpfc_sli_intf_if_type,
5228 &phba->sli4_hba.sli_intf) !=
5229 LPFC_SLI_INTF_IF_TYPE_2)) {
5230 rjt_err = LSRJT_UNABLE_TPC;
5231 rjt_expl = LSEXP_REQ_UNSUPPORTED;
5232 goto error;
5233 }
5234
5235 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) {
5236 rjt_err = LSRJT_UNABLE_TPC;
5237 rjt_expl = LSEXP_REQ_UNSUPPORTED;
5238 goto error;
5239 }
5240
5241 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5242 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
5243
5244
5245 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5246 "2422 ELS RDP Request "
5247 "dec len %d tag x%x port_id %d len %d\n",
5248 be32_to_cpu(rdp_req->rdp_des_length),
5249 be32_to_cpu(rdp_req->nport_id_desc.tag),
5250 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
5251 be32_to_cpu(rdp_req->nport_id_desc.length));
5252
7d933313
JS
5253 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
5254 !phba->cfg_enable_SmartSAN) {
5255 rjt_err = LSRJT_UNABLE_TPC;
5256 rjt_expl = LSEXP_PORT_LOGIN_REQ;
5257 goto error;
5258 }
86478875
JS
5259 if (sizeof(struct fc_rdp_nport_desc) !=
5260 be32_to_cpu(rdp_req->rdp_des_length))
5261 goto rjt_logerr;
5262 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
5263 goto rjt_logerr;
5264 if (RDP_NPORT_ID_SIZE !=
5265 be32_to_cpu(rdp_req->nport_id_desc.length))
5266 goto rjt_logerr;
699acd62 5267 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
86478875
JS
5268 if (!rdp_context) {
5269 rjt_err = LSRJT_UNABLE_TPC;
5270 goto error;
5271 }
5272
86478875
JS
5273 cmd = &cmdiocb->iocb;
5274 rdp_context->ndlp = lpfc_nlp_get(ndlp);
5275 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
5276 rdp_context->rx_id = cmd->ulpContext;
5277 rdp_context->cmpl = lpfc_els_rdp_cmpl;
5278 if (lpfc_get_rdp_info(phba, rdp_context)) {
5279 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
5280 "2423 Unable to send mailbox\n");
5281 kfree(rdp_context);
5282 rjt_err = LSRJT_UNABLE_TPC;
5283 lpfc_nlp_put(ndlp);
5284 goto error;
5285 }
5286
5287 return 0;
5288
5289rjt_logerr:
5290 rjt_err = LSRJT_LOGICAL_ERR;
5291
5292error:
5293 memset(&stat, 0, sizeof(stat));
5294 stat.un.b.lsRjtRsnCode = rjt_err;
5295 stat.un.b.lsRjtRsnCodeExp = rjt_expl;
5296 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5297 return 1;
5298}
5299
5300
8b017a30
JS
5301static void
5302lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5303{
5304 MAILBOX_t *mb;
5305 IOCB_t *icmd;
5306 uint8_t *pcmd;
5307 struct lpfc_iocbq *elsiocb;
5308 struct lpfc_nodelist *ndlp;
5309 struct ls_rjt *stat;
481ad967 5310 union lpfc_sli4_cfg_shdr *shdr;
8b017a30
JS
5311 struct lpfc_lcb_context *lcb_context;
5312 struct fc_lcb_res_frame *lcb_res;
481ad967 5313 uint32_t cmdsize, shdr_status, shdr_add_status;
8b017a30
JS
5314 int rc;
5315
5316 mb = &pmb->u.mb;
8b017a30
JS
5317 lcb_context = (struct lpfc_lcb_context *)pmb->context1;
5318 ndlp = lcb_context->ndlp;
5319 pmb->context1 = NULL;
5320 pmb->context2 = NULL;
5321
481ad967
JS
5322 shdr = (union lpfc_sli4_cfg_shdr *)
5323 &pmb->u.mqe.un.beacon_config.header.cfg_shdr;
5324 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5325 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5326
5327 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
5328 "0194 SET_BEACON_CONFIG mailbox "
5329 "completed with status x%x add_status x%x,"
5330 " mbx status x%x\n",
5331 shdr_status, shdr_add_status, mb->mbxStatus);
5332
5333 if (mb->mbxStatus && !(shdr_status &&
5334 shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) {
8b017a30
JS
5335 mempool_free(pmb, phba->mbox_mem_pool);
5336 goto error;
5337 }
5338
5339 mempool_free(pmb, phba->mbox_mem_pool);
8b017a30
JS
5340 cmdsize = sizeof(struct fc_lcb_res_frame);
5341 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5342 lpfc_max_els_tries, ndlp,
5343 ndlp->nlp_DID, ELS_CMD_ACC);
5344
5345 /* Decrement the ndlp reference count from previous mbox command */
5346 lpfc_nlp_put(ndlp);
5347
5348 if (!elsiocb)
5349 goto free_lcb_context;
5350
5351 lcb_res = (struct fc_lcb_res_frame *)
5352 (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5353
5354 icmd = &elsiocb->iocb;
5355 icmd->ulpContext = lcb_context->rx_id;
5356 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
5357
5358 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5359 *((uint32_t *)(pcmd)) = ELS_CMD_ACC;
5360 lcb_res->lcb_sub_command = lcb_context->sub_command;
5361 lcb_res->lcb_type = lcb_context->type;
5362 lcb_res->lcb_frequency = lcb_context->frequency;
5363 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5364 phba->fc_stat.elsXmitACC++;
5365 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5366 if (rc == IOCB_ERROR)
5367 lpfc_els_free_iocb(phba, elsiocb);
5368
5369 kfree(lcb_context);
5370 return;
5371
5372error:
5373 cmdsize = sizeof(struct fc_lcb_res_frame);
5374 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5375 lpfc_max_els_tries, ndlp,
5376 ndlp->nlp_DID, ELS_CMD_LS_RJT);
5377 lpfc_nlp_put(ndlp);
5378 if (!elsiocb)
5379 goto free_lcb_context;
5380
5381 icmd = &elsiocb->iocb;
5382 icmd->ulpContext = lcb_context->rx_id;
5383 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
5384 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5385
5386 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
5387 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
5388 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5389
5390 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5391 phba->fc_stat.elsXmitLSRJT++;
5392 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5393 if (rc == IOCB_ERROR)
5394 lpfc_els_free_iocb(phba, elsiocb);
5395free_lcb_context:
5396 kfree(lcb_context);
5397}
5398
5399static int
5400lpfc_sli4_set_beacon(struct lpfc_vport *vport,
5401 struct lpfc_lcb_context *lcb_context,
5402 uint32_t beacon_state)
5403{
5404 struct lpfc_hba *phba = vport->phba;
5405 LPFC_MBOXQ_t *mbox = NULL;
5406 uint32_t len;
5407 int rc;
5408
5409 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5410 if (!mbox)
5411 return 1;
5412
5413 len = sizeof(struct lpfc_mbx_set_beacon_config) -
5414 sizeof(struct lpfc_sli4_cfg_mhdr);
5415 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5416 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
5417 LPFC_SLI4_MBX_EMBED);
5418 mbox->context1 = (void *)lcb_context;
5419 mbox->vport = phba->pport;
5420 mbox->mbox_cmpl = lpfc_els_lcb_rsp;
5421 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
5422 phba->sli4_hba.physical_port);
5423 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
5424 beacon_state);
5425 bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1);
5426 bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0);
5427 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5428 if (rc == MBX_NOT_FINISHED) {
5429 mempool_free(mbox, phba->mbox_mem_pool);
5430 return 1;
5431 }
5432
5433 return 0;
5434}
5435
5436
5437/**
5438 * lpfc_els_rcv_lcb - Process an unsolicited LCB
5439 * @vport: pointer to a host virtual N_Port data structure.
5440 * @cmdiocb: pointer to lpfc command iocb data structure.
5441 * @ndlp: pointer to a node-list data structure.
5442 *
5443 * This routine processes an unsolicited LCB (LINK CABLE BEACON) IOCB.
5444 * First, the payload of the unsolicited LCB is checked.
5445 * Then, based on the subcommand, the beacon is either turned on or off.
5446 *
5447 * Return code
5448 * 0 - Sent the acc response
5449 * 1 - Sent the reject response.
5450 **/
5451static int
5452lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5453 struct lpfc_nodelist *ndlp)
5454{
5455 struct lpfc_hba *phba = vport->phba;
5456 struct lpfc_dmabuf *pcmd;
8b017a30
JS
5457 uint8_t *lp;
5458 struct fc_lcb_request_frame *beacon;
5459 struct lpfc_lcb_context *lcb_context;
5460 uint8_t state, rjt_err;
5461 struct ls_rjt stat;
5462
8b017a30
JS
5463 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
5464 lp = (uint8_t *)pcmd->virt;
5465 beacon = (struct fc_lcb_request_frame *)pcmd->virt;
5466
5467 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5468 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
5469 "type x%x frequency %x duration x%x\n",
5470 lp[0], lp[1], lp[2],
5471 beacon->lcb_command,
5472 beacon->lcb_sub_command,
5473 beacon->lcb_type,
5474 beacon->lcb_frequency,
5475 be16_to_cpu(beacon->lcb_duration));
5476
5477 if (phba->sli_rev < LPFC_SLI_REV4 ||
5478 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
5479 LPFC_SLI_INTF_IF_TYPE_2)) {
5480 rjt_err = LSRJT_CMD_UNSUPPORTED;
5481 goto rjt;
5482 }
8b017a30
JS
5483
5484 if (phba->hba_flag & HBA_FCOE_MODE) {
5485 rjt_err = LSRJT_CMD_UNSUPPORTED;
5486 goto rjt;
5487 }
5488 if (beacon->lcb_frequency == 0) {
5489 rjt_err = LSRJT_CMD_UNSUPPORTED;
5490 goto rjt;
5491 }
5492 if ((beacon->lcb_type != LPFC_LCB_GREEN) &&
5493 (beacon->lcb_type != LPFC_LCB_AMBER)) {
5494 rjt_err = LSRJT_CMD_UNSUPPORTED;
5495 goto rjt;
5496 }
5497 if ((beacon->lcb_sub_command != LPFC_LCB_ON) &&
5498 (beacon->lcb_sub_command != LPFC_LCB_OFF)) {
5499 rjt_err = LSRJT_CMD_UNSUPPORTED;
5500 goto rjt;
5501 }
5502 if ((beacon->lcb_sub_command == LPFC_LCB_ON) &&
5503 (beacon->lcb_type != LPFC_LCB_GREEN) &&
5504 (beacon->lcb_type != LPFC_LCB_AMBER)) {
5505 rjt_err = LSRJT_CMD_UNSUPPORTED;
5506 goto rjt;
5507 }
5508 if (be16_to_cpu(beacon->lcb_duration) != 0) {
5509 rjt_err = LSRJT_CMD_UNSUPPORTED;
5510 goto rjt;
5511 }
5512
e7950423
SM
5513 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL);
5514 if (!lcb_context) {
5515 rjt_err = LSRJT_UNABLE_TPC;
5516 goto rjt;
5517 }
5518
8b017a30
JS
5519 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
5520 lcb_context->sub_command = beacon->lcb_sub_command;
5521 lcb_context->type = beacon->lcb_type;
5522 lcb_context->frequency = beacon->lcb_frequency;
5523 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
5524 lcb_context->rx_id = cmdiocb->iocb.ulpContext;
5525 lcb_context->ndlp = lpfc_nlp_get(ndlp);
5526 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
5527 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
5528 LOG_ELS, "0193 failed to send mailbox\n");
e7950423 5529 kfree(lcb_context);
8b017a30
JS
5530 lpfc_nlp_put(ndlp);
5531 rjt_err = LSRJT_UNABLE_TPC;
5532 goto rjt;
5533 }
5534 return 0;
5535rjt:
5536 memset(&stat, 0, sizeof(stat));
5537 stat.un.b.lsRjtRsnCode = rjt_err;
5538 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5539 return 1;
5540}
5541
5542
e59058c4 5543/**
3621a710 5544 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
e59058c4
JS
5545 * @vport: pointer to a host virtual N_Port data structure.
5546 *
5547 * This routine cleans up any Registration State Change Notification
5548 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
5549 * @vport together with the host_lock is used to prevent multiple threads
5550 * from accessing the RSCN array of the same @vport at the same time.
5551 **/
92d7f7b0 5552void
2e0fef85 5553lpfc_els_flush_rscn(struct lpfc_vport *vport)
dea3101e 5554{
2e0fef85
JS
5555 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5556 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
5557 int i;
5558
7f5f3d0d
JS
5559 spin_lock_irq(shost->host_lock);
5560 if (vport->fc_rscn_flush) {
5561 /* Another thread is walking fc_rscn_id_list on this vport */
5562 spin_unlock_irq(shost->host_lock);
5563 return;
5564 }
5565 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
5566 vport->fc_rscn_flush = 1;
5567 spin_unlock_irq(shost->host_lock);
5568
2e0fef85 5569 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
92d7f7b0 5570 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
2e0fef85 5571 vport->fc_rscn_id_list[i] = NULL;
dea3101e 5572 }
2e0fef85
JS
5573 spin_lock_irq(shost->host_lock);
5574 vport->fc_rscn_id_cnt = 0;
5575 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
5576 spin_unlock_irq(shost->host_lock);
5577 lpfc_can_disctmo(vport);
7f5f3d0d
JS
5578 /* Indicate we are done walking this fc_rscn_id_list */
5579 vport->fc_rscn_flush = 0;
dea3101e
JB
5580}
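/*
 * Illustrative sketch, not part of the driver: the fc_rscn_flush handshake
 * used above (and again in lpfc_rscn_payload_check() and lpfc_els_rcv_rscn()
 * below) is a try-lock built from a flag taken under the host lock, so only
 * one thread at a time walks the fc_rscn_id_list. A pthread-based analogue
 * with hypothetical names:
 */
#include <pthread.h>
#include <stdbool.h>

struct rscn_walk_token {
	pthread_mutex_t lock;		/* stands in for shost->host_lock     */
	bool walk_in_progress;		/* stands in for vport->fc_rscn_flush */
};

/* Returns true if the caller now owns the walk; false if another thread is
 * already walking the RSCN array and the caller should back off.
 */
static bool rscn_walk_begin(struct rscn_walk_token *t)
{
	bool ok;

	pthread_mutex_lock(&t->lock);
	ok = !t->walk_in_progress;
	if (ok)
		t->walk_in_progress = true;
	pthread_mutex_unlock(&t->lock);
	return ok;
}

/* The driver clears the flag without reacquiring the lock once the walk is
 * finished; mirrored here.
 */
static void rscn_walk_end(struct rscn_walk_token *t)
{
	t->walk_in_progress = false;
}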
5581
e59058c4 5582/**
3621a710 5583 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
e59058c4
JS
5584 * @vport: pointer to a host virtual N_Port data structure.
5585 * @did: remote destination port identifier.
5586 *
5587 * This routine checks whether there is any pending Registration State
5588 * Change Notification (RSCN) to a @did on @vport.
5589 *
5590 * Return code
5591 * Non-zero - The @did matched a pending rscn
5592 * 0 - not able to match @did with a pending rscn
5593 **/
dea3101e 5594int
2e0fef85 5595lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
dea3101e
JB
5596{
5597 D_ID ns_did;
5598 D_ID rscn_did;
dea3101e 5599 uint32_t *lp;
92d7f7b0 5600 uint32_t payload_len, i;
7f5f3d0d 5601 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e
JB
5602
5603 ns_did.un.word = did;
dea3101e
JB
5604
5605 /* Never match fabric nodes for RSCNs */
5606 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
2e0fef85 5607 return 0;
dea3101e
JB
5608
5609 /* If we are doing a FULL RSCN rediscovery, match everything */
2e0fef85 5610 if (vport->fc_flag & FC_RSCN_DISCOVERY)
c9f8735b 5611 return did;
dea3101e 5612
7f5f3d0d
JS
5613 spin_lock_irq(shost->host_lock);
5614 if (vport->fc_rscn_flush) {
5615 /* Another thread is walking fc_rscn_id_list on this vport */
5616 spin_unlock_irq(shost->host_lock);
5617 return 0;
5618 }
5619 /* Indicate we are walking fc_rscn_id_list on this vport */
5620 vport->fc_rscn_flush = 1;
5621 spin_unlock_irq(shost->host_lock);
2e0fef85 5622 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
92d7f7b0
JS
5623 lp = vport->fc_rscn_id_list[i]->virt;
5624 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
5625 payload_len -= sizeof(uint32_t); /* take off word 0 */
dea3101e 5626 while (payload_len) {
92d7f7b0
JS
5627 rscn_did.un.word = be32_to_cpu(*lp++);
5628 payload_len -= sizeof(uint32_t);
eaf15d5b
JS
5629 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
5630 case RSCN_ADDRESS_FORMAT_PORT:
6fb120a7
JS
5631 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
5632 && (ns_did.un.b.area == rscn_did.un.b.area)
5633 && (ns_did.un.b.id == rscn_did.un.b.id))
7f5f3d0d 5634 goto return_did_out;
dea3101e 5635 break;
eaf15d5b 5636 case RSCN_ADDRESS_FORMAT_AREA:
dea3101e
JB
5637 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
5638 && (ns_did.un.b.area == rscn_did.un.b.area))
7f5f3d0d 5639 goto return_did_out;
dea3101e 5640 break;
eaf15d5b 5641 case RSCN_ADDRESS_FORMAT_DOMAIN:
dea3101e 5642 if (ns_did.un.b.domain == rscn_did.un.b.domain)
7f5f3d0d 5643 goto return_did_out;
dea3101e 5644 break;
eaf15d5b 5645 case RSCN_ADDRESS_FORMAT_FABRIC:
7f5f3d0d 5646 goto return_did_out;
dea3101e
JB
5647 }
5648 }
92d7f7b0 5649 }
7f5f3d0d
JS
5650 /* Indicate we are done with walking fc_rscn_id_list on this vport */
5651 vport->fc_rscn_flush = 0;
92d7f7b0 5652 return 0;
7f5f3d0d
JS
5653return_did_out:
5654 /* Indicate we are done with walking fc_rscn_id_list on this vport */
5655 vport->fc_rscn_flush = 0;
5656 return did;
dea3101e
JB
5657}
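/*
 * Illustrative sketch, not part of the driver: the address-format matching
 * done in the switch above, written against plain 24-bit port IDs. The
 * format selects how much of the address must match: the whole ID, the
 * domain and area, the domain only, or everything (fabric). Macro and
 * function names here are hypothetical; the cases mirror the switch above.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_RSCN_FMT_PORT	0x0	/* match domain, area and port */
#define EX_RSCN_FMT_AREA	0x1	/* match domain and area       */
#define EX_RSCN_FMT_DOMAIN	0x2	/* match domain only           */
#define EX_RSCN_FMT_FABRIC	0x3	/* match every port            */

static bool rscn_entry_matches(uint32_t rscn_entry, uint32_t did)
{
	switch ((rscn_entry >> 24) & 0x03) {
	case EX_RSCN_FMT_PORT:
		return (rscn_entry & 0xFFFFFF) == (did & 0xFFFFFF);
	case EX_RSCN_FMT_AREA:
		return (rscn_entry & 0xFFFF00) == (did & 0xFFFF00);
	case EX_RSCN_FMT_DOMAIN:
		return (rscn_entry & 0xFF0000) == (did & 0xFF0000);
	case EX_RSCN_FMT_FABRIC:
		return true;
	}
	return false;
}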
5658
e59058c4 5659/**
3621a710 5660 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
e59058c4
JS
5661 * @vport: pointer to a host virtual N_Port data structure.
5662 *
5663 * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the
5664 * state machine for each of the @vport's nodes that matches a pending
5665 * RSCN (Registration State Change Notification).
5666 *
5667 * Return code
5668 * 0 - Successful (currently always returns 0)
5669 **/
dea3101e 5670static int
2e0fef85 5671lpfc_rscn_recovery_check(struct lpfc_vport *vport)
dea3101e 5672{
685f0bf7 5673 struct lpfc_nodelist *ndlp = NULL;
dea3101e 5674
0d2b6b83 5675 /* Move all affected nodes by pending RSCNs to NPR state. */
2e0fef85 5676 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093 5677 if (!NLP_CHK_NODE_ACT(ndlp) ||
0d2b6b83
JS
5678 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
5679 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
685f0bf7 5680 continue;
2e0fef85 5681 lpfc_disc_state_machine(vport, ndlp, NULL,
0d2b6b83
JS
5682 NLP_EVT_DEVICE_RECOVERY);
5683 lpfc_cancel_retry_delay_tmo(vport, ndlp);
dea3101e 5684 }
c9f8735b 5685 return 0;
dea3101e
JB
5686}
5687
ddcc50f0 5688/**
3621a710 5689 * lpfc_send_rscn_event - Send an RSCN event to management application
ddcc50f0
JS
5690 * @vport: pointer to a host virtual N_Port data structure.
5691 * @cmdiocb: pointer to lpfc command iocb data structure.
5692 *
5693 * lpfc_send_rscn_event sends an RSCN netlink event to management
5694 * applications.
5695 */
5696static void
5697lpfc_send_rscn_event(struct lpfc_vport *vport,
5698 struct lpfc_iocbq *cmdiocb)
5699{
5700 struct lpfc_dmabuf *pcmd;
5701 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5702 uint32_t *payload_ptr;
5703 uint32_t payload_len;
5704 struct lpfc_rscn_event_header *rscn_event_data;
5705
5706 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5707 payload_ptr = (uint32_t *) pcmd->virt;
5708 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
5709
5710 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
5711 payload_len, GFP_KERNEL);
5712 if (!rscn_event_data) {
5713 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5714 "0147 Failed to allocate memory for RSCN event\n");
5715 return;
5716 }
5717 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
5718 rscn_event_data->payload_length = payload_len;
5719 memcpy(rscn_event_data->rscn_payload, payload_ptr,
5720 payload_len);
5721
5722 fc_host_post_vendor_event(shost,
5723 fc_get_event_number(),
6599eaaa 5724 sizeof(struct lpfc_rscn_event_header) + payload_len,
ddcc50f0
JS
5725 (char *)rscn_event_data,
5726 LPFC_NL_VENDOR_ID);
5727
5728 kfree(rscn_event_data);
5729}
5730
e59058c4 5731/**
3621a710 5732 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
e59058c4
JS
5733 * @vport: pointer to a host virtual N_Port data structure.
5734 * @cmdiocb: pointer to lpfc command iocb data structure.
5735 * @ndlp: pointer to a node-list data structure.
5736 *
5737 * This routine processes an unsolicited RSCN (Registration State Change
5738 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
5739 * and the fc_host_post_event() routine is invoked for the FC transport
5740 * layer. If the discovery state machine is about to begin discovery, the
5741 * RSCN is simply accepted and the discovery process will satisfy it. If
5742 * this RSCN only contains N_Port IDs for other vports on this HBA, the
5743 * RSCN is accepted and its processing is skipped. If the state machine is
5744 * in the recovery state, the fc_rscn_id_list of this @vport is walked and
5745 * the lpfc_rscn_recovery_check() routine is invoked to send a recovery
5746 * event for all nodes that match the RSCN payload. Otherwise, the
5747 * lpfc_els_handle_rscn() routine is invoked to handle the RSCN event.
5748 *
5749 * Return code
5750 * 0 - Just sent the acc response
5751 * 1 - Sent the acc response and waited for name server completion
5752 **/
dea3101e 5753static int
2e0fef85 5754lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
51ef4c26 5755 struct lpfc_nodelist *ndlp)
dea3101e 5756{
2e0fef85
JS
5757 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5758 struct lpfc_hba *phba = vport->phba;
dea3101e 5759 struct lpfc_dmabuf *pcmd;
92d7f7b0 5760 uint32_t *lp, *datap;
92d7f7b0 5761 uint32_t payload_len, length, nportid, *cmd;
7f5f3d0d 5762 int rscn_cnt;
92d7f7b0 5763 int rscn_id = 0, hba_id = 0;
d2873e4c 5764 int i;
dea3101e 5765
dea3101e
JB
5766 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5767 lp = (uint32_t *) pcmd->virt;
5768
92d7f7b0
JS
5769 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
5770 payload_len -= sizeof(uint32_t); /* take off word 0 */
dea3101e 5771 /* RSCN received */
e8b62011
JS
5772 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5773 "0214 RSCN received Data: x%x x%x x%x x%x\n",
7f5f3d0d
JS
5774 vport->fc_flag, payload_len, *lp,
5775 vport->fc_rscn_id_cnt);
ddcc50f0
JS
5776
5777 /* Send an RSCN event to the management application */
5778 lpfc_send_rscn_event(vport, cmdiocb);
5779
d2873e4c 5780 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
2e0fef85 5781 fc_host_post_event(shost, fc_get_event_number(),
d2873e4c
JS
5782 FCH_EVT_RSCN, lp[i]);
5783
dea3101e
JB
5784 /* If we are about to begin discovery, just ACC the RSCN.
5785 * Discovery processing will satisfy it.
5786 */
2e0fef85 5787 if (vport->port_state <= LPFC_NS_QRY) {
858c9f6c
JS
5788 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5789 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
5790 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
5791
51ef4c26 5792 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
c9f8735b 5793 return 0;
dea3101e
JB
5794 }
5795
92d7f7b0
JS
5796 /* If this RSCN just contains NPortIDs for other vports on this HBA,
5797 * just ACC and ignore it.
5798 */
5799 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3de2a653 5800 !(vport->cfg_peer_port_login)) {
92d7f7b0
JS
5801 i = payload_len;
5802 datap = lp;
5803 while (i > 0) {
5804 nportid = *datap++;
5805 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
5806 i -= sizeof(uint32_t);
5807 rscn_id++;
549e55cd
JS
5808 if (lpfc_find_vport_by_did(phba, nportid))
5809 hba_id++;
92d7f7b0
JS
5810 }
5811 if (rscn_id == hba_id) {
5812 /* ALL NPortIDs in RSCN are on HBA */
e8b62011 5813 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
d7c255b2 5814 "0219 Ignore RSCN "
e8b62011
JS
5815 "Data: x%x x%x x%x x%x\n",
5816 vport->fc_flag, payload_len,
7f5f3d0d 5817 *lp, vport->fc_rscn_id_cnt);
858c9f6c
JS
5818 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5819 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
5820 ndlp->nlp_DID, vport->port_state,
5821 ndlp->nlp_flag);
5822
92d7f7b0 5823 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
51ef4c26 5824 ndlp, NULL);
92d7f7b0
JS
5825 return 0;
5826 }
5827 }
5828
7f5f3d0d
JS
5829 spin_lock_irq(shost->host_lock);
5830 if (vport->fc_rscn_flush) {
5831 /* Another thread is walking fc_rscn_id_list on this vport */
7f5f3d0d 5832 vport->fc_flag |= FC_RSCN_DISCOVERY;
97957244 5833 spin_unlock_irq(shost->host_lock);
58da1ffb
JS
5834 /* Send back ACC */
5835 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7f5f3d0d
JS
5836 return 0;
5837 }
5838 /* Indicate we are walking fc_rscn_id_list on this vport */
5839 vport->fc_rscn_flush = 1;
5840 spin_unlock_irq(shost->host_lock);
af901ca1 5841 /* Get the array count after successfully have the token */
7f5f3d0d 5842 rscn_cnt = vport->fc_rscn_id_cnt;
dea3101e
JB
5843 /* If we are already processing an RSCN, save the received
5844 * RSCN payload buffer, cmdiocb->context2 to process later.
5845 */
2e0fef85 5846 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
858c9f6c
JS
5847 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5848 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
5849 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
5850
09372820 5851 spin_lock_irq(shost->host_lock);
92d7f7b0
JS
5852 vport->fc_flag |= FC_RSCN_DEFERRED;
5853 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
2e0fef85 5854 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
2e0fef85
JS
5855 vport->fc_flag |= FC_RSCN_MODE;
5856 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
5857 if (rscn_cnt) {
5858 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
5859 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
5860 }
5861 if ((rscn_cnt) &&
5862 (payload_len + length <= LPFC_BPL_SIZE)) {
5863 *cmd &= ELS_CMD_MASK;
7f5f3d0d 5864 *cmd |= cpu_to_be32(payload_len + length);
92d7f7b0
JS
5865 memcpy(((uint8_t *)cmd) + length, lp,
5866 payload_len);
5867 } else {
5868 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
5869 vport->fc_rscn_id_cnt++;
5870 /* If we zero, cmdiocb->context2, the calling
5871 * routine will not try to free it.
5872 */
5873 cmdiocb->context2 = NULL;
5874 }
dea3101e 5875 /* Deferred RSCN */
e8b62011
JS
5876 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5877 "0235 Deferred RSCN "
5878 "Data: x%x x%x x%x\n",
5879 vport->fc_rscn_id_cnt, vport->fc_flag,
5880 vport->port_state);
dea3101e 5881 } else {
2e0fef85
JS
5882 vport->fc_flag |= FC_RSCN_DISCOVERY;
5883 spin_unlock_irq(shost->host_lock);
dea3101e 5884 /* ReDiscovery RSCN */
e8b62011
JS
5885 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5886 "0234 ReDiscovery RSCN "
5887 "Data: x%x x%x x%x\n",
5888 vport->fc_rscn_id_cnt, vport->fc_flag,
5889 vport->port_state);
dea3101e 5890 }
7f5f3d0d
JS
5891 /* Indicate we are done walking fc_rscn_id_list on this vport */
5892 vport->fc_rscn_flush = 0;
dea3101e 5893 /* Send back ACC */
51ef4c26 5894 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e 5895 /* send RECOVERY event for ALL nodes that match RSCN payload */
2e0fef85 5896 lpfc_rscn_recovery_check(vport);
09372820 5897 spin_lock_irq(shost->host_lock);
92d7f7b0 5898 vport->fc_flag &= ~FC_RSCN_DEFERRED;
09372820 5899 spin_unlock_irq(shost->host_lock);
c9f8735b 5900 return 0;
dea3101e 5901 }
858c9f6c
JS
5902 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5903 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
5904 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
5905
2e0fef85
JS
5906 spin_lock_irq(shost->host_lock);
5907 vport->fc_flag |= FC_RSCN_MODE;
5908 spin_unlock_irq(shost->host_lock);
5909 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
7f5f3d0d
JS
5910 /* Indicate we are done walking fc_rscn_id_list on this vport */
5911 vport->fc_rscn_flush = 0;
dea3101e
JB
5912 /*
 5913 * If we zero cmdiocb->context2, the calling routine will
5914 * not try to free it.
5915 */
5916 cmdiocb->context2 = NULL;
2e0fef85 5917 lpfc_set_disctmo(vport);
dea3101e 5918 /* Send back ACC */
51ef4c26 5919 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e 5920 /* send RECOVERY event for ALL nodes that match RSCN payload */
2e0fef85 5921 lpfc_rscn_recovery_check(vport);
2e0fef85 5922 return lpfc_els_handle_rscn(vport);
dea3101e
JB
5923}
5924
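
The deferred-RSCN path above appends a newly received payload onto the last saved one and patches the byte count carried in the first payload word. A minimal user-space sketch of that merge, assuming the standard RSCN word-0 layout (command in bits 31-24, page length in bits 23-16, total payload byte count in bits 15-0, stored big-endian); the helper name and mask below are illustrative, not driver identifiers:

/*
 * Illustrative sketch (not driver code): merging a newly received RSCN
 * payload onto a previously saved one, as the deferred-RSCN path does.
 * 'incoming_page_bytes' is the incoming payload length with the command
 * word already subtracted, like payload_len in lpfc_els_rcv_rscn().
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* htonl/ntohl stand in for cpu_to_be32/be32_to_cpu */

void rscn_merge(uint32_t *saved, const uint32_t *incoming,
		uint32_t incoming_page_bytes)
{
	uint32_t word0 = ntohl(saved[0]);
	uint32_t saved_bytes = word0 & 0xffff;	/* count includes word 0 itself */

	/* Append the new affected-address pages after the saved ones,
	 * skipping the incoming command word.
	 */
	memcpy((uint8_t *)saved + saved_bytes, incoming + 1,
	       incoming_page_bytes);

	/* Patch the byte count while preserving command and page length. */
	word0 = (word0 & 0xffff0000u) | (saved_bytes + incoming_page_bytes);
	saved[0] = htonl(word0);
}
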
e59058c4 5925/**
3621a710 5926 * lpfc_els_handle_rscn - Handle rscn for a vport
e59058c4
JS
5927 * @vport: pointer to a host virtual N_Port data structure.
5928 *
 5929 * This routine handles the Registration State Change Notification
5930 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
5931 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
5932 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
5933 * NameServer shall be issued. If CT command to the NameServer fails to be
5934 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
5935 * RSCN activities with the @vport.
5936 *
5937 * Return code
5938 * 0 - Cleaned up rscn on the @vport
5939 * 1 - Wait for plogi to name server before proceed
5940 **/
dea3101e 5941int
2e0fef85 5942lpfc_els_handle_rscn(struct lpfc_vport *vport)
dea3101e
JB
5943{
5944 struct lpfc_nodelist *ndlp;
2e0fef85 5945 struct lpfc_hba *phba = vport->phba;
dea3101e 5946
92d7f7b0
JS
5947 /* Ignore RSCN if the port is being torn down. */
5948 if (vport->load_flag & FC_UNLOADING) {
5949 lpfc_els_flush_rscn(vport);
5950 return 0;
5951 }
5952
dea3101e 5953 /* Start timer for RSCN processing */
2e0fef85 5954 lpfc_set_disctmo(vport);
dea3101e
JB
5955
5956 /* RSCN processed */
e8b62011
JS
5957 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5958 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
5959 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
5960 vport->port_state);
dea3101e
JB
5961
5962 /* To process RSCN, first compare RSCN data with NameServer */
2e0fef85 5963 vport->fc_ns_retry = 0;
0ff10d46
JS
5964 vport->num_disc_nodes = 0;
5965
2e0fef85 5966 ndlp = lpfc_findnode_did(vport, NameServer_DID);
e47c9093
JS
5967 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
5968 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
dea3101e 5969 /* Good ndlp, issue CT Request to NameServer */
92d7f7b0 5970 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
dea3101e
JB
5971 /* Wait for NameServer query cmpl before we can
5972 continue */
c9f8735b 5973 return 1;
dea3101e
JB
5974 } else {
5975 /* If login to NameServer does not exist, issue one */
5976 /* Good status, issue PLOGI to NameServer */
2e0fef85 5977 ndlp = lpfc_findnode_did(vport, NameServer_DID);
e47c9093 5978 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
dea3101e
JB
5979 /* Wait for NameServer login cmpl before we can
5980 continue */
c9f8735b 5981 return 1;
2e0fef85 5982
e47c9093
JS
5983 if (ndlp) {
5984 ndlp = lpfc_enable_node(vport, ndlp,
5985 NLP_STE_PLOGI_ISSUE);
5986 if (!ndlp) {
5987 lpfc_els_flush_rscn(vport);
5988 return 0;
5989 }
5990 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
dea3101e 5991 } else {
e47c9093
JS
5992 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
5993 if (!ndlp) {
5994 lpfc_els_flush_rscn(vport);
5995 return 0;
5996 }
2e0fef85 5997 lpfc_nlp_init(vport, ndlp, NameServer_DID);
5024ab17 5998 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 5999 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
dea3101e 6000 }
e47c9093
JS
6001 ndlp->nlp_type |= NLP_FABRIC;
6002 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
6003 /* Wait for NameServer login cmpl before we can
6004 * continue
6005 */
6006 return 1;
dea3101e
JB
6007 }
6008
2e0fef85 6009 lpfc_els_flush_rscn(vport);
c9f8735b 6010 return 0;
dea3101e
JB
6011}
6012
e59058c4 6013/**
3621a710 6014 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
e59058c4
JS
6015 * @vport: pointer to a host virtual N_Port data structure.
6016 * @cmdiocb: pointer to lpfc command iocb data structure.
6017 * @ndlp: pointer to a node-list data structure.
6018 *
6019 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
6020 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
6021 * point topology. As an unsolicited FLOGI should not be received in a loop
6022 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
6023 * lpfc_check_sparm() routine is invoked to check the parameters in the
6024 * unsolicited FLOGI. If parameters validation failed, the routine
6025 * lpfc_els_rsp_reject() shall be called with reject reason code set to
6026 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
6027 * FLOGI shall be compared with the Port WWN of the @vport to determine who
 6028 * will initiate PLOGI. The party with the higher lexicographical Port WWN
 6029 * value has higher priority (as the winning port) and will initiate PLOGI and
6030 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
6031 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
6032 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
6033 *
6034 * Return code
6035 * 0 - Successfully processed the unsolicited flogi
6036 * 1 - Failed to process the unsolicited flogi
6037 **/
dea3101e 6038static int
2e0fef85 6039lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
51ef4c26 6040 struct lpfc_nodelist *ndlp)
dea3101e 6041{
2e0fef85
JS
6042 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6043 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
6044 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6045 uint32_t *lp = (uint32_t *) pcmd->virt;
6046 IOCB_t *icmd = &cmdiocb->iocb;
6047 struct serv_parm *sp;
6048 LPFC_MBOXQ_t *mbox;
dea3101e
JB
6049 uint32_t cmd, did;
6050 int rc;
e74c03c8
JS
6051 uint32_t fc_flag = 0;
6052 uint32_t port_state = 0;
dea3101e
JB
6053
6054 cmd = *lp++;
6055 sp = (struct serv_parm *) lp;
6056
6057 /* FLOGI received */
6058
2e0fef85 6059 lpfc_set_disctmo(vport);
dea3101e 6060
76a95d75 6061 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
dea3101e
JB
6062 /* We should never receive a FLOGI in loop mode, ignore it */
6063 did = icmd->un.elsreq64.remoteID;
6064
6065 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
6066 Loop Mode */
e8b62011
JS
6067 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6068 "0113 An FLOGI ELS command x%x was "
6069 "received from DID x%x in Loop Mode\n",
6070 cmd, did);
c9f8735b 6071 return 1;
dea3101e
JB
6072 }
6073
d6de08cc 6074 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
dea3101e 6075
dea3101e 6076
d6de08cc
JS
6077 /*
6078 * If our portname is greater than the remote portname,
6079 * then we initiate Nport login.
6080 */
939723a4 6081
d6de08cc
JS
6082 rc = memcmp(&vport->fc_portname, &sp->portName,
6083 sizeof(struct lpfc_name));
939723a4 6084
d6de08cc
JS
6085 if (!rc) {
6086 if (phba->sli_rev < LPFC_SLI_REV4) {
6087 mbox = mempool_alloc(phba->mbox_mem_pool,
6088 GFP_KERNEL);
6089 if (!mbox)
6090 return 1;
6091 lpfc_linkdown(phba);
6092 lpfc_init_link(phba, mbox,
6093 phba->cfg_topology,
6094 phba->cfg_link_speed);
6095 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
6096 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6097 mbox->vport = vport;
6098 rc = lpfc_sli_issue_mbox(phba, mbox,
6099 MBX_NOWAIT);
6100 lpfc_set_loopback_flag(phba);
6101 if (rc == MBX_NOT_FINISHED)
6102 mempool_free(mbox, phba->mbox_mem_pool);
6103 return 1;
6104 }
6105
6106 /* abort the flogi coming back to ourselves
6107 * due to external loopback on the port.
939723a4 6108 */
d6de08cc
JS
6109 lpfc_els_abort_flogi(phba);
6110 return 0;
6111
6112 } else if (rc > 0) { /* greater than */
2e0fef85 6113 spin_lock_irq(shost->host_lock);
d6de08cc 6114 vport->fc_flag |= FC_PT2PT_PLOGI;
2e0fef85 6115 spin_unlock_irq(shost->host_lock);
939723a4 6116
d6de08cc
JS
6117 /* If we have the high WWPN we can assign our own
6118 * myDID; otherwise, we have to WAIT for a PLOGI
6119 * from the remote NPort to find out what it
6120 * will be.
939723a4 6121 */
d6de08cc 6122 vport->fc_myDID = PT2PT_LocalID;
dea3101e 6123 } else {
d6de08cc
JS
6124 vport->fc_myDID = PT2PT_RemoteID;
6125 }
939723a4 6126
d6de08cc
JS
6127 /*
6128 * The vport state should go to LPFC_FLOGI only
6129 * AFTER we issue a FLOGI, not receive one.
6130 */
6131 spin_lock_irq(shost->host_lock);
6132 fc_flag = vport->fc_flag;
6133 port_state = vport->port_state;
6134 vport->fc_flag |= FC_PT2PT;
6135 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6136 spin_unlock_irq(shost->host_lock);
6137 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6138 "3311 Rcv Flogi PS x%x new PS x%x "
6139 "fc_flag x%x new fc_flag x%x\n",
6140 port_state, vport->port_state,
6141 fc_flag, vport->fc_flag);
939723a4 6142
d6de08cc
JS
6143 /*
6144 * We temporarily set fc_myDID to make it look like we are
6145 * a Fabric. This is done just so we end up with the right
6146 * did / sid on the FLOGI ACC rsp.
6147 */
6148 did = vport->fc_myDID;
6149 vport->fc_myDID = Fabric_DID;
dea3101e 6150
d6de08cc 6151 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
eec3d312 6152
dea3101e 6153 /* Send back ACC */
d6de08cc 6154 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
dea3101e 6155
939723a4
JS
 6156 /* Now let's put fc_myDID back to what it's supposed to be */
6157 vport->fc_myDID = did;
6158
c9f8735b 6159 return 0;
dea3101e
JB
6160}
6161
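
In the point-to-point path above, which side initiates PLOGI comes down to a plain lexicographic comparison of the two 8-byte Port WWNs. A minimal standalone sketch of that decision; the wwpn struct and sample values are stand-ins, not driver types:

/*
 * Sketch: point-to-point FLOGI winner selection by Port WWN, mirroring
 * the memcmp() decision in lpfc_els_rcv_flogi(). Illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct wwpn { uint8_t b[8]; };	/* stand-in for struct lpfc_name */

int main(void)
{
	struct wwpn mine   = { { 0x10, 0x00, 0x00, 0x90, 0xfa, 0x01, 0x02, 0x03 } };
	struct wwpn remote = { { 0x10, 0x00, 0x00, 0x90, 0xfa, 0x01, 0x02, 0x04 } };
	int rc = memcmp(&mine, &remote, sizeof(mine));

	if (rc == 0)
		puts("same WWPN: FLOGI looped back to ourselves, abort it");
	else if (rc > 0)
		puts("we win: take PT2PT_LocalID and initiate PLOGI");
	else
		puts("remote wins: take PT2PT_RemoteID and wait for its PLOGI");
	return 0;
}
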
e59058c4 6162/**
3621a710 6163 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
e59058c4
JS
6164 * @vport: pointer to a host virtual N_Port data structure.
6165 * @cmdiocb: pointer to lpfc command iocb data structure.
6166 * @ndlp: pointer to a node-list data structure.
6167 *
6168 * This routine processes Request Node Identification Data (RNID) IOCB
 6169 * received as an ELS unsolicited event. Only when the RNID specifies format
 6170 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
 6171 * does this routine invoke the lpfc_els_rsp_rnid_acc() routine to
6172 * Accept (ACC) the RNID ELS command. All the other RNID formats are
6173 * rejected by invoking the lpfc_els_rsp_reject() routine.
6174 *
6175 * Return code
6176 * 0 - Successfully processed rnid iocb (currently always return 0)
6177 **/
dea3101e 6178static int
2e0fef85
JS
6179lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6180 struct lpfc_nodelist *ndlp)
dea3101e
JB
6181{
6182 struct lpfc_dmabuf *pcmd;
6183 uint32_t *lp;
dea3101e
JB
6184 RNID *rn;
6185 struct ls_rjt stat;
eb016566 6186 uint32_t cmd;
dea3101e 6187
dea3101e
JB
6188 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6189 lp = (uint32_t *) pcmd->virt;
6190
6191 cmd = *lp++;
6192 rn = (RNID *) lp;
6193
6194 /* RNID received */
6195
6196 switch (rn->Format) {
6197 case 0:
6198 case RNID_TOPOLOGY_DISC:
6199 /* Send back ACC */
2e0fef85 6200 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
dea3101e
JB
6201 break;
6202 default:
6203 /* Reject this request because format not supported */
6204 stat.un.b.lsRjtRsvd0 = 0;
6205 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6206 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6207 stat.un.b.vendorUnique = 0;
858c9f6c
JS
6208 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
6209 NULL);
dea3101e 6210 }
c9f8735b 6211 return 0;
dea3101e
JB
6212}
6213
12265f68
JS
6214/**
6215 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
6216 * @vport: pointer to a host virtual N_Port data structure.
6217 * @cmdiocb: pointer to lpfc command iocb data structure.
6218 * @ndlp: pointer to a node-list data structure.
6219 *
6220 * Return code
6221 * 0 - Successfully processed echo iocb (currently always return 0)
6222 **/
6223static int
6224lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6225 struct lpfc_nodelist *ndlp)
6226{
6227 uint8_t *pcmd;
6228
6229 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
6230
6231 /* skip over first word of echo command to find echo data */
6232 pcmd += sizeof(uint32_t);
6233
6234 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
6235 return 0;
6236}
6237
e59058c4 6238/**
3621a710 6239 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
e59058c4
JS
6240 * @vport: pointer to a host virtual N_Port data structure.
6241 * @cmdiocb: pointer to lpfc command iocb data structure.
6242 * @ndlp: pointer to a node-list data structure.
6243 *
 6244 * This routine processes a Link Incident Report Registration (LIRR) IOCB
6245 * received as an ELS unsolicited event. Currently, this function just invokes
6246 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
6247 *
6248 * Return code
6249 * 0 - Successfully processed lirr iocb (currently always return 0)
6250 **/
dea3101e 6251static int
2e0fef85
JS
6252lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6253 struct lpfc_nodelist *ndlp)
7bb3b137
JW
6254{
6255 struct ls_rjt stat;
6256
6257 /* For now, unconditionally reject this command */
6258 stat.un.b.lsRjtRsvd0 = 0;
6259 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6260 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6261 stat.un.b.vendorUnique = 0;
858c9f6c 6262 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7bb3b137
JW
6263 return 0;
6264}
6265
5ffc266e
JS
6266/**
6267 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
6268 * @vport: pointer to a host virtual N_Port data structure.
6269 * @cmdiocb: pointer to lpfc command iocb data structure.
6270 * @ndlp: pointer to a node-list data structure.
6271 *
6272 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
6273 * received as an ELS unsolicited event. A request to RRQ shall only
6274 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
6275 * Nx_Port N_Port_ID of the target Exchange is the same as the
6276 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
6277 * not accepted, an LS_RJT with reason code "Unable to perform
6278 * command request" and reason code explanation "Invalid Originator
6279 * S_ID" shall be returned. For now, we just unconditionally accept
6280 * RRQ from the target.
6281 **/
6282static void
6283lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6284 struct lpfc_nodelist *ndlp)
6285{
6286 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
19ca7609
JS
6287 if (vport->phba->sli_rev == LPFC_SLI_REV4)
6288 lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
5ffc266e
JS
6289}
6290
12265f68
JS
6291/**
6292 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
6293 * @phba: pointer to lpfc hba data structure.
6294 * @pmb: pointer to the driver internal queue element for mailbox command.
6295 *
6296 * This routine is the completion callback function for the MBX_READ_LNK_STAT
 6297 * mailbox command. This callback function actually sends the Accept
 6298 * (ACC) response to a Read Link Error Status Block (RLS) unsolicited IOCB
 6299 * event. It collects the link statistics from the completion of the
 6300 * MBX_READ_LNK_STAT mailbox command, constructs the RLS response with the
 6301 * link statistics collected, and then invokes the lpfc_sli_issue_iocb()
 6302 * routine to send the ACC response to the RLS.
6303 *
6304 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6305 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6306 * will be stored into the context1 field of the IOCB for the completion
 6307 * callback function to the RLS Accept Response ELS IOCB command.
6308 *
6309 **/
6310static void
6311lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6312{
6313 MAILBOX_t *mb;
6314 IOCB_t *icmd;
6315 struct RLS_RSP *rls_rsp;
6316 uint8_t *pcmd;
6317 struct lpfc_iocbq *elsiocb;
6318 struct lpfc_nodelist *ndlp;
7851fe2c
JS
6319 uint16_t oxid;
6320 uint16_t rxid;
12265f68
JS
6321 uint32_t cmdsize;
6322
6323 mb = &pmb->u.mb;
6324
6325 ndlp = (struct lpfc_nodelist *) pmb->context2;
7851fe2c
JS
6326 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
6327 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
12265f68
JS
6328 pmb->context1 = NULL;
6329 pmb->context2 = NULL;
6330
6331 if (mb->mbxStatus) {
6332 mempool_free(pmb, phba->mbox_mem_pool);
6333 return;
6334 }
6335
6336 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
12265f68
JS
6337 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6338 lpfc_max_els_tries, ndlp,
6339 ndlp->nlp_DID, ELS_CMD_ACC);
6340
6341 /* Decrement the ndlp reference count from previous mbox command */
6342 lpfc_nlp_put(ndlp);
6343
37db57e3
JS
6344 if (!elsiocb) {
6345 mempool_free(pmb, phba->mbox_mem_pool);
12265f68 6346 return;
37db57e3 6347 }
12265f68
JS
6348
6349 icmd = &elsiocb->iocb;
7851fe2c
JS
6350 icmd->ulpContext = rxid;
6351 icmd->unsli3.rcvsli3.ox_id = oxid;
12265f68
JS
6352
6353 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6354 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6355 pcmd += sizeof(uint32_t); /* Skip past command */
6356 rls_rsp = (struct RLS_RSP *)pcmd;
6357
6358 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
6359 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
6360 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
6361 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
6362 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
6363 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
37db57e3 6364 mempool_free(pmb, phba->mbox_mem_pool);
12265f68
JS
6365 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
6366 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
6367 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
6368 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
6369 elsiocb->iotag, elsiocb->iocb.ulpContext,
6370 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6371 ndlp->nlp_rpi);
6372 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6373 phba->fc_stat.elsXmitACC++;
6374 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
6375 lpfc_els_free_iocb(phba, elsiocb);
6376}
6377
e59058c4 6378/**
3621a710 6379 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
e59058c4
JS
6380 * @phba: pointer to lpfc hba data structure.
6381 * @pmb: pointer to the driver internal queue element for mailbox command.
6382 *
6383 * This routine is the completion callback function for the MBX_READ_LNK_STAT
6384 * mailbox command. This callback function is to actually send the Accept
6385 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
6386 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
6387 * mailbox command, constructs the RPS response with the link statistics
6388 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
6389 * response to the RPS.
6390 *
6391 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6392 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6393 * will be stored into the context1 field of the IOCB for the completion
6394 * callback function to the RPS Accept Response ELS IOCB command.
6395 *
6396 **/
082c0266 6397static void
329f9bc7 6398lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7bb3b137 6399{
7bb3b137
JW
6400 MAILBOX_t *mb;
6401 IOCB_t *icmd;
6402 RPS_RSP *rps_rsp;
6403 uint8_t *pcmd;
6404 struct lpfc_iocbq *elsiocb;
6405 struct lpfc_nodelist *ndlp;
7851fe2c
JS
6406 uint16_t status;
6407 uint16_t oxid;
6408 uint16_t rxid;
7bb3b137
JW
6409 uint32_t cmdsize;
6410
04c68496 6411 mb = &pmb->u.mb;
7bb3b137
JW
6412
6413 ndlp = (struct lpfc_nodelist *) pmb->context2;
7851fe2c
JS
6414 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
6415 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
041976fb
RD
6416 pmb->context1 = NULL;
6417 pmb->context2 = NULL;
7bb3b137
JW
6418
6419 if (mb->mbxStatus) {
329f9bc7 6420 mempool_free(pmb, phba->mbox_mem_pool);
7bb3b137
JW
6421 return;
6422 }
6423
6424 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
329f9bc7 6425 mempool_free(pmb, phba->mbox_mem_pool);
2e0fef85
JS
6426 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6427 lpfc_max_els_tries, ndlp,
6428 ndlp->nlp_DID, ELS_CMD_ACC);
fa4066b6
JS
6429
6430 /* Decrement the ndlp reference count from previous mbox command */
329f9bc7 6431 lpfc_nlp_put(ndlp);
fa4066b6 6432
c9f8735b 6433 if (!elsiocb)
7bb3b137 6434 return;
7bb3b137
JW
6435
6436 icmd = &elsiocb->iocb;
7851fe2c
JS
6437 icmd->ulpContext = rxid;
6438 icmd->unsli3.rcvsli3.ox_id = oxid;
7bb3b137
JW
6439
6440 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6441 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 6442 pcmd += sizeof(uint32_t); /* Skip past command */
7bb3b137
JW
6443 rps_rsp = (RPS_RSP *)pcmd;
6444
76a95d75 6445 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
7bb3b137
JW
6446 status = 0x10;
6447 else
6448 status = 0x8;
2e0fef85 6449 if (phba->pport->fc_flag & FC_FABRIC)
7bb3b137
JW
6450 status |= 0x4;
6451
6452 rps_rsp->rsvd1 = 0;
09372820
JS
6453 rps_rsp->portStatus = cpu_to_be16(status);
6454 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
6455 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
6456 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
6457 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
6458 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
6459 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
7bb3b137 6460 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
e8b62011
JS
6461 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
6462 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
6463 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
6464 elsiocb->iotag, elsiocb->iocb.ulpContext,
6465 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6466 ndlp->nlp_rpi);
858c9f6c 6467 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7bb3b137 6468 phba->fc_stat.elsXmitACC++;
3772a991 6469 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
7bb3b137 6470 lpfc_els_free_iocb(phba, elsiocb);
7bb3b137
JW
6471 return;
6472}
6473
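
The RPS accept above folds the topology and fabric state into a small port-status bitmask (0x10 for non-loop, 0x8 for loop, 0x4 OR-ed in when attached to a fabric) before byte-swapping it into the response. A compact sketch of just that computation; the symbolic names are illustrative, only the values come from the code above:

/*
 * Sketch: RPS port-status word computation, as in lpfc_els_rsp_rps_acc().
 * The caller converts the result to big-endian for the wire.
 */
#include <stdint.h>
#include <stdbool.h>

#define PORT_STATUS_NON_LOOP	0x10	/* point-to-point / fabric attach */
#define PORT_STATUS_LOOP	0x08	/* arbitrated loop */
#define PORT_STATUS_FABRIC	0x04	/* fabric present */

uint16_t rps_port_status(bool topology_is_loop, bool fabric_attached)
{
	uint16_t status = topology_is_loop ? PORT_STATUS_LOOP
					   : PORT_STATUS_NON_LOOP;

	if (fabric_attached)
		status |= PORT_STATUS_FABRIC;
	return status;
}
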
e59058c4 6474/**
12265f68
JS
6475 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
6476 * @vport: pointer to a host virtual N_Port data structure.
6477 * @cmdiocb: pointer to lpfc command iocb data structure.
6478 * @ndlp: pointer to a node-list data structure.
6479 *
 6480 * This routine processes a Read Link Error Status Block (RLS) IOCB received
 6481 * as an ELS unsolicited event. It first checks the remote port state. If the
 6482 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
 6483 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
 6484 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
 6485 * for reading the HBA link statistics. The callback function,
 6486 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
 6487 * then actually sends out the RLS Accept (ACC) response.
6488 *
6489 * Return codes
6490 * 0 - Successfully processed rls iocb (currently always return 0)
6491 **/
6492static int
6493lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6494 struct lpfc_nodelist *ndlp)
6495{
6496 struct lpfc_hba *phba = vport->phba;
6497 LPFC_MBOXQ_t *mbox;
12265f68
JS
6498 struct ls_rjt stat;
6499
6500 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
6501 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
 6502 /* reject the unsolicited RLS request and be done with it */
6503 goto reject_out;
6504
12265f68
JS
6505 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
6506 if (mbox) {
6507 lpfc_read_lnk_stat(phba, mbox);
7851fe2c
JS
6508 mbox->context1 = (void *)((unsigned long)
6509 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
6510 cmdiocb->iocb.ulpContext)); /* rx_id */
12265f68
JS
6511 mbox->context2 = lpfc_nlp_get(ndlp);
6512 mbox->vport = vport;
6513 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
6514 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
6515 != MBX_NOT_FINISHED)
6516 /* Mbox completion will send ELS Response */
6517 return 0;
6518 /* Decrement reference count used for the failed mbox
6519 * command.
6520 */
6521 lpfc_nlp_put(ndlp);
6522 mempool_free(mbox, phba->mbox_mem_pool);
6523 }
6524reject_out:
6525 /* issue rejection response */
6526 stat.un.b.lsRjtRsvd0 = 0;
6527 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6528 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6529 stat.un.b.vendorUnique = 0;
6530 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6531 return 0;
6532}
6533
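
Both the RLS and RPS handlers stash the exchange identifiers in the mailbox's context1 pointer so the completion callback can address the response: the OX_ID goes into bits 31-16 and the RX_ID into bits 15-0 of a pointer-sized value. A standalone sketch of that pack/unpack; the helper names are illustrative:

/*
 * Sketch: packing OX_ID/RX_ID into a single pointer-sized value, as
 * lpfc_els_rcv_rls()/lpfc_els_rcv_rps() do with mbox->context1, and
 * unpacking it again in the completion callbacks.
 */
#include <assert.h>
#include <stdint.h>

void *pack_xchg_ids(uint16_t ox_id, uint16_t rx_id)
{
	return (void *)(((unsigned long)ox_id << 16) | rx_id);
}

void unpack_xchg_ids(void *ctx, uint16_t *ox_id, uint16_t *rx_id)
{
	*rx_id = (uint16_t)((unsigned long)ctx & 0xffff);
	*ox_id = (uint16_t)(((unsigned long)ctx >> 16) & 0xffff);
}

int main(void)
{
	uint16_t ox, rx;

	unpack_xchg_ids(pack_xchg_ids(0x1234, 0x00ab), &ox, &rx);
	assert(ox == 0x1234 && rx == 0x00ab);
	return 0;
}
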
6534/**
6535 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
6536 * @vport: pointer to a host virtual N_Port data structure.
6537 * @cmdiocb: pointer to lpfc command iocb data structure.
6538 * @ndlp: pointer to a node-list data structure.
6539 *
 6540 * This routine processes a Read Timeout Value (RTV) IOCB received as an
 6541 * ELS unsolicited event. It first checks the remote port state. If the
 6542 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
 6543 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
 6544 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
6545 * Value (RTV) unsolicited IOCB event.
6546 *
6547 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6548 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6549 * will be stored into the context1 field of the IOCB for the completion
 6550 * callback function to the RTV Accept Response ELS IOCB command.
6551 *
6552 * Return codes
6553 * 0 - Successfully processed rtv iocb (currently always return 0)
6554 **/
6555static int
6556lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6557 struct lpfc_nodelist *ndlp)
6558{
6559 struct lpfc_hba *phba = vport->phba;
6560 struct ls_rjt stat;
6561 struct RTV_RSP *rtv_rsp;
6562 uint8_t *pcmd;
6563 struct lpfc_iocbq *elsiocb;
6564 uint32_t cmdsize;
6565
6566
6567 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
6568 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
 6569 /* reject the unsolicited RTV request and be done with it */
6570 goto reject_out;
6571
6572 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
6573 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6574 lpfc_max_els_tries, ndlp,
6575 ndlp->nlp_DID, ELS_CMD_ACC);
6576
6577 if (!elsiocb)
6578 return 1;
6579
6580 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6581 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6582 pcmd += sizeof(uint32_t); /* Skip past command */
6583
6584 /* use the command's xri in the response */
7851fe2c
JS
6585 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
6586 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
12265f68
JS
6587
6588 rtv_rsp = (struct RTV_RSP *)pcmd;
6589
6590 /* populate RTV payload */
6591 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
6592 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
6593 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
6594 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
6595 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
6596
 6597 /* Xmit ELS RTV ACC response tag <ulpIoTag> */
6598 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
6599 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
6600 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
6601 "Data: x%x x%x x%x\n",
6602 elsiocb->iotag, elsiocb->iocb.ulpContext,
6603 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6604 ndlp->nlp_rpi,
6605 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
6606 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6607 phba->fc_stat.elsXmitACC++;
6608 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
6609 lpfc_els_free_iocb(phba, elsiocb);
6610 return 0;
6611
6612reject_out:
6613 /* issue rejection response */
6614 stat.un.b.lsRjtRsvd0 = 0;
6615 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6616 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6617 stat.un.b.vendorUnique = 0;
6618 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6619 return 0;
6620}
6621
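
The RTV accept reports R_A_TOV in milliseconds (the driver keeps it in seconds, hence the * 1000), E_D_TOV as-is, and a qualifier word whose bits are set through bf_set(). A small sketch of the value conversion only, under the assumption that R_A_TOV is held in seconds; the struct below is a stand-in, and the real qualifier bit layout is not reproduced:

/*
 * Sketch: filling the RTV accept payload values, as in lpfc_els_rcv_rtv().
 * Illustrative only; htonl stands in for cpu_to_be32.
 */
#include <stdint.h>
#include <arpa/inet.h>

struct rtv_rsp_sketch {
	uint32_t ratov;		/* R_A_TOV, milliseconds, big-endian */
	uint32_t edtov;		/* E_D_TOV, big-endian */
	uint32_t qtov;		/* qualifier bits; bf_set() details skipped */
};

void fill_rtv(struct rtv_rsp_sketch *rsp, uint32_t ratov_seconds, uint32_t edtov)
{
	rsp->ratov = htonl(ratov_seconds * 1000);	/* report msecs */
	rsp->edtov = htonl(edtov);
	rsp->qtov  = 0;
}
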
6622/* lpfc_els_rcv_rps - Process an unsolicited rps iocb
e59058c4
JS
6623 * @vport: pointer to a host virtual N_Port data structure.
6624 * @cmdiocb: pointer to lpfc command iocb data structure.
6625 * @ndlp: pointer to a node-list data structure.
6626 *
6627 * This routine processes Read Port Status (RPS) IOCB received as an
6628 * ELS unsolicited event. It first checks the remote port state. If the
6629 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
6630 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
 6631 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
 6632 * for reading the HBA link statistics. The callback function,
 6633 * lpfc_els_rsp_rps_acc(), set on the MBX_READ_LNK_STAT mailbox command,
 6634 * then actually sends out the RPS Accept (ACC) response.
6635 *
6636 * Return codes
6637 * 0 - Successfully processed rps iocb (currently always return 0)
6638 **/
7bb3b137 6639static int
2e0fef85
JS
6640lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6641 struct lpfc_nodelist *ndlp)
dea3101e 6642{
2e0fef85 6643 struct lpfc_hba *phba = vport->phba;
dea3101e 6644 uint32_t *lp;
7bb3b137
JW
6645 uint8_t flag;
6646 LPFC_MBOXQ_t *mbox;
6647 struct lpfc_dmabuf *pcmd;
6648 RPS *rps;
6649 struct ls_rjt stat;
6650
2fe165b6 6651 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
90160e01
JS
6652 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
6653 /* reject the unsolicited RPS request and done with it */
6654 goto reject_out;
7bb3b137
JW
6655
6656 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6657 lp = (uint32_t *) pcmd->virt;
6658 flag = (be32_to_cpu(*lp++) & 0xf);
6659 rps = (RPS *) lp;
6660
6661 if ((flag == 0) ||
6662 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
2e0fef85 6663 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
92d7f7b0 6664 sizeof(struct lpfc_name)) == 0))) {
2e0fef85 6665
92d7f7b0
JS
6666 printk("Fix me....\n");
6667 dump_stack();
2e0fef85
JS
6668 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
6669 if (mbox) {
7bb3b137 6670 lpfc_read_lnk_stat(phba, mbox);
7851fe2c
JS
6671 mbox->context1 = (void *)((unsigned long)
6672 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
6673 cmdiocb->iocb.ulpContext)); /* rx_id */
329f9bc7 6674 mbox->context2 = lpfc_nlp_get(ndlp);
92d7f7b0 6675 mbox->vport = vport;
7bb3b137 6676 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
fa4066b6 6677 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
0b727fea 6678 != MBX_NOT_FINISHED)
7bb3b137
JW
6679 /* Mbox completion will send ELS Response */
6680 return 0;
fa4066b6
JS
6681 /* Decrement reference count used for the failed mbox
6682 * command.
6683 */
329f9bc7 6684 lpfc_nlp_put(ndlp);
7bb3b137
JW
6685 mempool_free(mbox, phba->mbox_mem_pool);
6686 }
6687 }
90160e01
JS
6688
6689reject_out:
6690 /* issue rejection response */
7bb3b137
JW
6691 stat.un.b.lsRjtRsvd0 = 0;
6692 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6693 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6694 stat.un.b.vendorUnique = 0;
858c9f6c 6695 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7bb3b137
JW
6696 return 0;
6697}
6698
19ca7609
JS
 6699/* lpfc_issue_els_rrq - Issue an ELS RRQ command to a remote N_Port
6700 * @vport: pointer to a host virtual N_Port data structure.
6701 * @ndlp: pointer to a node-list data structure.
6702 * @did: DID of the target.
6703 * @rrq: Pointer to the rrq struct.
6704 *
 6705 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
 6706 * successful, the completion handler will clear the RRQ.
6707 *
6708 * Return codes
6709 * 0 - Successfully sent rrq els iocb.
6710 * 1 - Failed to send rrq els iocb.
6711 **/
6712static int
6713lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6714 uint32_t did, struct lpfc_node_rrq *rrq)
6715{
6716 struct lpfc_hba *phba = vport->phba;
6717 struct RRQ *els_rrq;
19ca7609
JS
6718 struct lpfc_iocbq *elsiocb;
6719 uint8_t *pcmd;
6720 uint16_t cmdsize;
6721 int ret;
6722
6723
6724 if (ndlp != rrq->ndlp)
6725 ndlp = rrq->ndlp;
6726 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
6727 return 1;
6728
6729 /* If ndlp is not NULL, we will bump the reference count on it */
6730 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
6731 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
6732 ELS_CMD_RRQ);
6733 if (!elsiocb)
6734 return 1;
6735
19ca7609
JS
6736 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6737
6738 /* For RRQ request, remainder of payload is Exchange IDs */
6739 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
6740 pcmd += sizeof(uint32_t);
6741 els_rrq = (struct RRQ *) pcmd;
6742
ee0f4fe1 6743 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
19ca7609
JS
6744 bf_set(rrq_rxid, els_rrq, rrq->rxid);
6745 bf_set(rrq_did, els_rrq, vport->fc_myDID);
6746 els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
6747 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
6748
6749
6750 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6751 "Issue RRQ: did:x%x",
6752 did, rrq->xritag, rrq->rxid);
6753 elsiocb->context_un.rrq = rrq;
6754 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
6755 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6756
6757 if (ret == IOCB_ERROR) {
6758 lpfc_els_free_iocb(phba, elsiocb);
6759 return 1;
6760 }
6761 return 0;
6762}
6763
6764/**
6765 * lpfc_send_rrq - Sends ELS RRQ if needed.
6766 * @phba: pointer to lpfc hba data structure.
6767 * @rrq: pointer to the active rrq.
6768 *
6769 * This routine will call the lpfc_issue_els_rrq if the rrq is
6770 * still active for the xri. If this function returns a failure then
6771 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
6772 *
6773 * Returns 0 Success.
6774 * 1 Failure.
6775 **/
6776int
6777lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
6778{
6779 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
6780 rrq->nlp_DID);
6781 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
6782 return lpfc_issue_els_rrq(rrq->vport, ndlp,
6783 rrq->nlp_DID, rrq);
6784 else
6785 return 1;
6786}
6787
e59058c4 6788/**
3621a710 6789 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
e59058c4
JS
6790 * @vport: pointer to a host virtual N_Port data structure.
6791 * @cmdsize: size of the ELS command.
6792 * @oldiocb: pointer to the original lpfc command iocb data structure.
6793 * @ndlp: pointer to a node-list data structure.
6794 *
 6795 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
6796 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
6797 *
6798 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6799 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6800 * will be stored into the context1 field of the IOCB for the completion
6801 * callback function to the RPL Accept Response ELS command.
6802 *
6803 * Return code
6804 * 0 - Successfully issued ACC RPL ELS command
6805 * 1 - Failed to issue ACC RPL ELS command
6806 **/
082c0266 6807static int
2e0fef85
JS
6808lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
6809 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
7bb3b137 6810{
2e0fef85
JS
6811 struct lpfc_hba *phba = vport->phba;
6812 IOCB_t *icmd, *oldcmd;
7bb3b137
JW
6813 RPL_RSP rpl_rsp;
6814 struct lpfc_iocbq *elsiocb;
7bb3b137 6815 uint8_t *pcmd;
dea3101e 6816
2e0fef85
JS
6817 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6818 ndlp->nlp_DID, ELS_CMD_ACC);
7bb3b137 6819
488d1469 6820 if (!elsiocb)
7bb3b137 6821 return 1;
488d1469 6822
7bb3b137
JW
6823 icmd = &elsiocb->iocb;
6824 oldcmd = &oldiocb->iocb;
7851fe2c
JS
6825 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6826 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
7bb3b137
JW
6827
6828 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6829 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 6830 pcmd += sizeof(uint16_t);
7bb3b137
JW
6831 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
6832 pcmd += sizeof(uint16_t);
6833
6834 /* Setup the RPL ACC payload */
6835 rpl_rsp.listLen = be32_to_cpu(1);
6836 rpl_rsp.index = 0;
6837 rpl_rsp.port_num_blk.portNum = 0;
2e0fef85
JS
6838 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
6839 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
7bb3b137 6840 sizeof(struct lpfc_name));
7bb3b137 6841 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
7bb3b137 6842 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
e8b62011
JS
6843 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6844 "0120 Xmit ELS RPL ACC response tag x%x "
6845 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
6846 "rpi x%x\n",
6847 elsiocb->iotag, elsiocb->iocb.ulpContext,
6848 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6849 ndlp->nlp_rpi);
858c9f6c 6850 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7bb3b137 6851 phba->fc_stat.elsXmitACC++;
3772a991
JS
6852 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
6853 IOCB_ERROR) {
7bb3b137
JW
6854 lpfc_els_free_iocb(phba, elsiocb);
6855 return 1;
6856 }
6857 return 0;
6858}
6859
e59058c4 6860/**
3621a710 6861 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
e59058c4
JS
6862 * @vport: pointer to a host virtual N_Port data structure.
6863 * @cmdiocb: pointer to lpfc command iocb data structure.
6864 * @ndlp: pointer to a node-list data structure.
6865 *
6866 * This routine processes Read Port List (RPL) IOCB received as an ELS
6867 * unsolicited event. It first checks the remote port state. If the remote
6868 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
6869 * invokes the lpfc_els_rsp_reject() routine to send reject response.
6870 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
6871 * to accept the RPL.
6872 *
6873 * Return code
6874 * 0 - Successfully processed rpl iocb (currently always return 0)
6875 **/
7bb3b137 6876static int
2e0fef85
JS
6877lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6878 struct lpfc_nodelist *ndlp)
7bb3b137
JW
6879{
6880 struct lpfc_dmabuf *pcmd;
6881 uint32_t *lp;
6882 uint32_t maxsize;
6883 uint16_t cmdsize;
6884 RPL *rpl;
6885 struct ls_rjt stat;
6886
2fe165b6
JW
6887 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
6888 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
90160e01 6889 /* issue rejection response */
7bb3b137
JW
6890 stat.un.b.lsRjtRsvd0 = 0;
6891 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6892 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6893 stat.un.b.vendorUnique = 0;
858c9f6c
JS
6894 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
6895 NULL);
90160e01
JS
6896 /* rejected the unsolicited RPL request and done with it */
6897 return 0;
7bb3b137
JW
6898 }
6899
dea3101e
JB
6900 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6901 lp = (uint32_t *) pcmd->virt;
7bb3b137 6902 rpl = (RPL *) (lp + 1);
7bb3b137 6903 maxsize = be32_to_cpu(rpl->maxsize);
dea3101e 6904
7bb3b137
JW
6905 /* We support only one port */
6906 if ((rpl->index == 0) &&
6907 ((maxsize == 0) ||
6908 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
6909 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
2fe165b6 6910 } else {
7bb3b137
JW
6911 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
6912 }
2e0fef85 6913 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
dea3101e
JB
6914
6915 return 0;
6916}
6917
e59058c4 6918/**
3621a710 6919 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
e59058c4
JS
6920 * @vport: pointer to a virtual N_Port data structure.
6921 * @cmdiocb: pointer to lpfc command iocb data structure.
6922 * @ndlp: pointer to a node-list data structure.
6923 *
6924 * This routine processes Fibre Channel Address Resolution Protocol
6925 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
6926 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
6927 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
6928 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
6929 * remote PortName is compared against the FC PortName stored in the @vport
6930 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
6931 * compared against the FC NodeName stored in the @vport data structure.
6932 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
6933 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
6934 * invoked to send out FARP Response to the remote node. Before sending the
 6935 * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
6936 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
6937 * routine is invoked to log into the remote port first.
6938 *
6939 * Return code
6940 * 0 - Either the FARP Match Mode not supported or successfully processed
6941 **/
dea3101e 6942static int
2e0fef85
JS
6943lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6944 struct lpfc_nodelist *ndlp)
dea3101e
JB
6945{
6946 struct lpfc_dmabuf *pcmd;
6947 uint32_t *lp;
6948 IOCB_t *icmd;
6949 FARP *fp;
6950 uint32_t cmd, cnt, did;
6951
6952 icmd = &cmdiocb->iocb;
6953 did = icmd->un.elsreq64.remoteID;
6954 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6955 lp = (uint32_t *) pcmd->virt;
6956
6957 cmd = *lp++;
6958 fp = (FARP *) lp;
dea3101e 6959 /* FARP-REQ received from DID <did> */
e8b62011
JS
6960 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6961 "0601 FARP-REQ received from DID x%x\n", did);
dea3101e
JB
6962 /* We will only support match on WWPN or WWNN */
6963 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
c9f8735b 6964 return 0;
dea3101e
JB
6965 }
6966
6967 cnt = 0;
6968 /* If this FARP command is searching for my portname */
6969 if (fp->Mflags & FARP_MATCH_PORT) {
2e0fef85 6970 if (memcmp(&fp->RportName, &vport->fc_portname,
92d7f7b0 6971 sizeof(struct lpfc_name)) == 0)
dea3101e
JB
6972 cnt = 1;
6973 }
6974
6975 /* If this FARP command is searching for my nodename */
6976 if (fp->Mflags & FARP_MATCH_NODE) {
2e0fef85 6977 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
92d7f7b0 6978 sizeof(struct lpfc_name)) == 0)
dea3101e
JB
6979 cnt = 1;
6980 }
6981
6982 if (cnt) {
6983 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
6984 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
6985 /* Log back into the node before sending the FARP. */
6986 if (fp->Rflags & FARP_REQUEST_PLOGI) {
5024ab17 6987 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 6988 lpfc_nlp_set_state(vport, ndlp,
de0c5b32 6989 NLP_STE_PLOGI_ISSUE);
2e0fef85 6990 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
dea3101e
JB
6991 }
6992
6993 /* Send a FARP response to that node */
2e0fef85
JS
6994 if (fp->Rflags & FARP_REQUEST_FARPR)
6995 lpfc_issue_els_farpr(vport, did, 0);
dea3101e
JB
6996 }
6997 }
c9f8735b 6998 return 0;
dea3101e
JB
6999}
7000
e59058c4 7001/**
3621a710 7002 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
e59058c4
JS
7003 * @vport: pointer to a host virtual N_Port data structure.
7004 * @cmdiocb: pointer to lpfc command iocb data structure.
7005 * @ndlp: pointer to a node-list data structure.
7006 *
7007 * This routine processes Fibre Channel Address Resolution Protocol
7008 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
7009 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
7010 * the FARP response request.
7011 *
7012 * Return code
7013 * 0 - Successfully processed FARPR IOCB (currently always return 0)
7014 **/
dea3101e 7015static int
2e0fef85
JS
7016lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7017 struct lpfc_nodelist *ndlp)
dea3101e
JB
7018{
7019 struct lpfc_dmabuf *pcmd;
7020 uint32_t *lp;
7021 IOCB_t *icmd;
7022 uint32_t cmd, did;
7023
7024 icmd = &cmdiocb->iocb;
7025 did = icmd->un.elsreq64.remoteID;
7026 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7027 lp = (uint32_t *) pcmd->virt;
7028
7029 cmd = *lp++;
7030 /* FARP-RSP received from DID <did> */
e8b62011
JS
7031 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7032 "0600 FARP-RSP received from DID x%x\n", did);
dea3101e 7033 /* ACCEPT the Farp resp request */
51ef4c26 7034 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e
JB
7035
7036 return 0;
7037}
7038
e59058c4 7039/**
3621a710 7040 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
e59058c4
JS
7041 * @vport: pointer to a host virtual N_Port data structure.
7042 * @cmdiocb: pointer to lpfc command iocb data structure.
7043 * @fan_ndlp: pointer to a node-list data structure.
7044 *
7045 * This routine processes a Fabric Address Notification (FAN) IOCB
7046 * command received as an ELS unsolicited event. The FAN ELS command will
7047 * only be processed on a physical port (i.e., the @vport represents the
7048 * physical port). The fabric NodeName and PortName from the FAN IOCB are
7049 * compared against those in the phba data structure. If any of those is
7050 * different, the lpfc_initial_flogi() routine is invoked to initialize
 7051 * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
7052 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
7053 * is invoked to register login to the fabric.
7054 *
7055 * Return code
7056 * 0 - Successfully processed fan iocb (currently always return 0).
7057 **/
dea3101e 7058static int
2e0fef85
JS
7059lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7060 struct lpfc_nodelist *fan_ndlp)
dea3101e 7061{
0d2b6b83 7062 struct lpfc_hba *phba = vport->phba;
dea3101e 7063 uint32_t *lp;
5024ab17 7064 FAN *fp;
dea3101e 7065
0d2b6b83
JS
7066 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
7067 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
7068 fp = (FAN *) ++lp;
5024ab17 7069 /* FAN received; Fan does not have a reply sequence */
0d2b6b83
JS
7070 if ((vport == phba->pport) &&
7071 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
5024ab17 7072 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
0d2b6b83 7073 sizeof(struct lpfc_name))) ||
5024ab17 7074 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
0d2b6b83
JS
7075 sizeof(struct lpfc_name)))) {
7076 /* This port has switched fabrics. FLOGI is required */
76a95d75 7077 lpfc_issue_init_vfi(vport);
0d2b6b83
JS
7078 } else {
7079 /* FAN verified - skip FLOGI */
7080 vport->fc_myDID = vport->fc_prevDID;
6fb120a7
JS
7081 if (phba->sli_rev < LPFC_SLI_REV4)
7082 lpfc_issue_fabric_reglogin(vport);
1b51197d
JS
7083 else {
7084 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7085 "3138 Need register VFI: (x%x/%x)\n",
7086 vport->fc_prevDID, vport->fc_myDID);
6fb120a7 7087 lpfc_issue_reg_vfi(vport);
1b51197d 7088 }
5024ab17 7089 }
dea3101e 7090 }
c9f8735b 7091 return 0;
dea3101e
JB
7092}
7093
e59058c4 7094/**
3621a710 7095 * lpfc_els_timeout - Handler funciton to the els timer
e59058c4
JS
7096 * @ptr: holder for the timer function associated data.
7097 *
7098 * This routine is invoked by the ELS timer after timeout. It posts the ELS
7099 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
7100 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
7101 * up the worker thread. It is for the worker thread to invoke the routine
7102 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
7103 **/
dea3101e
JB
7104void
7105lpfc_els_timeout(unsigned long ptr)
7106{
2e0fef85
JS
7107 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
7108 struct lpfc_hba *phba = vport->phba;
5e9d9b82 7109 uint32_t tmo_posted;
dea3101e
JB
7110 unsigned long iflag;
7111
2e0fef85 7112 spin_lock_irqsave(&vport->work_port_lock, iflag);
5e9d9b82 7113 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
06918ac5 7114 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
2e0fef85 7115 vport->work_port_events |= WORKER_ELS_TMO;
5e9d9b82 7116 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
92d7f7b0 7117
06918ac5 7118 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
5e9d9b82 7119 lpfc_worker_wake_up(phba);
dea3101e
JB
7120 return;
7121}
7122
2a9bf3d0 7123
e59058c4 7124/**
3621a710 7125 * lpfc_els_timeout_handler - Process an els timeout event
e59058c4
JS
7126 * @vport: pointer to a virtual N_Port data structure.
7127 *
7128 * This routine is the actual handler function that processes an ELS timeout
7129 * event. It walks the ELS ring to get and abort all the IOCBs (except the
7130 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
7131 * invoking the lpfc_sli_issue_abort_iotag() routine.
7132 **/
dea3101e 7133void
2e0fef85 7134lpfc_els_timeout_handler(struct lpfc_vport *vport)
dea3101e 7135{
2e0fef85 7136 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
7137 struct lpfc_sli_ring *pring;
7138 struct lpfc_iocbq *tmp_iocb, *piocb;
7139 IOCB_t *cmd = NULL;
7140 struct lpfc_dmabuf *pcmd;
2e0fef85 7141 uint32_t els_command = 0;
dea3101e 7142 uint32_t timeout;
2e0fef85 7143 uint32_t remote_ID = 0xffffffff;
2a9bf3d0
JS
7144 LIST_HEAD(abort_list);
7145
dea3101e 7146
dea3101e
JB
7147 timeout = (uint32_t)(phba->fc_ratov << 1);
7148
7149 pring = &phba->sli.ring[LPFC_ELS_RING];
06918ac5
JS
7150 if ((phba->pport->load_flag & FC_UNLOADING))
7151 return;
2a9bf3d0 7152 spin_lock_irq(&phba->hbalock);
0976e1a6
JS
7153 if (phba->sli_rev == LPFC_SLI_REV4)
7154 spin_lock(&pring->ring_lock);
2a9bf3d0 7155
06918ac5
JS
7156 if ((phba->pport->load_flag & FC_UNLOADING)) {
7157 if (phba->sli_rev == LPFC_SLI_REV4)
7158 spin_unlock(&pring->ring_lock);
7159 spin_unlock_irq(&phba->hbalock);
7160 return;
7161 }
7162
0976e1a6 7163 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
dea3101e
JB
7164 cmd = &piocb->iocb;
7165
2e0fef85
JS
7166 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
7167 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
7168 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
dea3101e 7169 continue;
2e0fef85
JS
7170
7171 if (piocb->vport != vport)
7172 continue;
7173
dea3101e 7174 pcmd = (struct lpfc_dmabuf *) piocb->context2;
2e0fef85
JS
7175 if (pcmd)
7176 els_command = *(uint32_t *) (pcmd->virt);
dea3101e 7177
92d7f7b0
JS
7178 if (els_command == ELS_CMD_FARP ||
7179 els_command == ELS_CMD_FARPR ||
7180 els_command == ELS_CMD_FDISC)
7181 continue;
7182
dea3101e 7183 if (piocb->drvrTimeout > 0) {
92d7f7b0 7184 if (piocb->drvrTimeout >= timeout)
dea3101e 7185 piocb->drvrTimeout -= timeout;
92d7f7b0 7186 else
dea3101e 7187 piocb->drvrTimeout = 0;
dea3101e
JB
7188 continue;
7189 }
7190
2e0fef85
JS
7191 remote_ID = 0xffffffff;
7192 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
dea3101e 7193 remote_ID = cmd->un.elsreq64.remoteID;
2e0fef85
JS
7194 else {
7195 struct lpfc_nodelist *ndlp;
7196 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
58da1ffb 7197 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
2e0fef85 7198 remote_ID = ndlp->nlp_DID;
dea3101e 7199 }
2a9bf3d0
JS
7200 list_add_tail(&piocb->dlist, &abort_list);
7201 }
0976e1a6
JS
7202 if (phba->sli_rev == LPFC_SLI_REV4)
7203 spin_unlock(&pring->ring_lock);
2a9bf3d0
JS
7204 spin_unlock_irq(&phba->hbalock);
7205
7206 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
15026c9e 7207 cmd = &piocb->iocb;
e8b62011 7208 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2a9bf3d0
JS
7209 "0127 ELS timeout Data: x%x x%x x%x "
7210 "x%x\n", els_command,
7211 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
7212 spin_lock_irq(&phba->hbalock);
7213 list_del_init(&piocb->dlist);
07951076 7214 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
2a9bf3d0 7215 spin_unlock_irq(&phba->hbalock);
dea3101e 7216 }
5a0e326d 7217
0e9bb8d7 7218 if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
06918ac5
JS
7219 if (!(phba->pport->load_flag & FC_UNLOADING))
7220 mod_timer(&vport->els_tmofunc,
7221 jiffies + msecs_to_jiffies(1000 * timeout));
dea3101e
JB
7222}
7223
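
The timeout handler runs on a period of twice R_A_TOV and ages each outstanding ELS IOCB by that amount on every pass; only commands whose per-IOCB drvrTimeout has already drained to zero are collected for abort. A small sketch of that aging step; the struct and function names are illustrative, the arithmetic mirrors the code above:

/*
 * Sketch: per-IOCB timeout aging, as in lpfc_els_timeout_handler().
 * Returns nonzero when the command has fully aged out and should be
 * queued for abort.
 */
#include <stdint.h>

struct els_cmd_sketch {
	uint32_t drvr_timeout;	/* remaining time, same units as R_A_TOV */
};

int els_cmd_aged_out(struct els_cmd_sketch *cmd, uint32_t ratov)
{
	uint32_t timeout = ratov << 1;	/* handler period: 2 * R_A_TOV */

	if (cmd->drvr_timeout > 0) {
		cmd->drvr_timeout = (cmd->drvr_timeout >= timeout)
					? cmd->drvr_timeout - timeout : 0;
		return 0;		/* still has time left this pass */
	}
	return 1;			/* already expired: abort it */
}
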
e59058c4 7224/**
3621a710 7225 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
e59058c4
JS
7226 * @vport: pointer to a host virtual N_Port data structure.
7227 *
7228 * This routine is used to clean up all the outstanding ELS commands on a
7229 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
7230 * routine. After that, it walks the ELS transmit queue to remove all the
7231 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
7232 * the IOCBs with a non-NULL completion callback function, the callback
7233 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
7234 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
7235 * callback function, the IOCB will simply be released. Finally, it walks
7236 * the ELS transmit completion queue to issue an abort IOCB to any transmit
7237 * completion queue IOCB that is associated with the @vport and is not
7238 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
7239 * part of the discovery state machine) out to HBA by invoking the
7240 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
 7242 * abort IOCB to any transmit completion queued IOCB, it does not guarantee
 7243 * that the IOCBs are aborted when this function returns.
7243 **/
dea3101e 7244void
2e0fef85 7245lpfc_els_flush_cmd(struct lpfc_vport *vport)
dea3101e 7246{
0976e1a6 7247 LIST_HEAD(abort_list);
2e0fef85 7248 struct lpfc_hba *phba = vport->phba;
329f9bc7 7249 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
dea3101e
JB
7250 struct lpfc_iocbq *tmp_iocb, *piocb;
7251 IOCB_t *cmd = NULL;
92d7f7b0
JS
7252
7253 lpfc_fabric_abort_vport(vport);
0976e1a6
JS
7254 /*
7255 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
7256 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
7257 * ultimately grabs the ring_lock, the driver must splice the list into
7258 * a working list and release the locks before calling the abort.
7259 */
7260 spin_lock_irq(&phba->hbalock);
7261 if (phba->sli_rev == LPFC_SLI_REV4)
7262 spin_lock(&pring->ring_lock);
7263
7264 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
7265 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
7266 continue;
7267
7268 if (piocb->vport != vport)
7269 continue;
7270 list_add_tail(&piocb->dlist, &abort_list);
7271 }
7272 if (phba->sli_rev == LPFC_SLI_REV4)
7273 spin_unlock(&pring->ring_lock);
7274 spin_unlock_irq(&phba->hbalock);
7275 /* Abort each iocb on the aborted list and remove the dlist links. */
7276 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
7277 spin_lock_irq(&phba->hbalock);
7278 list_del_init(&piocb->dlist);
7279 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
7280 spin_unlock_irq(&phba->hbalock);
7281 }
7282 if (!list_empty(&abort_list))
7283 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7284 "3387 abort list for txq not empty\n");
7285 INIT_LIST_HEAD(&abort_list);
dea3101e 7286
2e0fef85 7287 spin_lock_irq(&phba->hbalock);
0976e1a6
JS
7288 if (phba->sli_rev == LPFC_SLI_REV4)
7289 spin_lock(&pring->ring_lock);
7290
dea3101e
JB
7291 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
7292 cmd = &piocb->iocb;
7293
7294 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
7295 continue;
7296 }
7297
7298 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
329f9bc7
JS
7299 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
7300 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
7301 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
7302 cmd->ulpCommand == CMD_ABORT_XRI_CN)
dea3101e 7303 continue;
dea3101e 7304
2e0fef85
JS
7305 if (piocb->vport != vport)
7306 continue;
7307
0976e1a6
JS
7308 list_del_init(&piocb->list);
7309 list_add_tail(&piocb->list, &abort_list);
dea3101e 7310 }
0976e1a6
JS
7311 if (phba->sli_rev == LPFC_SLI_REV4)
7312 spin_unlock(&pring->ring_lock);
2e0fef85 7313 spin_unlock_irq(&phba->hbalock);
2534ba75 7314
a257bf90 7315 /* Cancel all the IOCBs from the completion list */
0976e1a6
JS
7316 lpfc_sli_cancel_iocbs(phba, &abort_list,
7317 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
2534ba75 7318
dea3101e
JB
7319 return;
7320}
7321
e59058c4 7322/**
3621a710 7323 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
e59058c4
JS
7324 * @phba: pointer to lpfc hba data structure.
7325 *
7326 * This routine is used to clean up all the outstanding ELS commands on a
7327 * @phba. It walks the HBA's vport list and invokes lpfc_els_flush_cmd()
7328 * on each vport. For every vport, that routine aborts the fabric IOCBs
7329 * and issues an abort IOCB, via the lpfc_sli_issue_abort_iotag() routine,
7330 * for each transmit completion queue IOCB that is not an IOCB from libdfc
7331 * (i.e., the management plane IOCBs that are not part of the discovery
7332 * state machine). It then removes from the ELS transmit queue all the
7333 * IOCBs other than the QUE_RING and ABORT/CLOSE IOCBs. IOCBs with a
7334 * completion callback function associated are completed with the status
7335 * set to IOSTAT_LOCAL_REJECT and un.ulpWord[4] set to IOERR_SLI_ABORTED;
7336 * IOCBs without a completion callback function associated are simply
7337 * released.
7338 **/
549e55cd
JS
7339void
7340lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
7341{
0976e1a6
JS
7342 struct lpfc_vport *vport;
7343 list_for_each_entry(vport, &phba->port_list, listentry)
7344 lpfc_els_flush_cmd(vport);
a257bf90 7345
549e55cd
JS
7346 return;
7347}
7348
ea2151b4 7349/**
3621a710 7350 * lpfc_send_els_failure_event - Posts an ELS command failure event
ea2151b4
JS
7351 * @phba: Pointer to hba context object.
7352 * @cmdiocbp: Pointer to command iocb which reported error.
7353 * @rspiocbp: Pointer to response iocb which reported error.
7354 *
7355 * This function sends an event when there is an ELS command
7356 * failure.
7357 **/
7358void
7359lpfc_send_els_failure_event(struct lpfc_hba *phba,
7360 struct lpfc_iocbq *cmdiocbp,
7361 struct lpfc_iocbq *rspiocbp)
7362{
7363 struct lpfc_vport *vport = cmdiocbp->vport;
7364 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7365 struct lpfc_lsrjt_event lsrjt_event;
7366 struct lpfc_fabric_event_header fabric_event;
7367 struct ls_rjt stat;
7368 struct lpfc_nodelist *ndlp;
7369 uint32_t *pcmd;
7370
7371 ndlp = cmdiocbp->context1;
7372 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
7373 return;
7374
7375 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
7376 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
7377 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
7378 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
7379 sizeof(struct lpfc_name));
7380 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
7381 sizeof(struct lpfc_name));
7382 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
7383 cmdiocbp->context2)->virt);
49198b37 7384 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
ea2151b4
JS
7385 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
7386 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
7387 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
7388 fc_host_post_vendor_event(shost,
7389 fc_get_event_number(),
7390 sizeof(lsrjt_event),
7391 (char *)&lsrjt_event,
ddcc50f0 7392 LPFC_NL_VENDOR_ID);
ea2151b4
JS
7393 return;
7394 }
7395 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
7396 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
7397 fabric_event.event_type = FC_REG_FABRIC_EVENT;
7398 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
7399 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
7400 else
7401 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
7402 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
7403 sizeof(struct lpfc_name));
7404 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
7405 sizeof(struct lpfc_name));
7406 fc_host_post_vendor_event(shost,
7407 fc_get_event_number(),
7408 sizeof(fabric_event),
7409 (char *)&fabric_event,
ddcc50f0 7410 LPFC_NL_VENDOR_ID);
ea2151b4
JS
7411 return;
7412 }
7413
7414}
7415
7416/**
3621a710 7417 * lpfc_send_els_event - Posts unsolicited els event
ea2151b4
JS
7418 * @vport: Pointer to vport object.
7419 * @ndlp: Pointer FC node object.
7420 * @cmd: ELS command code.
7421 *
7422 * This function posts an event when there is an incoming
7423 * unsolicited ELS command.
7424 **/
7425static void
7426lpfc_send_els_event(struct lpfc_vport *vport,
7427 struct lpfc_nodelist *ndlp,
ddcc50f0 7428 uint32_t *payload)
ea2151b4 7429{
ddcc50f0
JS
7430 struct lpfc_els_event_header *els_data = NULL;
7431 struct lpfc_logo_event *logo_data = NULL;
ea2151b4
JS
7432 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7433
ddcc50f0
JS
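	/* A LOGO event also carries the remote port's WWPN, so it needs
	 * the larger lpfc_logo_event structure.
	 */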
7434 if (*payload == ELS_CMD_LOGO) {
7435 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
7436 if (!logo_data) {
7437 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7438 "0148 Failed to allocate memory "
7439 "for LOGO event\n");
7440 return;
7441 }
7442 els_data = &logo_data->header;
7443 } else {
7444 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
7445 GFP_KERNEL);
7446 if (!els_data) {
7447 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7448 "0149 Failed to allocate memory "
7449 "for ELS event\n");
7450 return;
7451 }
7452 }
7453 els_data->event_type = FC_REG_ELS_EVENT;
7454 switch (*payload) {
ea2151b4 7455 case ELS_CMD_PLOGI:
ddcc50f0 7456 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
ea2151b4
JS
7457 break;
7458 case ELS_CMD_PRLO:
ddcc50f0 7459 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
ea2151b4
JS
7460 break;
7461 case ELS_CMD_ADISC:
ddcc50f0
JS
7462 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
7463 break;
7464 case ELS_CMD_LOGO:
7465 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
7466 /* Copy the WWPN in the LOGO payload */
7467 memcpy(logo_data->logo_wwpn, &payload[2],
7468 sizeof(struct lpfc_name));
ea2151b4
JS
7469 break;
7470 default:
e916141c 7471 kfree(els_data);
ea2151b4
JS
7472 return;
7473 }
ddcc50f0
JS
7474 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
7475 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
7476 if (*payload == ELS_CMD_LOGO) {
7477 fc_host_post_vendor_event(shost,
7478 fc_get_event_number(),
7479 sizeof(struct lpfc_logo_event),
7480 (char *)logo_data,
7481 LPFC_NL_VENDOR_ID);
7482 kfree(logo_data);
7483 } else {
7484 fc_host_post_vendor_event(shost,
7485 fc_get_event_number(),
7486 sizeof(struct lpfc_els_event_header),
7487 (char *)els_data,
7488 LPFC_NL_VENDOR_ID);
7489 kfree(els_data);
7490 }
ea2151b4
JS
7491
7492 return;
7493}
7494
7495
e59058c4 7496/**
3621a710 7497 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
e59058c4
JS
7498 * @phba: pointer to lpfc hba data structure.
7499 * @pring: pointer to a SLI ring.
7500 * @vport: pointer to a host virtual N_Port data structure.
7501 * @elsiocb: pointer to lpfc els command iocb data structure.
7502 *
7503 * This routine is used for processing the IOCB associated with an unsolicited
7504 * event. It first determines whether there is an existing ndlp that matches
7505 * the DID from the unsolicited IOCB. If not, it will create a new one with
7506 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
7507 * IOCB is then used to invoke the proper routine and to set up proper state
7508 * of the discovery state machine.
7509 **/
ed957684
JS
7510static void
7511lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
92d7f7b0 7512 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
dea3101e 7513{
87af33fe 7514 struct Scsi_Host *shost;
dea3101e 7515 struct lpfc_nodelist *ndlp;
dea3101e 7516 struct ls_rjt stat;
92d7f7b0 7517 uint32_t *payload;
303f2f9c
JS
7518 uint32_t cmd, did, newnode;
7519 uint8_t rjt_exp, rjt_err = 0;
ed957684 7520 IOCB_t *icmd = &elsiocb->iocb;
dea3101e 7521
e47c9093 7522 if (!vport || !(elsiocb->context2))
dea3101e 7523 goto dropit;
2e0fef85 7524
dea3101e 7525 newnode = 0;
92d7f7b0
JS
7526 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
7527 cmd = *payload;
ed957684 7528 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
495a714c 7529 lpfc_post_buffer(phba, pring, 1);
dea3101e 7530
858c9f6c
JS
7531 did = icmd->un.rcvels.remoteID;
7532 if (icmd->ulpStatus) {
7533 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7534 "RCV Unsol ELS: status:x%x/x%x did:x%x",
7535 icmd->ulpStatus, icmd->un.ulpWord[4], did);
dea3101e 7536 goto dropit;
858c9f6c 7537 }
dea3101e
JB
7538
7539 /* Check to see if link went down during discovery */
ed957684 7540 if (lpfc_els_chk_latt(vport))
dea3101e 7541 goto dropit;
dea3101e 7542
c868595d 7543 /* Ignore traffic received during vport shutdown. */
92d7f7b0
JS
7544 if (vport->load_flag & FC_UNLOADING)
7545 goto dropit;
7546
92494144
JS
7547 /* If NPort discovery is delayed drop incoming ELS */
7548 if ((vport->fc_flag & FC_DISC_DELAYED) &&
7549 (cmd != ELS_CMD_PLOGI))
7550 goto dropit;
7551
2e0fef85 7552 ndlp = lpfc_findnode_did(vport, did);
c9f8735b 7553 if (!ndlp) {
dea3101e 7554 /* Cannot find existing Fabric ndlp, so allocate a new one */
c9f8735b 7555 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
ed957684 7556 if (!ndlp)
dea3101e 7557 goto dropit;
dea3101e 7558
2e0fef85 7559 lpfc_nlp_init(vport, ndlp, did);
98c9ea5c 7560 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
dea3101e 7561 newnode = 1;
e47c9093 7562 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
dea3101e 7563 ndlp->nlp_type |= NLP_FABRIC;
58da1ffb
JS
7564 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
7565 ndlp = lpfc_enable_node(vport, ndlp,
7566 NLP_STE_UNUSED_NODE);
7567 if (!ndlp)
7568 goto dropit;
7569 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
7570 newnode = 1;
7571 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
7572 ndlp->nlp_type |= NLP_FABRIC;
7573 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
7574 /* This is similar to the new node path */
7575 ndlp = lpfc_nlp_get(ndlp);
7576 if (!ndlp)
7577 goto dropit;
7578 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
7579 newnode = 1;
87af33fe 7580 }
dea3101e
JB
7581
7582 phba->fc_stat.elsRcvFrame++;
e47c9093 7583
12838e74
JS
7584 /*
7585 * Do not process any unsolicited ELS commands
7586 * if the ndlp is in DEV_LOSS
7587 */
466e840b
JS
7588 shost = lpfc_shost_from_vport(vport);
7589 spin_lock_irq(shost->host_lock);
7590 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
7591 spin_unlock_irq(shost->host_lock);
12838e74 7592 goto dropit;
466e840b
JS
7593 }
7594 spin_unlock_irq(shost->host_lock);
12838e74 7595
329f9bc7 7596 elsiocb->context1 = lpfc_nlp_get(ndlp);
2e0fef85 7597 elsiocb->vport = vport;
dea3101e
JB
7598
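	/* The low-order bits of an RSCN command word vary with the payload
	 * length; mask them off so the switch below matches ELS_CMD_RSCN.
	 */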
7599 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
7600 cmd &= ELS_CMD_MASK;
7601 }
7602 /* ELS command <elsCmd> received from NPORT <did> */
e8b62011
JS
7603 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7604 "0112 ELS command x%x received from NPORT x%x "
e74c03c8
JS
7605 "Data: x%x x%x x%x x%x\n",
7606 cmd, did, vport->port_state, vport->fc_flag,
7607 vport->fc_myDID, vport->fc_prevDID);
eec3d312
JS
7608
7609 /* reject till our FLOGI completes */
7610 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
d6de08cc 7611 (cmd != ELS_CMD_FLOGI)) {
eec3d312
JS
7612 rjt_err = LSRJT_UNABLE_TPC;
7613 rjt_exp = LSEXP_NOTHING_MORE;
7614 goto lsrjt;
7615 }
7616
dea3101e
JB
7617 switch (cmd) {
7618 case ELS_CMD_PLOGI:
858c9f6c
JS
7619 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7620 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
7621 did, vport->port_state, ndlp->nlp_flag);
7622
dea3101e 7623 phba->fc_stat.elsRcvPLOGI++;
858c9f6c 7624 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
e74c03c8
JS
7625 if (phba->sli_rev == LPFC_SLI_REV4 &&
7626 (phba->pport->fc_flag & FC_PT2PT)) {
7627 vport->fc_prevDID = vport->fc_myDID;
7628 /* Our DID needs to be updated before registering
7629 * the vfi. This is done in lpfc_rcv_plogi but
7630 * that is called after the reg_vfi.
7631 */
7632 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
7633 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7634 "3312 Remote port assigned DID x%x "
7635 "%x\n", vport->fc_myDID,
7636 vport->fc_prevDID);
7637 }
858c9f6c 7638
ddcc50f0 7639 lpfc_send_els_event(vport, ndlp, payload);
92494144
JS
7640
7641 /* If Nport discovery is delayed, reject PLOGIs */
7642 if (vport->fc_flag & FC_DISC_DELAYED) {
7643 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7644 rjt_exp = LSEXP_NOTHING_MORE;
92494144
JS
7645 break;
7646 }
d6de08cc 7647
858c9f6c 7648 if (vport->port_state < LPFC_DISC_AUTH) {
1b32f6aa
JS
7649 if (!(phba->pport->fc_flag & FC_PT2PT) ||
7650 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
7651 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7652 rjt_exp = LSEXP_NOTHING_MORE;
1b32f6aa
JS
7653 break;
7654 }
dea3101e 7655 }
87af33fe 7656
87af33fe
JS
7657 spin_lock_irq(shost->host_lock);
7658 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
7659 spin_unlock_irq(shost->host_lock);
7660
2e0fef85
JS
7661 lpfc_disc_state_machine(vport, ndlp, elsiocb,
7662 NLP_EVT_RCV_PLOGI);
858c9f6c 7663
dea3101e
JB
7664 break;
7665 case ELS_CMD_FLOGI:
858c9f6c
JS
7666 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7667 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
7668 did, vport->port_state, ndlp->nlp_flag);
7669
dea3101e 7670 phba->fc_stat.elsRcvFLOGI++;
51ef4c26 7671 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
87af33fe 7672 if (newnode)
98c9ea5c 7673 lpfc_nlp_put(ndlp);
dea3101e
JB
7674 break;
7675 case ELS_CMD_LOGO:
858c9f6c
JS
7676 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7677 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
7678 did, vport->port_state, ndlp->nlp_flag);
7679
dea3101e 7680 phba->fc_stat.elsRcvLOGO++;
ddcc50f0 7681 lpfc_send_els_event(vport, ndlp, payload);
2e0fef85 7682 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 7683 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7684 rjt_exp = LSEXP_NOTHING_MORE;
dea3101e
JB
7685 break;
7686 }
2e0fef85 7687 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
dea3101e
JB
7688 break;
7689 case ELS_CMD_PRLO:
858c9f6c
JS
7690 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7691 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
7692 did, vport->port_state, ndlp->nlp_flag);
7693
dea3101e 7694 phba->fc_stat.elsRcvPRLO++;
ddcc50f0 7695 lpfc_send_els_event(vport, ndlp, payload);
2e0fef85 7696 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 7697 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7698 rjt_exp = LSEXP_NOTHING_MORE;
dea3101e
JB
7699 break;
7700 }
2e0fef85 7701 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
dea3101e 7702 break;
8b017a30
JS
7703 case ELS_CMD_LCB:
7704 phba->fc_stat.elsRcvLCB++;
7705 lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
7706 break;
86478875
JS
7707 case ELS_CMD_RDP:
7708 phba->fc_stat.elsRcvRDP++;
7709 lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
7710 break;
dea3101e
JB
7711 case ELS_CMD_RSCN:
7712 phba->fc_stat.elsRcvRSCN++;
51ef4c26 7713 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
87af33fe 7714 if (newnode)
98c9ea5c 7715 lpfc_nlp_put(ndlp);
dea3101e
JB
7716 break;
7717 case ELS_CMD_ADISC:
858c9f6c
JS
7718 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7719 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
7720 did, vport->port_state, ndlp->nlp_flag);
7721
ddcc50f0 7722 lpfc_send_els_event(vport, ndlp, payload);
dea3101e 7723 phba->fc_stat.elsRcvADISC++;
2e0fef85 7724 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 7725 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7726 rjt_exp = LSEXP_NOTHING_MORE;
dea3101e
JB
7727 break;
7728 }
2e0fef85
JS
7729 lpfc_disc_state_machine(vport, ndlp, elsiocb,
7730 NLP_EVT_RCV_ADISC);
dea3101e
JB
7731 break;
7732 case ELS_CMD_PDISC:
858c9f6c
JS
7733 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7734 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
7735 did, vport->port_state, ndlp->nlp_flag);
7736
dea3101e 7737 phba->fc_stat.elsRcvPDISC++;
2e0fef85 7738 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 7739 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7740 rjt_exp = LSEXP_NOTHING_MORE;
dea3101e
JB
7741 break;
7742 }
2e0fef85
JS
7743 lpfc_disc_state_machine(vport, ndlp, elsiocb,
7744 NLP_EVT_RCV_PDISC);
dea3101e
JB
7745 break;
7746 case ELS_CMD_FARPR:
858c9f6c
JS
7747 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7748 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
7749 did, vport->port_state, ndlp->nlp_flag);
7750
dea3101e 7751 phba->fc_stat.elsRcvFARPR++;
2e0fef85 7752 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
dea3101e
JB
7753 break;
7754 case ELS_CMD_FARP:
858c9f6c
JS
7755 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7756 "RCV FARP: did:x%x/ste:x%x flg:x%x",
7757 did, vport->port_state, ndlp->nlp_flag);
7758
dea3101e 7759 phba->fc_stat.elsRcvFARP++;
2e0fef85 7760 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
dea3101e
JB
7761 break;
7762 case ELS_CMD_FAN:
858c9f6c
JS
7763 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7764 "RCV FAN: did:x%x/ste:x%x flg:x%x",
7765 did, vport->port_state, ndlp->nlp_flag);
7766
dea3101e 7767 phba->fc_stat.elsRcvFAN++;
2e0fef85 7768 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
dea3101e 7769 break;
dea3101e 7770 case ELS_CMD_PRLI:
858c9f6c
JS
7771 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7772 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
7773 did, vport->port_state, ndlp->nlp_flag);
7774
dea3101e 7775 phba->fc_stat.elsRcvPRLI++;
2e0fef85 7776 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 7777 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7778 rjt_exp = LSEXP_NOTHING_MORE;
dea3101e
JB
7779 break;
7780 }
2e0fef85 7781 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
dea3101e 7782 break;
7bb3b137 7783 case ELS_CMD_LIRR:
858c9f6c
JS
7784 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7785 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
7786 did, vport->port_state, ndlp->nlp_flag);
7787
7bb3b137 7788 phba->fc_stat.elsRcvLIRR++;
2e0fef85 7789 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
87af33fe 7790 if (newnode)
98c9ea5c 7791 lpfc_nlp_put(ndlp);
7bb3b137 7792 break;
12265f68
JS
7793 case ELS_CMD_RLS:
7794 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7795 "RCV RLS: did:x%x/ste:x%x flg:x%x",
7796 did, vport->port_state, ndlp->nlp_flag);
7797
7798 phba->fc_stat.elsRcvRLS++;
7799 lpfc_els_rcv_rls(vport, elsiocb, ndlp);
7800 if (newnode)
7801 lpfc_nlp_put(ndlp);
7802 break;
7bb3b137 7803 case ELS_CMD_RPS:
858c9f6c
JS
7804 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7805 "RCV RPS: did:x%x/ste:x%x flg:x%x",
7806 did, vport->port_state, ndlp->nlp_flag);
7807
7bb3b137 7808 phba->fc_stat.elsRcvRPS++;
2e0fef85 7809 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
87af33fe 7810 if (newnode)
98c9ea5c 7811 lpfc_nlp_put(ndlp);
7bb3b137
JW
7812 break;
7813 case ELS_CMD_RPL:
858c9f6c
JS
7814 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7815 "RCV RPL: did:x%x/ste:x%x flg:x%x",
7816 did, vport->port_state, ndlp->nlp_flag);
7817
7bb3b137 7818 phba->fc_stat.elsRcvRPL++;
2e0fef85 7819 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
87af33fe 7820 if (newnode)
98c9ea5c 7821 lpfc_nlp_put(ndlp);
7bb3b137 7822 break;
dea3101e 7823 case ELS_CMD_RNID:
858c9f6c
JS
7824 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7825 "RCV RNID: did:x%x/ste:x%x flg:x%x",
7826 did, vport->port_state, ndlp->nlp_flag);
7827
dea3101e 7828 phba->fc_stat.elsRcvRNID++;
2e0fef85 7829 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
87af33fe 7830 if (newnode)
98c9ea5c 7831 lpfc_nlp_put(ndlp);
dea3101e 7832 break;
12265f68
JS
7833 case ELS_CMD_RTV:
7834 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7835 "RCV RTV: did:x%x/ste:x%x flg:x%x",
7836 did, vport->port_state, ndlp->nlp_flag);
7837 phba->fc_stat.elsRcvRTV++;
7838 lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
7839 if (newnode)
7840 lpfc_nlp_put(ndlp);
7841 break;
5ffc266e
JS
7842 case ELS_CMD_RRQ:
7843 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7844 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
7845 did, vport->port_state, ndlp->nlp_flag);
7846
7847 phba->fc_stat.elsRcvRRQ++;
7848 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
7849 if (newnode)
7850 lpfc_nlp_put(ndlp);
7851 break;
12265f68
JS
7852 case ELS_CMD_ECHO:
7853 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7854 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
7855 did, vport->port_state, ndlp->nlp_flag);
7856
7857 phba->fc_stat.elsRcvECHO++;
7858 lpfc_els_rcv_echo(vport, elsiocb, ndlp);
7859 if (newnode)
7860 lpfc_nlp_put(ndlp);
7861 break;
303f2f9c
JS
7862 case ELS_CMD_REC:
7863 /* receive this due to exchange closed */
7864 rjt_err = LSRJT_UNABLE_TPC;
7865 rjt_exp = LSEXP_INVALID_OX_RX;
7866 break;
dea3101e 7867 default:
858c9f6c
JS
7868 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7869 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
7870 cmd, did, vport->port_state);
7871
dea3101e 7872 /* Unsupported ELS command, reject */
63e801ce 7873 rjt_err = LSRJT_CMD_UNSUPPORTED;
303f2f9c 7874 rjt_exp = LSEXP_NOTHING_MORE;
dea3101e
JB
7875
7876 /* Unknown ELS command <elsCmd> received from NPORT <did> */
e8b62011
JS
7877 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7878 "0115 Unknown ELS command x%x "
7879 "received from NPORT x%x\n", cmd, did);
87af33fe 7880 if (newnode)
98c9ea5c 7881 lpfc_nlp_put(ndlp);
dea3101e
JB
7882 break;
7883 }
7884
eec3d312 7885lsrjt:
dea3101e
JB
7886 /* check if need to LS_RJT received ELS cmd */
7887 if (rjt_err) {
92d7f7b0 7888 memset(&stat, 0, sizeof(stat));
858c9f6c 7889 stat.un.b.lsRjtRsnCode = rjt_err;
303f2f9c 7890 stat.un.b.lsRjtRsnCodeExp = rjt_exp;
858c9f6c
JS
7891 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
7892 NULL);
dea3101e
JB
7893 }
7894
d7c255b2
JS
7895 lpfc_nlp_put(elsiocb->context1);
7896 elsiocb->context1 = NULL;
ed957684
JS
7897 return;
7898
7899dropit:
98c9ea5c 7900 if (vport && !(vport->load_flag & FC_UNLOADING))
6fb120a7
JS
7901 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7902 "0111 Dropping received ELS cmd "
ed957684 7903 "Data: x%x x%x x%x\n",
6fb120a7 7904 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
ed957684
JS
7905 phba->fc_stat.elsRcvDrop++;
7906}
7907
e59058c4 7908/**
3621a710 7909 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
e59058c4
JS
7910 * @phba: pointer to lpfc hba data structure.
7911 * @pring: pointer to a SLI ring.
7912 * @elsiocb: pointer to lpfc els iocb data structure.
7913 *
7914 * This routine is used to process an unsolicited event received from a SLI
7915 * (Service Level Interface) ring. The actual processing of the data buffer
7916 * associated with the unsolicited event is done by invoking the routine
7917 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
7918 * SLI ring on which the unsolicited event was received.
7919 **/
ed957684
JS
7920void
7921lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7922 struct lpfc_iocbq *elsiocb)
7923{
7924 struct lpfc_vport *vport = phba->pport;
ed957684 7925 IOCB_t *icmd = &elsiocb->iocb;
ed957684 7926 dma_addr_t paddr;
92d7f7b0
JS
7927 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
7928 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
7929
d7c255b2 7930 elsiocb->context1 = NULL;
92d7f7b0
JS
7931 elsiocb->context2 = NULL;
7932 elsiocb->context3 = NULL;
ed957684 7933
92d7f7b0
JS
7934 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
7935 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
7936 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
e3d2b802
JS
7937 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
7938 IOERR_RCV_BUFFER_WAITING) {
ed957684
JS
7939 phba->fc_stat.NoRcvBuf++;
7940 /* Not enough posted buffers; Try posting more buffers */
92d7f7b0 7941 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
495a714c 7942 lpfc_post_buffer(phba, pring, 0);
ed957684
JS
7943 return;
7944 }
7945
92d7f7b0
JS
7946 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
7947 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
7948 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
7949 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
7950 vport = phba->pport;
6fb120a7
JS
7951 else
7952 vport = lpfc_find_vport_by_vpid(phba,
6d368e53 7953 icmd->unsli3.rcvsli3.vpi);
92d7f7b0 7954 }
6d368e53 7955
7f5f3d0d
JS
7956 /* If there are no BDEs associated
7957 * with this IOCB, there is nothing to do.
7958 */
ed957684
JS
7959 if (icmd->ulpBdeCount == 0)
7960 return;
7961
7f5f3d0d
JS
7962 /* type of ELS cmd is first 32bit word
7963 * in packet
7964 */
ed957684 7965 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
92d7f7b0 7966 elsiocb->context2 = bdeBuf1;
ed957684
JS
7967 } else {
7968 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
7969 icmd->un.cont64[0].addrLow);
92d7f7b0
JS
7970 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
7971 paddr);
ed957684
JS
7972 }
7973
92d7f7b0
JS
7974 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
7975 /*
7976 * The different unsolicited event handlers would tell us
7977 * if they are done with "mp" by setting context2 to NULL.
7978 */
dea3101e 7979 if (elsiocb->context2) {
92d7f7b0
JS
7980 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
7981 elsiocb->context2 = NULL;
dea3101e 7982 }
ed957684
JS
7983
7984 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
92d7f7b0 7985 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
ed957684 7986 icmd->ulpBdeCount == 2) {
92d7f7b0
JS
7987 elsiocb->context2 = bdeBuf2;
7988 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
ed957684
JS
7989 /* free mp if we are done with it */
7990 if (elsiocb->context2) {
92d7f7b0
JS
7991 lpfc_in_buf_free(phba, elsiocb->context2);
7992 elsiocb->context2 = NULL;
7993 }
7994 }
7995}
7996
4258e98e
JS
7997void
7998lpfc_start_fdmi(struct lpfc_vport *vport)
7999{
8000 struct lpfc_hba *phba = vport->phba;
8001 struct lpfc_nodelist *ndlp;
8002
8003 /* If this is the first time, allocate an ndlp and initialize
8004 * it. Otherwise, make sure the node is enabled and then do the
8005 * login.
8006 */
8007 ndlp = lpfc_findnode_did(vport, FDMI_DID);
8008 if (!ndlp) {
8009 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
8010 if (ndlp) {
8011 lpfc_nlp_init(vport, ndlp, FDMI_DID);
8012 ndlp->nlp_type |= NLP_FABRIC;
8013 } else {
8014 return;
8015 }
8016 }
8017 if (!NLP_CHK_NODE_ACT(ndlp))
8018 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
8019
8020 if (ndlp) {
8021 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8022 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
8023 }
8024}
8025
e59058c4 8026/**
3621a710 8027 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
e59058c4
JS
8028 * @phba: pointer to lpfc hba data structure.
8029 * @vport: pointer to a virtual N_Port data structure.
8030 *
8031 * This routine issues a Port Login (PLOGI) to the Name Server with
8032 * State Change Request (SCR) for a @vport. This routine will create an
8033 * ndlp for the Name Server associated with the @vport if such a node does
8034 * not already exist. The PLOGI to the Name Server is issued by invoking the
8035 * lpfc_issue_els_plogi() routine. If the Fabric-Device Management Interface
8036 * (FDMI) is configured for the @vport, an FDMI node will be created and
8037 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
8038 **/
92d7f7b0
JS
8039void
8040lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
8041{
4258e98e 8042 struct lpfc_nodelist *ndlp;
92494144
JS
8043 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8044
8045 /*
8046 * If lpfc_delay_discovery parameter is set and the clean address
8047 * bit is cleared and fc fabric parameters chenged, delay FC NPort
8048 * discovery.
8049 */
8050 spin_lock_irq(shost->host_lock);
8051 if (vport->fc_flag & FC_DISC_DELAYED) {
8052 spin_unlock_irq(shost->host_lock);
18775708
JS
8053 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
8054 "3334 Delay fc port discovery for %d seconds\n",
8055 phba->fc_ratov);
92494144 8056 mod_timer(&vport->delayed_disc_tmo,
256ec0d0 8057 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
92494144
JS
8058 return;
8059 }
8060 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
8061
8062 ndlp = lpfc_findnode_did(vport, NameServer_DID);
8063 if (!ndlp) {
8064 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
8065 if (!ndlp) {
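			/* On a private loop there is no Name Server to log
			 * into; start discovery directly.
			 */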
76a95d75 8066 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
92d7f7b0
JS
8067 lpfc_disc_start(vport);
8068 return;
8069 }
8070 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
8071 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8072 "0251 NameServer login: no memory\n");
92d7f7b0
JS
8073 return;
8074 }
8075 lpfc_nlp_init(vport, ndlp, NameServer_DID);
e47c9093
JS
8076 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
8077 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
8078 if (!ndlp) {
76a95d75 8079 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
e47c9093
JS
8080 lpfc_disc_start(vport);
8081 return;
8082 }
8083 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8084 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8085 "0348 NameServer login: node freed\n");
8086 return;
8087 }
92d7f7b0 8088 }
58da1ffb 8089 ndlp->nlp_type |= NLP_FABRIC;
92d7f7b0
JS
8090
8091 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8092
8093 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
8094 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
8095 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8096 "0252 Cannot issue NameServer login\n");
92d7f7b0
JS
8097 return;
8098 }
8099
8663cbbe
JS
8100 if ((phba->cfg_enable_SmartSAN ||
8101 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
8102 (vport->load_flag & FC_ALLOW_FDMI))
4258e98e 8103 lpfc_start_fdmi(vport);
92d7f7b0
JS
8104}
8105
e59058c4 8106/**
3621a710 8107 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
e59058c4
JS
8108 * @phba: pointer to lpfc hba data structure.
8109 * @pmb: pointer to the driver internal queue element for mailbox command.
8110 *
8111 * This routine is the completion callback function to register new vport
8112 * mailbox command. If the new vport mailbox command completes successfully,
8113 * the fabric registration login shall be performed on the physical port (when
8114 * the new vport created is actually the physical port, with VPI 0) or the port
8115 * login to the Name Server for State Change Request (SCR) will be performed
8116 * on a virtual port (a real virtual port, with VPI greater than 0).
8117 **/
92d7f7b0
JS
8118static void
8119lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
8120{
8121 struct lpfc_vport *vport = pmb->vport;
8122 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8123 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
04c68496 8124 MAILBOX_t *mb = &pmb->u.mb;
695a814e 8125 int rc;
92d7f7b0 8126
09372820 8127 spin_lock_irq(shost->host_lock);
92d7f7b0 8128 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
09372820 8129 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
8130
8131 if (mb->mbxStatus) {
e8b62011 8132 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
38b92ef8
JS
8133 "0915 Register VPI failed : Status: x%x"
8134 " upd bit: x%x \n", mb->mbxStatus,
8135 mb->un.varRegVpi.upd);
8136 if (phba->sli_rev == LPFC_SLI_REV4 &&
8137 mb->un.varRegVpi.upd)
8138 goto mbox_err_exit;
92d7f7b0
JS
8139
8140 switch (mb->mbxStatus) {
8141 case 0x11: /* unsupported feature */
8142 case 0x9603: /* max_vpi exceeded */
7f5f3d0d 8143 case 0x9602: /* Link event since CLEAR_LA */
92d7f7b0
JS
8144 /* giving up on vport registration */
8145 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8146 spin_lock_irq(shost->host_lock);
8147 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
8148 spin_unlock_irq(shost->host_lock);
8149 lpfc_can_disctmo(vport);
8150 break;
695a814e
JS
8151 /* If reg_vpi fail with invalid VPI status, re-init VPI */
8152 case 0x20:
8153 spin_lock_irq(shost->host_lock);
8154 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
8155 spin_unlock_irq(shost->host_lock);
8156 lpfc_init_vpi(phba, pmb, vport->vpi);
8157 pmb->vport = vport;
8158 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
8159 rc = lpfc_sli_issue_mbox(phba, pmb,
8160 MBX_NOWAIT);
8161 if (rc == MBX_NOT_FINISHED) {
8162 lpfc_printf_vlog(vport,
8163 KERN_ERR, LOG_MBOX,
8164 "2732 Failed to issue INIT_VPI"
8165 " mailbox command\n");
8166 } else {
8167 lpfc_nlp_put(ndlp);
8168 return;
8169 }
8170
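		/* If the INIT_VPI mailbox could not be issued, fall through
		 * to the default recovery path below.
		 */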
92d7f7b0
JS
8171 default:
8172 /* Try to recover from this error */
5af5eee7
JS
8173 if (phba->sli_rev == LPFC_SLI_REV4)
8174 lpfc_sli4_unreg_all_rpis(vport);
92d7f7b0 8175 lpfc_mbx_unreg_vpi(vport);
09372820 8176 spin_lock_irq(shost->host_lock);
92d7f7b0 8177 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
09372820 8178 spin_unlock_irq(shost->host_lock);
4b40c59e
JS
8179 if (vport->port_type == LPFC_PHYSICAL_PORT
8180 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
76a95d75 8181 lpfc_issue_init_vfi(vport);
7f5f3d0d
JS
8182 else
8183 lpfc_initial_fdisc(vport);
92d7f7b0
JS
8184 break;
8185 }
92d7f7b0 8186 } else {
695a814e 8187 spin_lock_irq(shost->host_lock);
1987807d 8188 vport->vpi_state |= LPFC_VPI_REGISTERED;
695a814e
JS
8189 spin_unlock_irq(shost->host_lock);
8190 if (vport == phba->pport) {
6fb120a7
JS
8191 if (phba->sli_rev < LPFC_SLI_REV4)
8192 lpfc_issue_fabric_reglogin(vport);
695a814e 8193 else {
fc2b989b
JS
8194 /*
8195 * If the physical port is instantiated using
8196 * FDISC, do not start vport discovery.
8197 */
8198 if (vport->port_state != LPFC_FDISC)
8199 lpfc_start_fdiscs(phba);
695a814e
JS
8200 lpfc_do_scr_ns_plogi(phba, vport);
8201 }
8202 } else
92d7f7b0
JS
8203 lpfc_do_scr_ns_plogi(phba, vport);
8204 }
38b92ef8 8205mbox_err_exit:
fa4066b6
JS
8206 /* Now, we decrement the ndlp reference count held for this
8207 * callback function
8208 */
8209 lpfc_nlp_put(ndlp);
8210
92d7f7b0
JS
8211 mempool_free(pmb, phba->mbox_mem_pool);
8212 return;
8213}
8214
e59058c4 8215/**
3621a710 8216 * lpfc_register_new_vport - Register a new vport with a HBA
e59058c4
JS
8217 * @phba: pointer to lpfc hba data structure.
8218 * @vport: pointer to a host virtual N_Port data structure.
8219 * @ndlp: pointer to a node-list data structure.
8220 *
8221 * This routine registers the @vport as a new virtual port with a HBA.
8222 * It is done through a registering vpi mailbox command.
8223 **/
695a814e 8224void
92d7f7b0
JS
8225lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
8226 struct lpfc_nodelist *ndlp)
8227{
09372820 8228 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
92d7f7b0
JS
8229 LPFC_MBOXQ_t *mbox;
8230
8231 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8232 if (mbox) {
6fb120a7 8233 lpfc_reg_vpi(vport, mbox);
92d7f7b0
JS
8234 mbox->vport = vport;
8235 mbox->context2 = lpfc_nlp_get(ndlp);
8236 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
0b727fea 8237 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
92d7f7b0 8238 == MBX_NOT_FINISHED) {
fa4066b6
JS
8239 /* mailbox command not success, decrement ndlp
8240 * reference count for this command
8241 */
8242 lpfc_nlp_put(ndlp);
92d7f7b0 8243 mempool_free(mbox, phba->mbox_mem_pool);
92d7f7b0 8244
e8b62011
JS
8245 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
8246 "0253 Register VPI: Can't send mbox\n");
fa4066b6 8247 goto mbox_err_exit;
92d7f7b0
JS
8248 }
8249 } else {
e8b62011
JS
8250 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
8251 "0254 Register VPI: no memory\n");
fa4066b6 8252 goto mbox_err_exit;
92d7f7b0 8253 }
fa4066b6
JS
8254 return;
8255
8256mbox_err_exit:
8257 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8258 spin_lock_irq(shost->host_lock);
8259 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
8260 spin_unlock_irq(shost->host_lock);
8261 return;
92d7f7b0
JS
8262}
8263
695a814e 8264/**
0c9ab6f5 8265 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
695a814e
JS
8266 * @phba: pointer to lpfc hba data structure.
8267 *
0c9ab6f5 8268 * This routine cancels the retry delay timers to all the vports.
695a814e
JS
8269 **/
8270void
0c9ab6f5 8271lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
695a814e
JS
8272{
8273 struct lpfc_vport **vports;
8274 struct lpfc_nodelist *ndlp;
695a814e 8275 uint32_t link_state;
0c9ab6f5 8276 int i;
695a814e
JS
8277
8278 /* Treat this failure as linkdown for all vports */
8279 link_state = phba->link_state;
8280 lpfc_linkdown(phba);
8281 phba->link_state = link_state;
8282
8283 vports = lpfc_create_vport_work_array(phba);
8284
8285 if (vports) {
8286 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
8287 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
8288 if (ndlp)
8289 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
8290 lpfc_els_flush_cmd(vports[i]);
8291 }
8292 lpfc_destroy_vport_work_array(phba, vports);
8293 }
0c9ab6f5
JS
8294}
8295
8296/**
8297 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
8298 * @phba: pointer to lpfc hba data structure.
8299 *
8300 * This routine aborts all pending discovery commands and
8301 * starts a timer to retry FLOGI for the physical port
8302 * discovery.
8303 **/
8304void
8305lpfc_retry_pport_discovery(struct lpfc_hba *phba)
8306{
8307 struct lpfc_nodelist *ndlp;
8308 struct Scsi_Host *shost;
8309
8310 /* Cancel the retry delay timers for all vports */
8311 lpfc_cancel_all_vport_retry_delay_timer(phba);
695a814e
JS
8312
8313 /* If the fabric requires FLOGI, re-instantiate the physical login */
8314 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
8315 if (!ndlp)
8316 return;
8317
695a814e 8318 shost = lpfc_shost_from_vport(phba->pport);
256ec0d0 8319 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
695a814e
JS
8320 spin_lock_irq(shost->host_lock);
8321 ndlp->nlp_flag |= NLP_DELAY_TMO;
8322 spin_unlock_irq(shost->host_lock);
8323 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
8324 phba->pport->port_state = LPFC_FLOGI;
8325 return;
8326}
8327
8328/**
8329 * lpfc_fabric_login_reqd - Check if FLOGI required.
8330 * @phba: pointer to lpfc hba data structure.
8331 * @cmdiocb: pointer to FDISC command iocb.
8332 * @rspiocb: pointer to FDISC response iocb.
8333 *
8334 * This routine checks if a FLOGI is required for FDISC
8335 * to succeed.
8336 **/
8337static int
8338lpfc_fabric_login_reqd(struct lpfc_hba *phba,
8339 struct lpfc_iocbq *cmdiocb,
8340 struct lpfc_iocbq *rspiocb)
8341{
8342
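	/* A fabric reject with reason RJT_LOGIN_REQUIRED indicates the
	 * physical port must redo FLOGI before an FDISC can succeed.
	 */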
8343 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
8344 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
8345 return 0;
8346 else
8347 return 1;
8348}
8349
e59058c4 8350/**
3621a710 8351 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
e59058c4
JS
8352 * @phba: pointer to lpfc hba data structure.
8353 * @cmdiocb: pointer to lpfc command iocb data structure.
8354 * @rspiocb: pointer to lpfc response iocb data structure.
8355 *
8356 * This routine is the completion callback function to a Fabric Discover
8357 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
8358 * single threaded, each FDISC completion callback function will reset
8359 * the discovery timer for all vports so that the timers do not time out
8360 * unnecessarily. The function checks the FDISC IOCB status. If an error is
8361 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the
8362 * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID
8363 * assigned to the vport has been changed with the completion of the FDISC
8364 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
8365 * are unregistered from the HBA, and then the lpfc_register_new_vport()
8366 * routine is invoked to register new vport with the HBA. Otherwise, the
8367 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
8368 * Server for State Change Request (SCR).
8369 **/
92d7f7b0
JS
8370static void
8371lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8372 struct lpfc_iocbq *rspiocb)
8373{
8374 struct lpfc_vport *vport = cmdiocb->vport;
8375 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8376 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
8377 struct lpfc_nodelist *np;
8378 struct lpfc_nodelist *next_np;
8379 IOCB_t *irsp = &rspiocb->iocb;
8380 struct lpfc_iocbq *piocb;
92494144
JS
8381 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
8382 struct serv_parm *sp;
8383 uint8_t fabric_param_changed;
92d7f7b0 8384
e8b62011
JS
8385 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8386 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
8387 irsp->ulpStatus, irsp->un.ulpWord[4],
8388 vport->fc_prevDID);
92d7f7b0
JS
8389 /* Since all FDISCs are being single threaded, we
8390 * must reset the discovery timer for ALL vports
8391 * waiting to send FDISC when one completes.
8392 */
8393 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
8394 lpfc_set_disctmo(piocb->vport);
8395 }
8396
858c9f6c
JS
8397 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8398 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
8399 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
8400
92d7f7b0 8401 if (irsp->ulpStatus) {
695a814e
JS
8402
8403 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
8404 lpfc_retry_pport_discovery(phba);
8405 goto out;
8406 }
8407
92d7f7b0
JS
8408 /* Check for retry */
8409 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
8410 goto out;
92d7f7b0 8411 /* FDISC failed */
e8b62011 8412 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6b5151fd 8413 "0126 FDISC failed. (x%x/x%x)\n",
e8b62011 8414 irsp->ulpStatus, irsp->un.ulpWord[4]);
d7c255b2
JS
8415 goto fdisc_failed;
8416 }
d7c255b2 8417 spin_lock_irq(shost->host_lock);
695a814e 8418 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
4b40c59e 8419 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
d7c255b2 8420 vport->fc_flag |= FC_FABRIC;
76a95d75 8421 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
d7c255b2
JS
8422 vport->fc_flag |= FC_PUBLIC_LOOP;
8423 spin_unlock_irq(shost->host_lock);
92d7f7b0 8424
d7c255b2
JS
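	/* Word 4 of the FDISC response carries the fabric-assigned N_Port ID */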
8425 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
8426 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
92494144 8427 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
a2fc4aef
JS
8428 if (!prsp)
8429 goto out;
92494144
JS
8430 sp = prsp->virt + sizeof(uint32_t);
8431 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
8432 memcpy(&vport->fabric_portname, &sp->portName,
8433 sizeof(struct lpfc_name));
8434 memcpy(&vport->fabric_nodename, &sp->nodeName,
8435 sizeof(struct lpfc_name));
8436 if (fabric_param_changed &&
d7c255b2
JS
8437 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
8438 /* If our NportID changed, we need to ensure all
8439 * remaining NPORTs get unreg_login'ed so we can
8440 * issue unreg_vpi.
8441 */
8442 list_for_each_entry_safe(np, next_np,
8443 &vport->fc_nodes, nlp_listp) {
8444 if (!NLP_CHK_NODE_ACT(ndlp) ||
8445 (np->nlp_state != NLP_STE_NPR_NODE) ||
8446 !(np->nlp_flag & NLP_NPR_ADISC))
8447 continue;
09372820 8448 spin_lock_irq(shost->host_lock);
d7c255b2 8449 np->nlp_flag &= ~NLP_NPR_ADISC;
09372820 8450 spin_unlock_irq(shost->host_lock);
d7c255b2 8451 lpfc_unreg_rpi(vport, np);
92d7f7b0 8452 }
78730cfe 8453 lpfc_cleanup_pending_mbox(vport);
5af5eee7
JS
8454
8455 if (phba->sli_rev == LPFC_SLI_REV4)
8456 lpfc_sli4_unreg_all_rpis(vport);
8457
d7c255b2
JS
8458 lpfc_mbx_unreg_vpi(vport);
8459 spin_lock_irq(shost->host_lock);
8460 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
0f65ff68
JS
8461 if (phba->sli_rev == LPFC_SLI_REV4)
8462 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
4b40c59e
JS
8463 else
8464 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
d7c255b2 8465 spin_unlock_irq(shost->host_lock);
38b92ef8
JS
8466 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
8467 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
8468 /*
8469 * Driver needs to re-reg VPI in order for f/w
8470 * to update the MAC address.
8471 */
8472 lpfc_register_new_vport(phba, vport, ndlp);
5ac6b303 8473 goto out;
92d7f7b0
JS
8474 }
8475
ecfd03c6
JS
8476 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
8477 lpfc_issue_init_vpi(vport);
8478 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
d7c255b2
JS
8479 lpfc_register_new_vport(phba, vport, ndlp);
8480 else
8481 lpfc_do_scr_ns_plogi(phba, vport);
8482 goto out;
8483fdisc_failed:
c84163d1
JS
8484 if (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)
8485 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
d7c255b2
JS
8486 /* Cancel discovery timer */
8487 lpfc_can_disctmo(vport);
8488 lpfc_nlp_put(ndlp);
92d7f7b0
JS
8489out:
8490 lpfc_els_free_iocb(phba, cmdiocb);
8491}
8492
e59058c4 8493/**
3621a710 8494 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
e59058c4
JS
8495 * @vport: pointer to a virtual N_Port data structure.
8496 * @ndlp: pointer to a node-list data structure.
8497 * @retry: number of retries to the command IOCB.
8498 *
8499 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
8500 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
8501 * routine to issue the IOCB, which makes sure only one outstanding fabric
8502 * IOCB will be sent off HBA at any given time.
8503 *
8504 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
8505 * will be incremented by 1 for holding the ndlp and the reference to ndlp
8506 * will be stored into the context1 field of the IOCB for the completion
8507 * callback function to the FDISC ELS command.
8508 *
8509 * Return code
8510 * 0 - Successfully issued fdisc iocb command
8511 * 1 - Failed to issue fdisc iocb command
8512 **/
a6ababd2 8513static int
92d7f7b0
JS
8514lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
8515 uint8_t retry)
8516{
8517 struct lpfc_hba *phba = vport->phba;
8518 IOCB_t *icmd;
8519 struct lpfc_iocbq *elsiocb;
8520 struct serv_parm *sp;
8521 uint8_t *pcmd;
8522 uint16_t cmdsize;
8523 int did = ndlp->nlp_DID;
8524 int rc;
92d7f7b0 8525
5ffc266e 8526 vport->port_state = LPFC_FDISC;
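	/* FDISC is sent with a source ID of zero; the fabric assigns the
	 * new N_Port ID, which is returned in the FDISC accept.
	 */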
6b5151fd 8527 vport->fc_myDID = 0;
92d7f7b0
JS
8528 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
8529 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
8530 ELS_CMD_FDISC);
8531 if (!elsiocb) {
92d7f7b0 8532 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
8533 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8534 "0255 Issue FDISC: no IOCB\n");
92d7f7b0
JS
8535 return 1;
8536 }
8537
8538 icmd = &elsiocb->iocb;
8539 icmd->un.elsreq64.myID = 0;
8540 icmd->un.elsreq64.fl = 1;
8541
73d91e50
JS
8542 /*
8543 * SLI3 ports require a different context type value than SLI4.
8544 * Catch SLI3 ports here and override the prep.
8545 */
8546 if (phba->sli_rev == LPFC_SLI_REV3) {
f1126688
JS
8547 icmd->ulpCt_h = 1;
8548 icmd->ulpCt_l = 0;
8549 }
92d7f7b0
JS
8550
8551 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
8552 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
8553 pcmd += sizeof(uint32_t); /* CSP Word 1 */
8554 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
8555 sp = (struct serv_parm *) pcmd;
8556 /* Setup CSPs accordingly for Fabric */
8557 sp->cmn.e_d_tov = 0;
8558 sp->cmn.w2.r_a_tov = 0;
df9e1b59 8559 sp->cmn.virtual_fabric_support = 0;
92d7f7b0
JS
8560 sp->cls1.classValid = 0;
8561 sp->cls2.seqDelivery = 1;
8562 sp->cls3.seqDelivery = 1;
8563
8564 pcmd += sizeof(uint32_t); /* CSP Word 2 */
8565 pcmd += sizeof(uint32_t); /* CSP Word 3 */
8566 pcmd += sizeof(uint32_t); /* CSP Word 4 */
8567 pcmd += sizeof(uint32_t); /* Port Name */
8568 memcpy(pcmd, &vport->fc_portname, 8);
8569 pcmd += sizeof(uint32_t); /* Node Name */
8570 pcmd += sizeof(uint32_t); /* Node Name */
8571 memcpy(pcmd, &vport->fc_nodename, 8);
8572
8573 lpfc_set_disctmo(vport);
8574
8575 phba->fc_stat.elsXmitFDISC++;
8576 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
8577
858c9f6c
JS
8578 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8579 "Issue FDISC: did:x%x",
8580 did, 0, 0);
8581
92d7f7b0
JS
8582 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
8583 if (rc == IOCB_ERROR) {
8584 lpfc_els_free_iocb(phba, elsiocb);
92d7f7b0 8585 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
8586 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8587 "0256 Issue FDISC: Cannot send IOCB\n");
92d7f7b0
JS
8588 return 1;
8589 }
8590 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
92d7f7b0
JS
8591 return 0;
8592}
8593
e59058c4 8594/**
3621a710 8595 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
e59058c4
JS
8596 * @phba: pointer to lpfc hba data structure.
8597 * @cmdiocb: pointer to lpfc command iocb data structure.
8598 * @rspiocb: pointer to lpfc response iocb data structure.
8599 *
8600 * This routine is the completion callback function to the issuing of a LOGO
8601 * ELS command off a vport. It frees the command IOCB and then decrements the
8602 * reference count held on ndlp for this completion function, indicating that
8603 * the reference to the ndlp is no longer needed. Note that the
8604 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
8605 * callback function and an additional explicit ndlp reference decrement
8606 * will trigger the actual release of the ndlp.
8607 **/
92d7f7b0
JS
8608static void
8609lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8610 struct lpfc_iocbq *rspiocb)
8611{
8612 struct lpfc_vport *vport = cmdiocb->vport;
858c9f6c 8613 IOCB_t *irsp;
e47c9093 8614 struct lpfc_nodelist *ndlp;
9589b062 8615 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
858c9f6c 8616
9589b062 8617 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
858c9f6c
JS
8618 irsp = &rspiocb->iocb;
8619 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8620 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
8621 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
92d7f7b0
JS
8622
8623 lpfc_els_free_iocb(phba, cmdiocb);
8624 vport->unreg_vpi_cmpl = VPORT_ERROR;
e47c9093
JS
8625
8626 /* Trigger the release of the ndlp after logo */
8627 lpfc_nlp_put(ndlp);
9589b062
JS
8628
8629 /* NPIV LOGO completes to NPort <nlp_DID> */
8630 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8631 "2928 NPIV LOGO completes to NPort x%x "
8632 "Data: x%x x%x x%x x%x\n",
8633 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
8634 irsp->ulpTimeout, vport->num_disc_nodes);
8635
8636 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
8637 spin_lock_irq(shost->host_lock);
73dc0dbe 8638 vport->fc_flag &= ~FC_NDISC_ACTIVE;
9589b062
JS
8639 vport->fc_flag &= ~FC_FABRIC;
8640 spin_unlock_irq(shost->host_lock);
73dc0dbe 8641 lpfc_can_disctmo(vport);
9589b062 8642 }
92d7f7b0
JS
8643}
8644
e59058c4 8645/**
3621a710 8646 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
e59058c4
JS
8647 * @vport: pointer to a virtual N_Port data structure.
8648 * @ndlp: pointer to a node-list data structure.
8649 *
8650 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
8651 *
8652 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
8653 * will be incremented by 1 for holding the ndlp and the reference to ndlp
8654 * will be stored into the context1 field of the IOCB for the completion
8655 * callback function to the LOGO ELS command.
8656 *
8657 * Return codes
8658 * 0 - Successfully issued logo off the @vport
8659 * 1 - Failed to issue logo off the @vport
8660 **/
92d7f7b0
JS
8661int
8662lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
8663{
8664 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8665 struct lpfc_hba *phba = vport->phba;
92d7f7b0
JS
8666 struct lpfc_iocbq *elsiocb;
8667 uint8_t *pcmd;
8668 uint16_t cmdsize;
8669
8670 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
8671 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
8672 ELS_CMD_LOGO);
8673 if (!elsiocb)
8674 return 1;
8675
92d7f7b0
JS
8676 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
8677 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
8678 pcmd += sizeof(uint32_t);
8679
8680 /* Fill in LOGO payload */
8681 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
8682 pcmd += sizeof(uint32_t);
8683 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
8684
858c9f6c
JS
8685 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8686 "Issue LOGO npiv did:x%x flg:x%x",
8687 ndlp->nlp_DID, ndlp->nlp_flag, 0);
8688
92d7f7b0
JS
8689 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
8690 spin_lock_irq(shost->host_lock);
8691 ndlp->nlp_flag |= NLP_LOGO_SND;
8692 spin_unlock_irq(shost->host_lock);
3772a991
JS
8693 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
8694 IOCB_ERROR) {
92d7f7b0
JS
8695 spin_lock_irq(shost->host_lock);
8696 ndlp->nlp_flag &= ~NLP_LOGO_SND;
8697 spin_unlock_irq(shost->host_lock);
8698 lpfc_els_free_iocb(phba, elsiocb);
8699 return 1;
8700 }
8701 return 0;
8702}
8703
e59058c4 8704/**
3621a710 8705 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
e59058c4
JS
8706 * @ptr: holder for the timer function associated data.
8707 *
8708 * This routine is invoked by the fabric iocb block timer after
8709 * timeout. It posts the fabric iocb block timeout event by setting the
8710 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
8711 * the lpfc_worker_wake_up() routine to wake up the worker thread. The worker
8712 * thread then invokes lpfc_unblock_fabric_iocbs() in response to the posted
8713 * WORKER_FABRIC_BLOCK_TMO event.
8714 **/
92d7f7b0
JS
8715void
8716lpfc_fabric_block_timeout(unsigned long ptr)
8717{
8718 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
8719 unsigned long iflags;
8720 uint32_t tmo_posted;
5e9d9b82 8721
92d7f7b0
JS
8722 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
8723 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
8724 if (!tmo_posted)
8725 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
8726 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
8727
5e9d9b82
JS
8728 if (!tmo_posted)
8729 lpfc_worker_wake_up(phba);
8730 return;
92d7f7b0
JS
8731}
8732
e59058c4 8733/**
3621a710 8734 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
e59058c4
JS
8735 * @phba: pointer to lpfc hba data structure.
8736 *
8737 * This routine issues one fabric iocb from the driver internal list to
8738 * the HBA. It first checks whether it's ready to issue one fabric iocb to
8739 * the HBA (i.e., whether there is no outstanding fabric iocb). If so, it
8740 * removes one pending fabric iocb from the driver internal list and invokes
8741 * the lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
8742 **/
92d7f7b0
JS
8743static void
8744lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
8745{
8746 struct lpfc_iocbq *iocb;
8747 unsigned long iflags;
8748 int ret;
92d7f7b0
JS
8749 IOCB_t *cmd;
8750
8751repeat:
8752 iocb = NULL;
8753 spin_lock_irqsave(&phba->hbalock, iflags);
7f5f3d0d 8754 /* Post any pending iocb to the SLI layer */
92d7f7b0
JS
8755 if (atomic_read(&phba->fabric_iocb_count) == 0) {
8756 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
8757 list);
8758 if (iocb)
7f5f3d0d 8759 /* Increment fabric iocb count to hold the position */
92d7f7b0
JS
8760 atomic_inc(&phba->fabric_iocb_count);
8761 }
8762 spin_unlock_irqrestore(&phba->hbalock, iflags);
8763 if (iocb) {
8764 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
8765 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
8766 iocb->iocb_flag |= LPFC_IO_FABRIC;
8767
858c9f6c
JS
8768 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
8769 "Fabric sched1: ste:x%x",
8770 iocb->vport->port_state, 0, 0);
8771
3772a991 8772 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
92d7f7b0
JS
8773
8774 if (ret == IOCB_ERROR) {
8775 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
8776 iocb->fabric_iocb_cmpl = NULL;
8777 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
8778 cmd = &iocb->iocb;
8779 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
8780 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
8781 iocb->iocb_cmpl(phba, iocb, iocb);
8782
8783 atomic_dec(&phba->fabric_iocb_count);
8784 goto repeat;
8785 }
8786 }
8787
8788 return;
8789}
8790
e59058c4 8791/**
3621a710 8792 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
e59058c4
JS
8793 * @phba: pointer to lpfc hba data structure.
8794 *
8795 * This routine unblocks the issuing of fabric iocb commands. The function
8796 * will clear the fabric iocb block bit and then invoke the routine
8797 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocbs
8798 * from the driver internal fabric iocb list.
8799 **/
92d7f7b0
JS
8800void
8801lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
8802{
8803 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
8804
8805 lpfc_resume_fabric_iocbs(phba);
8806 return;
8807}
8808
e59058c4 8809/**
3621a710 8810 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
e59058c4
JS
8811 * @phba: pointer to lpfc hba data structure.
8812 *
8813 * This routine blocks the issuing of fabric iocbs for a specified amount of
8814 * time (currently 100 ms). This is done by setting the fabric iocb block bit
8815 * and setting up a timeout timer for 100 ms. While the block bit is set, no
8816 * more fabric iocbs will be issued to the HBA.
8817 **/
static void
lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
{
	int blocked;

	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	/* Start a timer to unblock fabric iocbs after 100ms */
	if (!blocked)
		mod_timer(&phba->fabric_block_timer,
			  jiffies + msecs_to_jiffies(100));

	return;
}

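/*
 * Editor's sketch of the 100 ms back-off window these routines implement,
 * condensed from the code above and below (assumed flow, for orientation):
 *
 *	lpfc_cmpl_fabric_iocb()            completion sees BSY/RJT status
 *	    -> lpfc_block_fabric_iocbs()   set FABRIC_COMANDS_BLOCKED and
 *	                                   arm fabric_block_timer (+100 ms)
 *	lpfc_fabric_block_timeout()        timer fires, wake the worker
 *	    -> lpfc_unblock_fabric_iocbs() clear the bit and call
 *	                                   lpfc_resume_fabric_iocbs()
 *
 * Fabric iocbs submitted while the bit is set are parked on
 * phba->fabric_iocb_list rather than being sent to the HBA.
 */
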
/**
 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the callback function installed as the fabric iocb's
 * callback function pointer (iocb->iocb_cmpl); the original callback
 * pointer has been saved in iocb->fabric_iocb_cmpl. The routine restores
 * and invokes the original iocb's callback function and then, unless
 * fabric iocbs have been blocked, invokes lpfc_resume_fabric_iocbs() to
 * issue the next fabric-bound iocb from the driver internal fabric iocb
 * list onto the wire.
 **/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
	struct lpfc_iocbq *rspiocb)
{
	struct ls_rjt stat;

	if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
		BUG();

	switch (rspiocb->iocb.ulpStatus) {
	case IOSTAT_NPORT_RJT:
	case IOSTAT_FABRIC_RJT:
		if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
			lpfc_block_fabric_iocbs(phba);
		}
		break;

	case IOSTAT_NPORT_BSY:
	case IOSTAT_FABRIC_BSY:
		lpfc_block_fabric_iocbs(phba);
		break;

	case IOSTAT_LS_RJT:
		stat.un.lsRjtError =
			be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
		if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
			(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
			lpfc_block_fabric_iocbs(phba);
		break;
	}

	if (atomic_read(&phba->fabric_iocb_count) == 0)
		BUG();

	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
	cmdiocb->fabric_iocb_cmpl = NULL;
	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);

	atomic_dec(&phba->fabric_iocb_count);
	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
		/* Post any pending iocbs to HBA */
		lpfc_resume_fabric_iocbs(phba);
	}
}

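/*
 * Editor's sketch of the interposition pattern used above: the issuer
 * saves the caller's completion handler, substitutes its own, and the
 * substitute restores and chains to the original when the iocb finishes.
 * Functions prefixed "example_" are illustrative only and not part of
 * the driver.
 */
static void example_wrap_completion(struct lpfc_iocbq *iocb)
{
	iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;	/* save original */
	iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;	/* interpose ours */
	iocb->iocb_flag |= LPFC_IO_FABRIC;		/* mark the iocb */
}

static void example_unwrap_and_chain(struct lpfc_hba *phba,
				     struct lpfc_iocbq *cmdiocb,
				     struct lpfc_iocbq *rspiocb)
{
	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;	/* restore */
	cmdiocb->fabric_iocb_cmpl = NULL;
	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);	/* chain to caller */
}
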
/**
 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 * @iocb: pointer to lpfc command iocb data structure.
 *
 * This routine is used as the top-level API for issuing a fabric iocb command
 * such as FLOGI and FDISC. To accommodate certain switch fabrics, the driver
 * makes sure that only one fabric-bound iocb is outstanding at any given
 * time. As such, this routine first checks whether there is already an
 * outstanding fabric iocb on the wire. If so, it puts the newly issued iocb
 * onto the driver internal fabric iocb list, to be issued later. Otherwise,
 * it issues the iocb on the wire and updates the fabric iocb count to
 * indicate that there is one fabric iocb on the wire.
 *
 * Note that this implementation can send fabric IOCBs out of order: the
 * "ready" boolean does not include the condition that the internal fabric
 * IOCB list is empty, so a fabric IOCB issued by this routine may jump
 * ahead of fabric IOCBs already waiting on the internal list.
 *
 * Return code
 *   IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
 *   IOCB_ERROR - failed to issue fabric iocb
 **/
static int
lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
	unsigned long iflags;
	int ready;
	int ret;

	if (atomic_read(&phba->fabric_iocb_count) > 1)
		BUG();

	spin_lock_irqsave(&phba->hbalock, iflags);
	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);

	if (ready)
		/* Increment fabric iocb count to hold the position */
		atomic_inc(&phba->fabric_iocb_count);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (ready) {
		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
		iocb->iocb_flag |= LPFC_IO_FABRIC;

		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
				      "Fabric sched2:   ste:x%x",
				      iocb->vport->port_state, 0, 0);

		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);

		if (ret == IOCB_ERROR) {
			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
			iocb->fabric_iocb_cmpl = NULL;
			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
			atomic_dec(&phba->fabric_iocb_count);
		}
	} else {
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		ret = IOCB_SUCCESS;
	}
	return ret;
}

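/*
 * Editor's sketch, tied to the ordering note in the kernel-doc above: one
 * way to keep strict FIFO order would be to fold the list-empty condition
 * into the "ready" test, so a new iocb can only bypass the queue when
 * nothing is already waiting.  This is a suggestion for illustration, not
 * what the driver currently does.
 */
static inline int example_fabric_ready_fifo(struct lpfc_hba *phba)
{
	/* Caller must hold phba->hbalock, as lpfc_issue_fabric_iocb does */
	return atomic_read(&phba->fabric_iocb_count) == 0 &&
	       !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags) &&
	       list_empty(&phba->fabric_iocb_list);
}
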
/**
 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine aborts all the IOCBs associated with a @vport from the
 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @vport off the list, sets the
 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
 * associated with the IOCB.
 **/
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *tmp_iocb, *piocb;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {

		if (piocb->vport != vport)
			continue;

		list_move_tail(&piocb->list, &completions);
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

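/*
 * Editor's note: lpfc_fabric_abort_vport(), lpfc_fabric_abort_nport() and
 * lpfc_fabric_abort_hba() all use the same splice-then-cancel pattern:
 * matching iocbs are moved to a private "completions" list while holding
 * phba->hbalock, and are only completed after the lock is dropped.
 * lpfc_sli_cancel_iocbs() is defined in lpfc_sli.c; approximately (editor's
 * paraphrase, not a verbatim copy) it does:
 *
 *	while (!list_empty(iocblist)) {
 *		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
 *		if (!piocb->iocb_cmpl) {
 *			lpfc_sli_release_iocbq(phba, piocb);
 *		} else {
 *			piocb->iocb.ulpStatus = ulpstatus;
 *			piocb->iocb.un.ulpWord[4] = ulpWord4;
 *			(piocb->iocb_cmpl)(phba, piocb, piocb);
 *		}
 *	}
 *
 * Completing the iocbs outside the lock avoids re-entering hbalock from
 * the completion handlers.
 */
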
/**
 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine aborts all the IOCBs associated with an @ndlp from the
 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @ndlp off the list, sets the
 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
 * associated with the IOCB.
 **/
void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_hba  *phba = ndlp->phba;
	struct lpfc_iocbq *tmp_iocb, *piocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {
		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {

			list_move_tail(&piocb->list, &completions);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/**
 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the IOCBs currently on the driver internal
 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
 * list, removes each IOCB from the list, sets the status field to
 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
 * the IOCB.
 **/
void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->fabric_iocb_list, &completions);
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/**
 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked during vport cleanup on deletion, and during ndlp
 * cleanup on removal, to clear the ndlp references held by entries on the
 * aborted ELS sgl list that belong to the @vport.
 **/
void
lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry_safe(sglq_entry, sglq_next,
			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
			sglq_entry->ndlp = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}

/**
 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the els xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 slow-path
 * ELS aborted xri.
 **/
void
lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	uint16_t lxri = 0;

	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry_safe(sglq_entry, sglq_next,
			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->sli4_xritag == xri) {
			list_del(&sglq_entry->list);
			ndlp = sglq_entry->ndlp;
			sglq_entry->ndlp = NULL;
			spin_lock(&pring->ring_lock);
			list_add_tail(&sglq_entry->list,
				&phba->sli4_hba.lpfc_sgl_list);
			sglq_entry->state = SGL_FREED;
			spin_unlock(&pring->ring_lock);
			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_set_rrq_active(phba, ndlp,
				sglq_entry->sli4_lxritag,
				rxid, 1);

			/* Check if TXQ queue needs to be serviced */
			if (!(list_empty(&pring->txq)))
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri == NO_XRI) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	spin_lock(&pring->ring_lock);
	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
		spin_unlock(&pring->ring_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	sglq_entry->state = SGL_XRI_ABORTED;
	spin_unlock(&pring->ring_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}

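/*
 * Editor's note on the locking in lpfc_sli4_els_xri_aborted() above: the
 * spinlocks nest in a fixed order and are released in reverse, e.g.:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
 *	spin_lock(&pring->ring_lock);
 *	...
 *	spin_unlock(&pring->ring_lock);
 *	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 *
 * The early-return path in the list walk drops abts_sgl_list_lock and
 * hbalock before calling lpfc_set_rrq_active(), so that call is made with
 * none of these locks held.
 */
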
/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
 * @vport: pointer to virtual port object.
 * @ndlp: nodelist pointer for the impacted node.
 *
 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
 * or an SLI3 ASYNC_STATUS_CN event from the port.  For either event,
 * the driver is required to send a LOGO to the remote node before it
 * attempts to recover its login to the remote node.
 */
void
lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
			   struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	unsigned long flags = 0;

	shost = lpfc_shost_from_vport(vport);
	phba = vport->phba;
	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_SLI, "3093 No rport recovery needed. "
				"rport in state 0x%x\n", ndlp->nlp_state);
		return;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3094 Start rport recovery on shost id 0x%x "
			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
			"flags 0x%x\n",
			shost->host_no, ndlp->nlp_DID,
			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
			ndlp->nlp_flag);
	/*
	 * The rport is not responding.  Remove the FCP-2 flag to prevent
	 * an ADISC in the follow-up recovery code.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	spin_unlock_irqrestore(shost->host_lock, flags);
	lpfc_issue_els_logo(vport, ndlp, 0);
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
}