/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * www.emulex.com *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>


#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there are any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and the return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is LPFC_VPORT_READY, the request for checking the host link
 * attention event will be ignored and the return code shall indicate that no
 * host link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will clean up any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}
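
/*
 * Typical use in an ELS completion handler (sketch; see lpfc_cmpl_els_flogi()
 * below): bail out of further discovery processing when a link attention
 * event is pending.
 *
 *	if (lpfc_els_chk_latt(vport)) {
 *		lpfc_nlp_put(ndlp);
 *		goto out;
 *	}
 */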

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine allocates a lpfc-IOCB data structure from the driver
 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed into
 * the routine, so that the discovery state machine can issue an Extended
 * Link Service (ELS) command. It is a generic lpfc-IOCB allocation and
 * preparation routine used by all the discovery state machine routines;
 * the ELS command-specific fields are set up later by the individual
 * discovery state machine routines after this routine has allocated and
 * prepared the generic IOCB data structure. It fills in the Buffer
 * Descriptor Entries (BDEs) and allocates buffers for both the command
 * payload and the response payload (if expected). The reference count on
 * the ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for the fabric controller and the HBA is running
	 * in FIP mode, send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expectRsp) {
		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));

		icmd->un.elsreq64.remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		if (elscmd == ELS_CMD_FLOGI)
			icmd->ulpTimeout = FF_DEF_RATOV * 2;
		else
			icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	/*
	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
	    ((phba->sli_rev == LPFC_SLI_REV4) &&
	     (vport->fc_flag & FC_PT2PT))) {

		if (expectRsp) {
			icmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			icmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

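	/*
	 * Fill in the buffer pointer list (BPL): the first BDE maps the
	 * command payload; when a response is expected, a second BDE maps
	 * the response buffer.
	 */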
	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	/* prevent preparing iocb with NULL ndlp reference */
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1)
		goto els_iocb_free_pbuf_exit;
	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x"
				 " fc_flag:x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize, vport->port_state,
				 vport->fc_flag);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}
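
/*
 * Typical use from a discovery routine (sketch based on the FLOGI path in
 * lpfc_issue_els_flogi() below; other ELS commands differ only in size,
 * command code and payload):
 *
 *	cmdsize = sizeof(uint32_t) + sizeof(struct serv_parm);
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 *	if (!elsiocb)
 *		return 1;
 *	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
 *	... build the ELS payload in pcmd and set elsiocb->iocb_cmpl ...
 */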

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->context2 = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 6;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
	mp = (struct lpfc_dmabuf *) mbox->context1;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0249 Cannot issue Register Fabric login: Err %d\n", err);
	return -ENXIO;
}
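
/*
 * The "Err %d" value logged in message 0249 above identifies the failing
 * step: 1 - Fabric ndlp missing or inactive, 2 - CONFIG_LINK mailbox
 * allocation failed, 3 - CONFIG_LINK could not be issued, 4 - REG_LOGIN
 * mailbox allocation failed, 5 - lpfc_reg_rpi() failed, 6 - REG_LOGIN
 * could not be issued.
 */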

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 ports only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSPs only if we are fabric or pt-to-pt connected */
	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			goto fail;
		}
		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
		if (!dmabuf->virt) {
			rc = -ENOMEM;
			goto fail;
		}
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf)
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	else
		lpfc_reg_vfi(mboxq, vport, 0);

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->context1 = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail;
	}
	return 0;

fail:
	if (mboxq)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 ports only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname or
 * Fabric nodename has changed in the completion service parameters;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean Address bit in the FLOGI/FDISC response is cleared and the
 * FCID, Fabric portname or Fabric nodename has changed in the completion
 * service parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
		struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)))
		fabric_param_changed = 1;

	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}
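
/*
 * Typical use from the FLOGI/FDISC completion path (sketch; see
 * lpfc_cmpl_els_flogi_fabric() below):
 *
 *	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
 *	if (fabric_param_changed)
 *		... unregister stale RPIs and re-register the VPI/VFI ...
 */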


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID for the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes of the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always returns 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			   to call reg_vnpid at least for the physical host */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs */
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(np))
				continue;
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(shost->host_lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					 "3135 Need register VFI: (x%x/%x)\n",
					 vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign the local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

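	/*
	 * Compare our port name with the one received in the FLOGI
	 * response: per the pt2pt rules described above, the side with the
	 * lexicographically greater WWPN initiates the PLOGI and assigns
	 * the N_Port IDs.
	 */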
	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp)
				goto fail;
			lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	} else
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		goto fail;

	lpfc_config_link(phba, mbox);

	mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
	mbox->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		goto fail;
	}

	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported an error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If a
 * retry has been made (either immediately or delayed, with lpfc_els_retry()
 * returning 1), the command IOCB will be released and the function returns.
 * If the retry attempt has been given up (possibly after reaching the
 * maximum number of retries), one additional decrement of the ndlp reference
 * shall be invoked before going out after releasing the command IOCB. This
 * will actually release the remote node (note that lpfc_els_free_iocb() will
 * also invoke one decrement of the ndlp reference count). If no error is
 * reported in the IOCB status, the command Port ID field is used to determine
 * whether this is a point-to-point topology or a fabric topology: if the
 * Port ID field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"FLOGI cmpl: status:x%x/x%x state:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2858 FLOGI failure Status:x%x/x%x TMO:x%x "
				 "Data x%x x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout, phba->hba_flag,
				 phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout);

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
		     phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(irsp))
				lpfc_issue_reg_vfi(vport);
			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded. Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x, "
			 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR,
				LOG_FIP | LOG_ELS,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);
			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						 LOG_ELS,
						 "2769 FLOGI to FCF (x%x) "
						 "completed successfully\n",
						 phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	lpfc_nlp_put(ndlp);

	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_ABORTED) &&
			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out the FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue FLOGI: opt:x%x",
		phba->sli3_options, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function only issues
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = &phba->sli.ring[LPFC_ELS_RING];

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
			    (ndlp->nlp_DID == Fabric_DID))
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
 * is then invoked with the @vport and the ndlp to perform the FDISC for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

e59058c4 1481/**
3621a710 1482 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
e59058c4
JS
1483 * @phba: pointer to lpfc hba data structure.
1484 * @prsp: pointer to response IOCB payload.
1485 * @ndlp: pointer to a node-list data structure.
1486 *
1487 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1488 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
1489 * The following cases are considered N_Port confirmed:
1490 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on the vport list and matches
1491 * the WWPN of the N_Port logged into; 3) The @ndlp is not on the vport list but
1492 * does not have a WWPN assigned either. If the WWPN is confirmed, the
1493 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1494 * 1) if there is a node on the vport list other than the @ndlp with the same
1495 * WWPN as the N_Port the PLOGI logged into, lpfc_unreg_rpi() will be invoked
1496 * on that node to release the RPI associated with the node; 2) if there is
1497 * no node found on the vport list with the same WWPN as the N_Port the PLOGI
1498 * logged into, a new node shall be allocated (or activated). In either case, the
1499 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1500 * be released, and the new_ndlp shall be put onto the vport node list with
1501 * its pointer returned as the confirmed node.
1502 *
1503 * Note that before the @ndlp is "released", the keepDID from the not-matching
1504 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1505 * of the @ndlp. This is because the release of the @ndlp actually puts it
1506 * into an inactive state on the vport node list, and the vport node list
1507 * management algorithm does not allow two nodes with the same DID.
1508 *
1509 * Return code
1510 * pointer to the PLOGI N_Port @ndlp
1511 **/
488d1469 1512static struct lpfc_nodelist *
92d7f7b0 1513lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
488d1469
JS
1514 struct lpfc_nodelist *ndlp)
1515{
2e0fef85 1516 struct lpfc_vport *vport = ndlp->vport;
953ceeda 1517 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
488d1469 1518 struct lpfc_nodelist *new_ndlp;
0ff10d46
JS
1519 struct lpfc_rport_data *rdata;
1520 struct fc_rport *rport;
488d1469 1521 struct serv_parm *sp;
92d7f7b0 1522 uint8_t name[sizeof(struct lpfc_name)];
e5abba4c 1523 uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
953ceeda 1524 uint16_t keep_nlp_state;
38b92ef8
JS
1525 int put_node;
1526 int put_rport;
cff261f6 1527 unsigned long *active_rrqs_xri_bitmap = NULL;
488d1469 1528
2fb9bd8b
JS
1529 /* Fabric nodes can have the same WWPN so we don't bother searching
1530 * by WWPN. Just return the ndlp that was given to us.
1531 */
1532 if (ndlp->nlp_type & NLP_FABRIC)
1533 return ndlp;
1534
92d7f7b0 1535 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
685f0bf7 1536 memset(name, 0, sizeof(struct lpfc_name));
488d1469 1537
685f0bf7 1538	/* Now we find out if the NPort we are logging into matches the WWPN
488d1469
JS
1539 * we have for that ndlp. If not, we have some work to do.
1540 */
2e0fef85 1541 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
488d1469 1542
e47c9093 1543 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
488d1469 1544 return ndlp;
cff261f6
JS
1545 if (phba->sli_rev == LPFC_SLI_REV4) {
1546 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1547 GFP_KERNEL);
1548 if (active_rrqs_xri_bitmap)
1549 memset(active_rrqs_xri_bitmap, 0,
1550 phba->cfg_rrq_xri_bitmap_sz);
1551 }
488d1469 1552
34f5ad8b
JS
1553 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1554 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
1555 ndlp, ndlp->nlp_DID, new_ndlp);
1556
488d1469 1557 if (!new_ndlp) {
2e0fef85
JS
1558 rc = memcmp(&ndlp->nlp_portname, name,
1559 sizeof(struct lpfc_name));
cff261f6
JS
1560 if (!rc) {
1561 if (active_rrqs_xri_bitmap)
1562 mempool_free(active_rrqs_xri_bitmap,
1563 phba->active_rrq_pool);
92795650 1564 return ndlp;
cff261f6 1565 }
488d1469 1566 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
cff261f6
JS
1567 if (!new_ndlp) {
1568 if (active_rrqs_xri_bitmap)
1569 mempool_free(active_rrqs_xri_bitmap,
1570 phba->active_rrq_pool);
488d1469 1571 return ndlp;
cff261f6 1572 }
2e0fef85 1573 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
e47c9093 1574 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
58da1ffb
JS
1575 rc = memcmp(&ndlp->nlp_portname, name,
1576 sizeof(struct lpfc_name));
cff261f6
JS
1577 if (!rc) {
1578 if (active_rrqs_xri_bitmap)
1579 mempool_free(active_rrqs_xri_bitmap,
1580 phba->active_rrq_pool);
58da1ffb 1581 return ndlp;
cff261f6 1582 }
e47c9093
JS
1583 new_ndlp = lpfc_enable_node(vport, new_ndlp,
1584 NLP_STE_UNUSED_NODE);
cff261f6
JS
1585 if (!new_ndlp) {
1586 if (active_rrqs_xri_bitmap)
1587 mempool_free(active_rrqs_xri_bitmap,
1588 phba->active_rrq_pool);
e47c9093 1589 return ndlp;
cff261f6 1590 }
58da1ffb 1591 keepDID = new_ndlp->nlp_DID;
cff261f6
JS
1592 if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
1593 memcpy(active_rrqs_xri_bitmap,
1594 new_ndlp->active_rrqs_xri_bitmap,
1595 phba->cfg_rrq_xri_bitmap_sz);
19ca7609 1596 } else {
58da1ffb 1597 keepDID = new_ndlp->nlp_DID;
cff261f6
JS
1598 if (phba->sli_rev == LPFC_SLI_REV4 &&
1599 active_rrqs_xri_bitmap)
1600 memcpy(active_rrqs_xri_bitmap,
1601 new_ndlp->active_rrqs_xri_bitmap,
1602 phba->cfg_rrq_xri_bitmap_sz);
19ca7609 1603 }
488d1469 1604
2e0fef85 1605 lpfc_unreg_rpi(vport, new_ndlp);
488d1469 1606 new_ndlp->nlp_DID = ndlp->nlp_DID;
92795650 1607 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
19ca7609 1608 if (phba->sli_rev == LPFC_SLI_REV4)
cff261f6
JS
1609 memcpy(new_ndlp->active_rrqs_xri_bitmap,
1610 ndlp->active_rrqs_xri_bitmap,
1611 phba->cfg_rrq_xri_bitmap_sz);
0ff10d46 1612
953ceeda 1613 spin_lock_irq(shost->host_lock);
e5abba4c
JS
1614 keep_nlp_flag = new_ndlp->nlp_flag;
1615 new_ndlp->nlp_flag = ndlp->nlp_flag;
1616 ndlp->nlp_flag = keep_nlp_flag;
953ceeda 1617 spin_unlock_irq(shost->host_lock);
0ff10d46 1618
953ceeda
JS
1619 /* Set nlp_states accordingly */
1620 keep_nlp_state = new_ndlp->nlp_state;
2e0fef85 1621 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
488d1469 1622
2e0fef85 1623 /* Move this back to NPR state */
87af33fe
JS
1624 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1625 /* The new_ndlp is replacing ndlp totally, so we need
1626 * to put ndlp on UNUSED list and try to free it.
1627 */
34f5ad8b
JS
1628 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1629 "3179 PLOGI confirm NEW: %x %x\n",
1630 new_ndlp->nlp_DID, keepDID);
0ff10d46
JS
1631
1632 /* Fix up the rport accordingly */
1633 rport = ndlp->rport;
1634 if (rport) {
1635 rdata = rport->dd_data;
1636 if (rdata->pnode == ndlp) {
466e840b 1637 /* break the link before dropping the ref */
0ff10d46 1638 ndlp->rport = NULL;
466e840b 1639 lpfc_nlp_put(ndlp);
0ff10d46
JS
1640 rdata->pnode = lpfc_nlp_get(new_ndlp);
1641 new_ndlp->rport = rport;
1642 }
1643 new_ndlp->nlp_type = ndlp->nlp_type;
1644 }
58da1ffb
JS
1645		/* We shall actually free any ndlp whose nlp_DID and
1646		 * nlp_portname fields are both 0, so that an unusable
1647		 * ndlp is never left on the nodelist.
1648 */
1649 if (ndlp->nlp_DID == 0) {
1650 spin_lock_irq(&phba->ndlp_lock);
1651 NLP_SET_FREE_REQ(ndlp);
1652 spin_unlock_irq(&phba->ndlp_lock);
1653 }
0ff10d46 1654
58da1ffb
JS
1655 /* Two ndlps cannot have the same did on the nodelist */
1656 ndlp->nlp_DID = keepDID;
cff261f6
JS
1657 if (phba->sli_rev == LPFC_SLI_REV4 &&
1658 active_rrqs_xri_bitmap)
1659 memcpy(ndlp->active_rrqs_xri_bitmap,
1660 active_rrqs_xri_bitmap,
1661 phba->cfg_rrq_xri_bitmap_sz);
e5abba4c
JS
1662
1663 if (!NLP_CHK_NODE_ACT(ndlp))
1664 lpfc_drop_node(vport, ndlp);
87af33fe 1665 }
92795650 1666 else {
34f5ad8b
JS
1667 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1668 "3180 PLOGI confirm SWAP: %x %x\n",
1669 new_ndlp->nlp_DID, keepDID);
1670
2e0fef85 1671 lpfc_unreg_rpi(vport, ndlp);
34f5ad8b 1672
58da1ffb
JS
1673 /* Two ndlps cannot have the same did */
1674 ndlp->nlp_DID = keepDID;
cff261f6
JS
1675 if (phba->sli_rev == LPFC_SLI_REV4 &&
1676 active_rrqs_xri_bitmap)
1677 memcpy(ndlp->active_rrqs_xri_bitmap,
1678 active_rrqs_xri_bitmap,
1679 phba->cfg_rrq_xri_bitmap_sz);
34f5ad8b 1680
953ceeda
JS
1681 /* Since we are switching over to the new_ndlp,
1682 * reset the old ndlp state
34f5ad8b
JS
1683 */
1684 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1685 (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
953ceeda
JS
1686 keep_nlp_state = NLP_STE_NPR_NODE;
1687 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
34f5ad8b 1688
38b92ef8
JS
1689 /* Fix up the rport accordingly */
1690 rport = ndlp->rport;
1691 if (rport) {
1692 rdata = rport->dd_data;
1693 put_node = rdata->pnode != NULL;
1694 put_rport = ndlp->rport != NULL;
1695 rdata->pnode = NULL;
1696 ndlp->rport = NULL;
1697 if (put_node)
1698 lpfc_nlp_put(ndlp);
1699 if (put_rport)
1700 put_device(&rport->dev);
1701 }
92795650 1702 }
cff261f6
JS
1703 if (phba->sli_rev == LPFC_SLI_REV4 &&
1704 active_rrqs_xri_bitmap)
1705 mempool_free(active_rrqs_xri_bitmap,
1706 phba->active_rrq_pool);
488d1469
JS
1707 return new_ndlp;
1708}
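/*
 * Editor's note -- illustrative sketch, not part of the upstream source:
 * in the routine above, the memcmp() calls against the zero-filled "name"
 * buffer are an "is this WWPN still unassigned" test, while the actual
 * WWPN match is done by lpfc_findnode_wwpn(vport, &sp->portName). A
 * hypothetical helper expressing the zero test would be:
 *
 *	static bool wwpn_unassigned(const struct lpfc_nodelist *ndlp)
 *	{
 *		const uint8_t zero_name[sizeof(struct lpfc_name)] = { 0 };
 *
 *		return memcmp(&ndlp->nlp_portname, zero_name,
 *			      sizeof(struct lpfc_name)) == 0;
 *	}
 *
 * The helper name is hypothetical; the routine performs the same compare
 * inline.
 */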
1709
e59058c4 1710/**
3621a710 1711 * lpfc_end_rscn - Check and handle more rscn for a vport
e59058c4
JS
1712 * @vport: pointer to a host virtual N_Port data structure.
1713 *
1714 * This routine checks whether more Registration State Change
1715 * Notifications (RSCNs) came in while the discovery state machine was in
1716 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1717 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1718 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1719 * handling the RSCNs.
1720 **/
87af33fe
JS
1721void
1722lpfc_end_rscn(struct lpfc_vport *vport)
1723{
1724 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1725
1726 if (vport->fc_flag & FC_RSCN_MODE) {
1727 /*
1728 * Check to see if more RSCNs came in while we were
1729 * processing this one.
1730 */
1731 if (vport->fc_rscn_id_cnt ||
1732 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1733 lpfc_els_handle_rscn(vport);
1734 else {
1735 spin_lock_irq(shost->host_lock);
1736 vport->fc_flag &= ~FC_RSCN_MODE;
1737 spin_unlock_irq(shost->host_lock);
1738 }
1739 }
1740}
1741
19ca7609
JS
1742/**
1743 * lpfc_cmpl_els_rrq - Completion handler for els RRQs.
1744 * @phba: pointer to lpfc hba data structure.
1745 * @cmdiocb: pointer to lpfc command iocb data structure.
1746 * @rspiocb: pointer to lpfc response iocb data structure.
1747 *
1748 * This routine will call the clear rrq function to free the rrq and
1749 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1750 * exist then the clear_rrq is still called because the rrq needs to
1751 * be freed.
1752 **/
1753
1754static void
1755lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1756 struct lpfc_iocbq *rspiocb)
1757{
1758 struct lpfc_vport *vport = cmdiocb->vport;
1759 IOCB_t *irsp;
1760 struct lpfc_nodelist *ndlp;
1761 struct lpfc_node_rrq *rrq;
1762
1763 /* we pass cmdiocb to state machine which needs rspiocb as well */
1764 rrq = cmdiocb->context_un.rrq;
1765 cmdiocb->context_un.rsp_iocb = rspiocb;
1766
1767 irsp = &rspiocb->iocb;
1768 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1769 "RRQ cmpl: status:x%x/x%x did:x%x",
1770 irsp->ulpStatus, irsp->un.ulpWord[4],
1771 irsp->un.elsreq64.remoteID);
1772
1773 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1774 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
1775 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1776 "2882 RRQ completes to NPort x%x "
1777 "with no ndlp. Data: x%x x%x x%x\n",
1778 irsp->un.elsreq64.remoteID,
1779 irsp->ulpStatus, irsp->un.ulpWord[4],
1780 irsp->ulpIoTag);
1781 goto out;
1782 }
1783
1784 /* rrq completes to NPort <nlp_DID> */
1785 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1786 "2880 RRQ completes to NPort x%x "
1787 "Data: x%x x%x x%x x%x x%x\n",
1788 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1789 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1790
1791 if (irsp->ulpStatus) {
1792 /* Check for retry */
1793 /* RRQ failed Don't print the vport to vport rjts */
1794 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1795 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1796 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1797 (phba)->pport->cfg_log_verbose & LOG_ELS)
1798 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1799 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
1800 ndlp->nlp_DID, irsp->ulpStatus,
1801 irsp->un.ulpWord[4]);
1802 }
1803out:
1804 if (rrq)
1805 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1806 lpfc_els_free_iocb(phba, cmdiocb);
1807 return;
1808}
e59058c4 1809/**
3621a710 1810 * lpfc_cmpl_els_plogi - Completion callback function for plogi
e59058c4
JS
1811 * @phba: pointer to lpfc hba data structure.
1812 * @cmdiocb: pointer to lpfc command iocb data structure.
1813 * @rspiocb: pointer to lpfc response iocb data structure.
1814 *
1815 * This routine is the completion callback function for issuing the Port
1816 * Login (PLOGI) command. For PLOGI completion, there must be an active
1817 * ndlp on the vport node list that matches the remote node ID from the
25985edc 1818 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply
e59058c4
JS
1819 * ignored and the command IOCB released. The PLOGI response IOCB status is
1820 * checked for error conditions. If an error status is reported, PLOGI
1821 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1822 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1823 * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discover State
1824 * Machine (DSM) for this PLOGI completion. Finally, it checks whether
1825 * there are additional N_Port nodes with the vport that need to perform
1826 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1827 * PLOGIs.
1828 **/
dea3101e 1829static void
2e0fef85
JS
1830lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1831 struct lpfc_iocbq *rspiocb)
dea3101e 1832{
2e0fef85
JS
1833 struct lpfc_vport *vport = cmdiocb->vport;
1834 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 1835 IOCB_t *irsp;
dea3101e 1836 struct lpfc_nodelist *ndlp;
92795650 1837 struct lpfc_dmabuf *prsp;
eb016566 1838 int disc, rc;
dea3101e 1839
dea3101e
JB
1840 /* we pass cmdiocb to state machine which needs rspiocb as well */
1841 cmdiocb->context_un.rsp_iocb = rspiocb;
1842
1843 irsp = &rspiocb->iocb;
858c9f6c
JS
1844 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1845 "PLOGI cmpl: status:x%x/x%x did:x%x",
1846 irsp->ulpStatus, irsp->un.ulpWord[4],
1847 irsp->un.elsreq64.remoteID);
1848
2e0fef85 1849 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
e47c9093 1850 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
e8b62011
JS
1851 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1852 "0136 PLOGI completes to NPort x%x "
1853 "with no ndlp. Data: x%x x%x x%x\n",
1854 irsp->un.elsreq64.remoteID,
1855 irsp->ulpStatus, irsp->un.ulpWord[4],
1856 irsp->ulpIoTag);
488d1469 1857 goto out;
ed957684 1858 }
dea3101e
JB
1859
1860 /* Since ndlp can be freed in the disc state machine, note if this node
1861 * is being used during discovery.
1862 */
2e0fef85 1863 spin_lock_irq(shost->host_lock);
dea3101e 1864 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
488d1469 1865 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2e0fef85 1866 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1867 rc = 0;
1868
1869 /* PLOGI completes to NPort <nlp_DID> */
e8b62011
JS
1870 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1871 "0102 PLOGI completes to NPort x%x "
1872 "Data: x%x x%x x%x x%x x%x\n",
1873 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1874 irsp->ulpTimeout, disc, vport->num_disc_nodes);
dea3101e 1875 /* Check to see if link went down during discovery */
2e0fef85
JS
1876 if (lpfc_els_chk_latt(vport)) {
1877 spin_lock_irq(shost->host_lock);
dea3101e 1878 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 1879 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1880 goto out;
1881 }
1882
dea3101e
JB
1883 if (irsp->ulpStatus) {
1884 /* Check for retry */
1885 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1886 /* ELS command is being retried */
1887 if (disc) {
2e0fef85 1888 spin_lock_irq(shost->host_lock);
dea3101e 1889 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 1890 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1891 }
1892 goto out;
1893 }
2a9bf3d0
JS
1894 /* PLOGI failed Don't print the vport to vport rjts */
1895 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1896 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1897 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1898 (phba)->pport->cfg_log_verbose & LOG_ELS)
1899 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
e40a02c1
JS
1900 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1901 ndlp->nlp_DID, irsp->ulpStatus,
1902 irsp->un.ulpWord[4]);
dea3101e 1903 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 1904 if (lpfc_error_lost_link(irsp))
c9f8735b 1905 rc = NLP_STE_FREED_NODE;
e47c9093 1906 else
2e0fef85 1907 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1908 NLP_EVT_CMPL_PLOGI);
dea3101e
JB
1909 } else {
1910 /* Good status, call state machine */
92795650 1911 prsp = list_entry(((struct lpfc_dmabuf *)
92d7f7b0
JS
1912 cmdiocb->context2)->list.next,
1913 struct lpfc_dmabuf, list);
1914 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
2e0fef85 1915 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1916 NLP_EVT_CMPL_PLOGI);
dea3101e
JB
1917 }
1918
2e0fef85 1919 if (disc && vport->num_disc_nodes) {
dea3101e 1920 /* Check to see if there are more PLOGIs to be sent */
2e0fef85 1921 lpfc_more_plogi(vport);
dea3101e 1922
2e0fef85
JS
1923 if (vport->num_disc_nodes == 0) {
1924 spin_lock_irq(shost->host_lock);
1925 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1926 spin_unlock_irq(shost->host_lock);
dea3101e 1927
2e0fef85 1928 lpfc_can_disctmo(vport);
87af33fe 1929 lpfc_end_rscn(vport);
dea3101e
JB
1930 }
1931 }
1932
1933out:
1934 lpfc_els_free_iocb(phba, cmdiocb);
1935 return;
1936}
1937
e59058c4 1938/**
3621a710 1939 * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport
e59058c4
JS
1940 * @vport: pointer to a host virtual N_Port data structure.
1941 * @did: destination port identifier.
1942 * @retry: number of retries to the command IOCB.
1943 *
1944 * This routine issues a Port Login (PLOGI) command to a remote N_Port
1945 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
1946 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
1947 * This routine constructs the proper feilds of the PLOGI IOCB and invokes
1948 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
1949 *
1950 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1951 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1952 * will be stored into the context1 field of the IOCB for the completion
1953 * callback function to the PLOGI ELS command.
1954 *
1955 * Return code
1956 * 0 - Successfully issued a plogi for @vport
1957 * 1 - failed to issue a plogi for @vport
1958 **/
dea3101e 1959int
2e0fef85 1960lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
dea3101e 1961{
2e0fef85 1962 struct lpfc_hba *phba = vport->phba;
dea3101e 1963 struct serv_parm *sp;
98c9ea5c 1964 struct lpfc_nodelist *ndlp;
dea3101e 1965 struct lpfc_iocbq *elsiocb;
dea3101e
JB
1966 uint8_t *pcmd;
1967 uint16_t cmdsize;
92d7f7b0 1968 int ret;
dea3101e 1969
98c9ea5c 1970 ndlp = lpfc_findnode_did(vport, did);
e47c9093
JS
1971 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1972 ndlp = NULL;
98c9ea5c 1973
e47c9093 1974 /* If ndlp is not NULL, we will bump the reference count on it */
92d7f7b0 1975 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
98c9ea5c 1976 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
2e0fef85 1977 ELS_CMD_PLOGI);
c9f8735b
JW
1978 if (!elsiocb)
1979 return 1;
dea3101e 1980
dea3101e
JB
1981 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1982
1983 /* For PLOGI request, remainder of payload is service parameters */
1984 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
92d7f7b0
JS
1985 pcmd += sizeof(uint32_t);
1986 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
dea3101e
JB
1987 sp = (struct serv_parm *) pcmd;
1988
5ac6b303
JS
1989 /*
1990	 * If we are an N_Port connected to a Fabric, fix up params so logins
1991	 * to devices on remote loops work.
1992 */
1993 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
1994 sp->cmn.altBbCredit = 1;
1995
dea3101e
JB
1996 if (sp->cmn.fcphLow < FC_PH_4_3)
1997 sp->cmn.fcphLow = FC_PH_4_3;
1998
1999 if (sp->cmn.fcphHigh < FC_PH3)
2000 sp->cmn.fcphHigh = FC_PH3;
2001
e0165f20
JS
2002 sp->cmn.valid_vendor_ver_level = 0;
2003 memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
2004
858c9f6c
JS
2005 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2006 "Issue PLOGI: did:x%x",
2007 did, 0, 0);
2008
dea3101e
JB
2009 phba->fc_stat.elsXmitPLOGI++;
2010 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
3772a991 2011 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
92d7f7b0
JS
2012
2013 if (ret == IOCB_ERROR) {
dea3101e 2014 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2015 return 1;
dea3101e 2016 }
c9f8735b 2017 return 0;
dea3101e
JB
2018}
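/*
 * Editor's note -- illustrative sketch, not part of the upstream source:
 * the ELS requests built in this file share one payload layout, a 32-bit
 * command word followed by the command-specific parameter page, which is
 * why every issue routine sizes and fills its buffer the same way:
 *
 *	cmdsize = sizeof(uint32_t) + sizeof(struct serv_parm);
 *	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
 *	*((uint32_t *)pcmd) = ELS_CMD_PLOGI;
 *	pcmd += sizeof(uint32_t);
 *	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
 *
 * The PRLI, ADISC, SCR and FARPR routines below only swap the command
 * word and the parameter page type.
 */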
2019
e59058c4 2020/**
3621a710 2021 * lpfc_cmpl_els_prli - Completion callback function for prli
e59058c4
JS
2022 * @phba: pointer to lpfc hba data structure.
2023 * @cmdiocb: pointer to lpfc command iocb data structure.
2024 * @rspiocb: pointer to lpfc response iocb data structure.
2025 *
2026 * This routine is the completion callback function for a Process Login
2027 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
2028 * status. If there is error status reported, PRLI retry shall be attempted
2029 * by invoking the lpfc_els_retry() routine. Otherwise, the event
2030 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
2031 * ndlp to mark the PRLI completion.
2032 **/
dea3101e 2033static void
2e0fef85
JS
2034lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2035 struct lpfc_iocbq *rspiocb)
dea3101e 2036{
2e0fef85
JS
2037 struct lpfc_vport *vport = cmdiocb->vport;
2038 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 2039 IOCB_t *irsp;
dea3101e
JB
2040 struct lpfc_nodelist *ndlp;
2041
dea3101e
JB
2042 /* we pass cmdiocb to state machine which needs rspiocb as well */
2043 cmdiocb->context_un.rsp_iocb = rspiocb;
2044
2045 irsp = &(rspiocb->iocb);
2046 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2e0fef85 2047 spin_lock_irq(shost->host_lock);
dea3101e 2048 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2e0fef85 2049 spin_unlock_irq(shost->host_lock);
dea3101e 2050
858c9f6c
JS
2051 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2052 "PRLI cmpl: status:x%x/x%x did:x%x",
2053 irsp->ulpStatus, irsp->un.ulpWord[4],
2054 ndlp->nlp_DID);
dea3101e 2055 /* PRLI completes to NPort <nlp_DID> */
e8b62011
JS
2056 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2057 "0103 PRLI completes to NPort x%x "
2058 "Data: x%x x%x x%x x%x\n",
2059 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2060 irsp->ulpTimeout, vport->num_disc_nodes);
dea3101e 2061
2e0fef85 2062 vport->fc_prli_sent--;
dea3101e 2063 /* Check to see if link went down during discovery */
2e0fef85 2064 if (lpfc_els_chk_latt(vport))
dea3101e
JB
2065 goto out;
2066
2067 if (irsp->ulpStatus) {
2068 /* Check for retry */
2069 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2070 /* ELS command is being retried */
2071 goto out;
2072 }
2073 /* PRLI failed */
e40a02c1
JS
2074 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2075 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
2076 ndlp->nlp_DID, irsp->ulpStatus,
2077 irsp->un.ulpWord[4]);
dea3101e 2078 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 2079 if (lpfc_error_lost_link(irsp))
dea3101e 2080 goto out;
e47c9093 2081 else
2e0fef85 2082 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 2083 NLP_EVT_CMPL_PRLI);
e47c9093 2084 } else
dea3101e 2085 /* Good status, call state machine */
2e0fef85 2086 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 2087 NLP_EVT_CMPL_PRLI);
dea3101e
JB
2088out:
2089 lpfc_els_free_iocb(phba, cmdiocb);
2090 return;
2091}
2092
e59058c4 2093/**
3621a710 2094 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
e59058c4
JS
2095 * @vport: pointer to a host virtual N_Port data structure.
2096 * @ndlp: pointer to a node-list data structure.
2097 * @retry: number of retries to the command IOCB.
2098 *
2099 * This routine issues a Process Login (PRLI) ELS command for the
2100 * @vport. The PRLI service parameters are set up in the payload of the
2101 * PRLI Request command and the pointer to the lpfc_cmpl_els_prli() routine
2102 * is put into the IOCB completion callback func field before invoking the
2103 * routine lpfc_sli_issue_iocb() to send out the PRLI command.
2104 *
2105 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2106 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2107 * will be stored into the context1 field of the IOCB for the completion
2108 * callback function to the PRLI ELS command.
2109 *
2110 * Return code
2111 * 0 - successfully issued prli iocb command for @vport
2112 * 1 - failed to issue prli iocb command for @vport
2113 **/
dea3101e 2114int
2e0fef85 2115lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e
JB
2116 uint8_t retry)
2117{
2e0fef85
JS
2118 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2119 struct lpfc_hba *phba = vport->phba;
dea3101e 2120 PRLI *npr;
dea3101e 2121 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2122 uint8_t *pcmd;
2123 uint16_t cmdsize;
2124
92d7f7b0 2125 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2e0fef85
JS
2126 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2127 ndlp->nlp_DID, ELS_CMD_PRLI);
488d1469 2128 if (!elsiocb)
c9f8735b 2129 return 1;
dea3101e 2130
dea3101e
JB
2131 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2132
2133 /* For PRLI request, remainder of payload is service parameters */
92d7f7b0 2134 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
dea3101e 2135 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
92d7f7b0 2136 pcmd += sizeof(uint32_t);
dea3101e
JB
2137
2138 /* For PRLI, remainder of payload is PRLI parameter page */
2139 npr = (PRLI *) pcmd;
2140 /*
2141 * If our firmware version is 3.20 or later,
2142 * set the following bits for FC-TAPE support.
2143 */
2144 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
2145 npr->ConfmComplAllowed = 1;
2146 npr->Retry = 1;
2147 npr->TaskRetryIdReq = 1;
2148 }
2149 npr->estabImagePair = 1;
2150 npr->readXferRdyDis = 1;
3cb01c57
JS
2151 if (vport->cfg_first_burst_size)
2152 npr->writeXferRdyDis = 1;
dea3101e
JB
2153
2154 /* For FCP support */
2155 npr->prliType = PRLI_FCP_TYPE;
2156 npr->initiatorFunc = 1;
2157
858c9f6c
JS
2158 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2159 "Issue PRLI: did:x%x",
2160 ndlp->nlp_DID, 0, 0);
2161
dea3101e
JB
2162 phba->fc_stat.elsXmitPRLI++;
2163 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
2e0fef85 2164 spin_lock_irq(shost->host_lock);
dea3101e 2165 ndlp->nlp_flag |= NLP_PRLI_SND;
2e0fef85 2166 spin_unlock_irq(shost->host_lock);
3772a991
JS
2167 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2168 IOCB_ERROR) {
2e0fef85 2169 spin_lock_irq(shost->host_lock);
dea3101e 2170 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2e0fef85 2171 spin_unlock_irq(shost->host_lock);
dea3101e 2172 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2173 return 1;
dea3101e 2174 }
2e0fef85 2175 vport->fc_prli_sent++;
c9f8735b 2176 return 0;
dea3101e
JB
2177}
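/*
 * Editor's note -- illustrative sketch, not part of the upstream source:
 * the per-command "send pending" bits in ndlp->nlp_flag (NLP_PRLI_SND,
 * NLP_ADISC_SND, NLP_LOGO_SND) are always flipped under the Scsi_Host
 * lock, and cleared again if the ring refuses the iocb:
 *
 *	spin_lock_irq(shost->host_lock);
 *	ndlp->nlp_flag |= NLP_PRLI_SND;
 *	spin_unlock_irq(shost->host_lock);
 *	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
 *	    IOCB_ERROR) {
 *		spin_lock_irq(shost->host_lock);
 *		ndlp->nlp_flag &= ~NLP_PRLI_SND;
 *		spin_unlock_irq(shost->host_lock);
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 */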
2178
90160e01 2179/**
3621a710 2180 * lpfc_rscn_disc - Perform rscn discovery for a vport
90160e01
JS
2181 * @vport: pointer to a host virtual N_Port data structure.
2182 *
2183 * This routine performs Registration State Change Notification (RSCN)
2184 * discovery for a @vport. If the @vport's node port recovery count is not
2185 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
2186 * the nodes that need recovery. If no PLOGIs were needed through
2187 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
2188 * invoked to check and handle any additional RSCNs that came in while the
2189 * current ones were being processed.
2190 **/
2191static void
2192lpfc_rscn_disc(struct lpfc_vport *vport)
2193{
2194 lpfc_can_disctmo(vport);
2195
2196 /* RSCN discovery */
2197 /* go thru NPR nodes and issue ELS PLOGIs */
2198 if (vport->fc_npr_cnt)
2199 if (lpfc_els_disc_plogi(vport))
2200 return;
2201
2202 lpfc_end_rscn(vport);
2203}
2204
2205/**
3621a710 2206 * lpfc_adisc_done - Complete the adisc phase of discovery
90160e01
JS
2207 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2208 *
2209 * This function is called when the final ADISC is completed during discovery.
2210 * This function handles clearing link attention or issuing reg_vpi depending
2211 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2212 * discovery.
2213 * This function is called with no locks held.
2214 **/
2215static void
2216lpfc_adisc_done(struct lpfc_vport *vport)
2217{
2218 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2219 struct lpfc_hba *phba = vport->phba;
2220
2221 /*
2222 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2223 * and continue discovery.
2224 */
2225 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
6fb120a7
JS
2226 !(vport->fc_flag & FC_RSCN_MODE) &&
2227 (phba->sli_rev < LPFC_SLI_REV4)) {
d454c91f
JS
2228 /* The ADISCs are complete. Doesn't matter if they
2229 * succeeded or failed because the ADISC completion
2230 * routine guarantees to call the state machine and
2231 * the RPI is either unregistered (failed ADISC response)
2232 * or the RPI is still valid and the node is marked
2233 * mapped for a target. The exchanges should be in the
2234 * correct state. This code is specific to SLI3.
2235 */
2236 lpfc_issue_clear_la(phba, vport);
90160e01
JS
2237 lpfc_issue_reg_vpi(phba, vport);
2238 return;
2239 }
2240 /*
2241 * For SLI2, we need to set port_state to READY
2242 * and continue discovery.
2243 */
2244 if (vport->port_state < LPFC_VPORT_READY) {
2245 /* If we get here, there is nothing to ADISC */
85c0f177 2246 lpfc_issue_clear_la(phba, vport);
90160e01
JS
2247 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2248 vport->num_disc_nodes = 0;
2249 /* go thru NPR list, issue ELS PLOGIs */
2250 if (vport->fc_npr_cnt)
2251 lpfc_els_disc_plogi(vport);
2252 if (!vport->num_disc_nodes) {
2253 spin_lock_irq(shost->host_lock);
2254 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2255 spin_unlock_irq(shost->host_lock);
2256 lpfc_can_disctmo(vport);
2257 lpfc_end_rscn(vport);
2258 }
2259 }
2260 vport->port_state = LPFC_VPORT_READY;
2261 } else
2262 lpfc_rscn_disc(vport);
2263}
2264
e59058c4 2265/**
3621a710 2266 * lpfc_more_adisc - Issue more adisc as needed
e59058c4
JS
2267 * @vport: pointer to a host virtual N_Port data structure.
2268 *
2269 * This routine determines whether there are more ndlps on a @vport's
2270 * node list that need to have Address Discover (ADISC) issued. If so, it will
2271 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2272 * remaining nodes which need to have ADISC sent.
2273 **/
0ff10d46 2274void
2e0fef85 2275lpfc_more_adisc(struct lpfc_vport *vport)
dea3101e 2276{
2e0fef85
JS
2277 if (vport->num_disc_nodes)
2278 vport->num_disc_nodes--;
dea3101e 2279 /* Continue discovery with <num_disc_nodes> ADISCs to go */
e8b62011
JS
2280 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2281 "0210 Continue discovery with %d ADISCs to go "
2282 "Data: x%x x%x x%x\n",
2283 vport->num_disc_nodes, vport->fc_adisc_cnt,
2284 vport->fc_flag, vport->port_state);
dea3101e 2285 /* Check to see if there are more ADISCs to be sent */
2e0fef85
JS
2286 if (vport->fc_flag & FC_NLP_MORE) {
2287 lpfc_set_disctmo(vport);
2288 /* go thru NPR nodes and issue any remaining ELS ADISCs */
eb016566 2289 lpfc_els_disc_adisc(vport);
dea3101e 2290 }
90160e01
JS
2291 if (!vport->num_disc_nodes)
2292 lpfc_adisc_done(vport);
dea3101e
JB
2293 return;
2294}
2295
e59058c4 2296/**
3621a710 2297 * lpfc_cmpl_els_adisc - Completion callback function for adisc
e59058c4
JS
2298 * @phba: pointer to lpfc hba data structure.
2299 * @cmdiocb: pointer to lpfc command iocb data structure.
2300 * @rspiocb: pointer to lpfc response iocb data structure.
2301 *
2302 * This routine is the completion function for issuing the Address Discover
2303 * (ADISC) command. It first checks to see whether link went down during
2304 * the discovery process. If so, the node will be marked as node port
2305 * recovery for issuing discover IOCB by the link attention handler and
2306 * exit. Otherwise, the response status is checked. If error was reported
2307 * in the response status, the ADISC command shall be retried by invoking
2308 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
2309 * the response status, the state machine is invoked to set transition
2310 * with respect to NLP_EVT_CMPL_ADISC event.
2311 **/
dea3101e 2312static void
2e0fef85
JS
2313lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2314 struct lpfc_iocbq *rspiocb)
dea3101e 2315{
2e0fef85
JS
2316 struct lpfc_vport *vport = cmdiocb->vport;
2317 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 2318 IOCB_t *irsp;
dea3101e 2319 struct lpfc_nodelist *ndlp;
2e0fef85 2320 int disc;
dea3101e
JB
2321
2322 /* we pass cmdiocb to state machine which needs rspiocb as well */
2323 cmdiocb->context_un.rsp_iocb = rspiocb;
2324
2325 irsp = &(rspiocb->iocb);
2326 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
dea3101e 2327
858c9f6c
JS
2328 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2329 "ADISC cmpl: status:x%x/x%x did:x%x",
2330 irsp->ulpStatus, irsp->un.ulpWord[4],
2331 ndlp->nlp_DID);
2332
dea3101e
JB
2333 /* Since ndlp can be freed in the disc state machine, note if this node
2334 * is being used during discovery.
2335 */
2e0fef85 2336 spin_lock_irq(shost->host_lock);
dea3101e 2337 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
c9f8735b 2338 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2e0fef85 2339 spin_unlock_irq(shost->host_lock);
dea3101e 2340 /* ADISC completes to NPort <nlp_DID> */
e8b62011
JS
2341 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2342 "0104 ADISC completes to NPort x%x "
2343 "Data: x%x x%x x%x x%x x%x\n",
2344 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2345 irsp->ulpTimeout, disc, vport->num_disc_nodes);
dea3101e 2346 /* Check to see if link went down during discovery */
2e0fef85
JS
2347 if (lpfc_els_chk_latt(vport)) {
2348 spin_lock_irq(shost->host_lock);
dea3101e 2349 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 2350 spin_unlock_irq(shost->host_lock);
dea3101e
JB
2351 goto out;
2352 }
2353
2354 if (irsp->ulpStatus) {
2355 /* Check for retry */
2356 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2357 /* ELS command is being retried */
2358 if (disc) {
2e0fef85 2359 spin_lock_irq(shost->host_lock);
dea3101e 2360 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85
JS
2361 spin_unlock_irq(shost->host_lock);
2362 lpfc_set_disctmo(vport);
dea3101e
JB
2363 }
2364 goto out;
2365 }
2366 /* ADISC failed */
e40a02c1
JS
2367 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2368 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2369 ndlp->nlp_DID, irsp->ulpStatus,
2370 irsp->un.ulpWord[4]);
dea3101e 2371 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 2372 if (!lpfc_error_lost_link(irsp))
2e0fef85 2373 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
858c9f6c 2374 NLP_EVT_CMPL_ADISC);
e47c9093 2375 } else
dea3101e 2376 /* Good status, call state machine */
2e0fef85 2377 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
dea3101e 2378 NLP_EVT_CMPL_ADISC);
dea3101e 2379
90160e01
JS
2380 /* Check to see if there are more ADISCs to be sent */
2381 if (disc && vport->num_disc_nodes)
2e0fef85 2382 lpfc_more_adisc(vport);
dea3101e
JB
2383out:
2384 lpfc_els_free_iocb(phba, cmdiocb);
2385 return;
2386}
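/*
 * Editor's note -- illustrative sketch, not part of the upstream source:
 * the PLOGI and ADISC completions share a bookkeeping pattern -- the
 * NLP_NPR_2B_DISC bit is sampled and cleared up front, because the ndlp
 * may be freed inside the discovery state machine, and the saved value
 * then drives the "more to send" check at the end:
 *
 *	spin_lock_irq(shost->host_lock);
 *	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
 *	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
 *	spin_unlock_irq(shost->host_lock);
 *	...
 *	if (disc && vport->num_disc_nodes)
 *		lpfc_more_adisc(vport);
 */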
2387
e59058c4 2388/**
3621a710 2389 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
e59058c4
JS
2390 * @vport: pointer to a virtual N_Port data structure.
2391 * @ndlp: pointer to a node-list data structure.
2392 * @retry: number of retries to the command IOCB.
2393 *
2394 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2395 * @vport. It prepares the payload of the ADISC ELS command, updates the
2396 * flags and state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2397 * to issue the ADISC ELS command.
2398 *
2399 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2400 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2401 * will be stored into the context1 field of the IOCB for the completion
2402 * callback function to the ADISC ELS command.
2403 *
2404 * Return code
2405 * 0 - successfully issued adisc
2406 * 1 - failed to issue adisc
2407 **/
dea3101e 2408int
2e0fef85 2409lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e
JB
2410 uint8_t retry)
2411{
2e0fef85
JS
2412 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2413 struct lpfc_hba *phba = vport->phba;
dea3101e 2414 ADISC *ap;
dea3101e 2415 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2416 uint8_t *pcmd;
2417 uint16_t cmdsize;
2418
92d7f7b0 2419 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2e0fef85
JS
2420 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2421 ndlp->nlp_DID, ELS_CMD_ADISC);
488d1469 2422 if (!elsiocb)
c9f8735b 2423 return 1;
dea3101e 2424
dea3101e
JB
2425 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2426
2427 /* For ADISC request, remainder of payload is service parameters */
2428 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
92d7f7b0 2429 pcmd += sizeof(uint32_t);
dea3101e
JB
2430
2431 /* Fill in ADISC payload */
2432 ap = (ADISC *) pcmd;
2433 ap->hardAL_PA = phba->fc_pref_ALPA;
92d7f7b0
JS
2434 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2435 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 2436 ap->DID = be32_to_cpu(vport->fc_myDID);
dea3101e 2437
858c9f6c
JS
2438 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2439 "Issue ADISC: did:x%x",
2440 ndlp->nlp_DID, 0, 0);
2441
dea3101e
JB
2442 phba->fc_stat.elsXmitADISC++;
2443 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2e0fef85 2444 spin_lock_irq(shost->host_lock);
dea3101e 2445 ndlp->nlp_flag |= NLP_ADISC_SND;
2e0fef85 2446 spin_unlock_irq(shost->host_lock);
3772a991
JS
2447 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2448 IOCB_ERROR) {
2e0fef85 2449 spin_lock_irq(shost->host_lock);
dea3101e 2450 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2e0fef85 2451 spin_unlock_irq(shost->host_lock);
dea3101e 2452 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2453 return 1;
dea3101e 2454 }
c9f8735b 2455 return 0;
dea3101e
JB
2456}
2457
e59058c4 2458/**
3621a710 2459 * lpfc_cmpl_els_logo - Completion callback function for logo
e59058c4
JS
2460 * @phba: pointer to lpfc hba data structure.
2461 * @cmdiocb: pointer to lpfc command iocb data structure.
2462 * @rspiocb: pointer to lpfc response iocb data structure.
2463 *
2464 * This routine is the completion function for issuing the ELS Logout (LOGO)
2465 * command. If no error status was reported from the LOGO response, the
2466 * state machine of the associated ndlp shall be invoked for transition with
2467 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2468 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2469 **/
dea3101e 2470static void
2e0fef85
JS
2471lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2472 struct lpfc_iocbq *rspiocb)
dea3101e 2473{
2e0fef85
JS
2474 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2475 struct lpfc_vport *vport = ndlp->vport;
2476 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 2477 IOCB_t *irsp;
92494144 2478 struct lpfcMboxq *mbox;
086a345f
JS
2479 unsigned long flags;
2480 uint32_t skip_recovery = 0;
dea3101e 2481
dea3101e
JB
2482 /* we pass cmdiocb to state machine which needs rspiocb as well */
2483 cmdiocb->context_un.rsp_iocb = rspiocb;
2484
2485 irsp = &(rspiocb->iocb);
2e0fef85 2486 spin_lock_irq(shost->host_lock);
dea3101e 2487 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2e0fef85 2488 spin_unlock_irq(shost->host_lock);
dea3101e 2489
858c9f6c
JS
2490 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2491 "LOGO cmpl: status:x%x/x%x did:x%x",
2492 irsp->ulpStatus, irsp->un.ulpWord[4],
2493 ndlp->nlp_DID);
086a345f 2494
dea3101e 2495 /* LOGO completes to NPort <nlp_DID> */
e8b62011
JS
2496 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2497 "0105 LOGO completes to NPort x%x "
2498 "Data: x%x x%x x%x x%x\n",
2499 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2500 irsp->ulpTimeout, vport->num_disc_nodes);
086a345f
JS
2501
2502 if (lpfc_els_chk_latt(vport)) {
2503 skip_recovery = 1;
dea3101e 2504 goto out;
086a345f 2505 }
dea3101e 2506
086a345f 2507 /* Check to see if link went down during discovery */
92d7f7b0
JS
2508 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2509 /* NLP_EVT_DEVICE_RM should unregister the RPI
2510 * which should abort all outstanding IOs.
2511 */
2512 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2513 NLP_EVT_DEVICE_RM);
086a345f 2514 skip_recovery = 1;
92d7f7b0
JS
2515 goto out;
2516 }
2517
dea3101e
JB
2518 if (irsp->ulpStatus) {
2519 /* Check for retry */
086a345f 2520 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
dea3101e 2521 /* ELS command is being retried */
086a345f 2522 skip_recovery = 1;
dea3101e 2523 goto out;
086a345f 2524 }
dea3101e 2525 /* LOGO failed */
e40a02c1
JS
2526 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2527 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2528 ndlp->nlp_DID, irsp->ulpStatus,
2529 irsp->un.ulpWord[4]);
dea3101e 2530 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
086a345f
JS
2531 if (lpfc_error_lost_link(irsp)) {
2532 skip_recovery = 1;
dea3101e 2533 goto out;
086a345f
JS
2534 }
2535 }
2536
2537 /* Call state machine. This will unregister the rpi if needed. */
2538 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
2539
dea3101e
JB
2540out:
2541 lpfc_els_free_iocb(phba, cmdiocb);
92494144
JS
2542 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
2543 if ((vport->fc_flag & FC_PT2PT) &&
2544 !(vport->fc_flag & FC_PT2PT_PLOGI)) {
2545 phba->pport->fc_myDID = 0;
2546 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2547 if (mbox) {
2548 lpfc_config_link(phba, mbox);
2549 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2550 mbox->vport = vport;
2551 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
2552 MBX_NOT_FINISHED) {
2553 mempool_free(mbox, phba->mbox_mem_pool);
086a345f 2554 skip_recovery = 1;
92494144
JS
2555 }
2556 }
2557 }
086a345f
JS
2558
2559 /*
2560 * If the node is a target, the handling attempts to recover the port.
2561 * For any other port type, the rpi is unregistered as an implicit
2562 * LOGO.
2563 */
2564 if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
2565 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2566 spin_lock_irqsave(shost->host_lock, flags);
2567 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2568 spin_unlock_irqrestore(shost->host_lock, flags);
2569
2570 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2571 "3187 LOGO completes to NPort x%x: Start "
2572 "Recovery Data: x%x x%x x%x x%x\n",
2573 ndlp->nlp_DID, irsp->ulpStatus,
2574 irsp->un.ulpWord[4], irsp->ulpTimeout,
2575 vport->num_disc_nodes);
2576 lpfc_disc_start(vport);
2577 }
dea3101e
JB
2578 return;
2579}
2580
e59058c4 2581/**
3621a710 2582 * lpfc_issue_els_logo - Issue a logo to a node on a vport
e59058c4
JS
2583 * @vport: pointer to a virtual N_Port data structure.
2584 * @ndlp: pointer to a node-list data structure.
2585 * @retry: number of retries to the command IOCB.
2586 *
2587 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2588 * to a remote node, referred by an @ndlp on a @vport. It constructs the
2589 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2590 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2591 *
2592 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2593 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2594 * will be stored into the context1 field of the IOCB for the completion
2595 * callback function to the LOGO ELS command.
2596 *
2597 * Return code
2598 * 0 - successfully issued logo
2599 * 1 - failed to issue logo
2600 **/
dea3101e 2601int
2e0fef85 2602lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e
JB
2603 uint8_t retry)
2604{
2e0fef85
JS
2605 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2606 struct lpfc_hba *phba = vport->phba;
dea3101e 2607 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2608 uint8_t *pcmd;
2609 uint16_t cmdsize;
92d7f7b0 2610 int rc;
dea3101e 2611
98c9ea5c
JS
2612 spin_lock_irq(shost->host_lock);
2613 if (ndlp->nlp_flag & NLP_LOGO_SND) {
2614 spin_unlock_irq(shost->host_lock);
2615 return 0;
2616 }
2617 spin_unlock_irq(shost->host_lock);
2618
92d7f7b0 2619 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2e0fef85
JS
2620 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2621 ndlp->nlp_DID, ELS_CMD_LOGO);
488d1469 2622 if (!elsiocb)
c9f8735b 2623 return 1;
dea3101e 2624
dea3101e
JB
2625 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2626 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
92d7f7b0 2627 pcmd += sizeof(uint32_t);
dea3101e
JB
2628
2629 /* Fill in LOGO payload */
2e0fef85 2630 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
92d7f7b0
JS
2631 pcmd += sizeof(uint32_t);
2632 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
dea3101e 2633
858c9f6c
JS
2634 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2635 "Issue LOGO: did:x%x",
2636 ndlp->nlp_DID, 0, 0);
2637
086a345f
JS
2638 /*
2639 * If we are issuing a LOGO, we may try to recover the remote NPort
2640 * by issuing a PLOGI later. Even though we issue ELS cmds by the
2641 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
2642	 * that ELS command is in-flight, the HBA returns an IOERR_INVALID_RPI
2643	 * for that ELS cmd. To avoid this situation, let's get rid of the
2644 * RPI right now, before any ELS cmds are sent.
2645 */
2646 spin_lock_irq(shost->host_lock);
2647 ndlp->nlp_flag |= NLP_ISSUE_LOGO;
2648 spin_unlock_irq(shost->host_lock);
2649 if (lpfc_unreg_rpi(vport, ndlp)) {
2650 lpfc_els_free_iocb(phba, elsiocb);
2651 return 0;
2652 }
2653
dea3101e
JB
2654 phba->fc_stat.elsXmitLOGO++;
2655 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2e0fef85 2656 spin_lock_irq(shost->host_lock);
dea3101e 2657 ndlp->nlp_flag |= NLP_LOGO_SND;
086a345f 2658 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
2e0fef85 2659 spin_unlock_irq(shost->host_lock);
3772a991 2660 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
92d7f7b0
JS
2661
2662 if (rc == IOCB_ERROR) {
2e0fef85 2663 spin_lock_irq(shost->host_lock);
dea3101e 2664 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2e0fef85 2665 spin_unlock_irq(shost->host_lock);
dea3101e 2666 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2667 return 1;
dea3101e 2668 }
c9f8735b 2669 return 0;
dea3101e
JB
2670}
2671
e59058c4 2672/**
3621a710 2673 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
e59058c4
JS
2674 * @phba: pointer to lpfc hba data structure.
2675 * @cmdiocb: pointer to lpfc command iocb data structure.
2676 * @rspiocb: pointer to lpfc response iocb data structure.
2677 *
2678 * This routine is a generic completion callback function for ELS commands.
2679 * Specifically, it is the callback function which does not need to perform
2680 * any command specific operations. It is currently used by the ELS command
2681 * issuing routines for the ELS State Change Request (SCR),
2682 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2683 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
2684 * certain debug loggings, this callback function simply invokes the
2685 * lpfc_els_chk_latt() routine to check whether link went down during the
2686 * discovery process.
2687 **/
dea3101e 2688static void
2e0fef85
JS
2689lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2690 struct lpfc_iocbq *rspiocb)
dea3101e 2691{
2e0fef85 2692 struct lpfc_vport *vport = cmdiocb->vport;
dea3101e
JB
2693 IOCB_t *irsp;
2694
2695 irsp = &rspiocb->iocb;
2696
858c9f6c
JS
2697 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2698 "ELS cmd cmpl: status:x%x/x%x did:x%x",
2699 irsp->ulpStatus, irsp->un.ulpWord[4],
2700 irsp->un.elsreq64.remoteID);
dea3101e 2701 /* ELS cmd tag <ulpIoTag> completes */
e8b62011
JS
2702 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2703 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2704 irsp->ulpIoTag, irsp->ulpStatus,
2705 irsp->un.ulpWord[4], irsp->ulpTimeout);
dea3101e 2706 /* Check to see if link went down during discovery */
2e0fef85 2707 lpfc_els_chk_latt(vport);
dea3101e
JB
2708 lpfc_els_free_iocb(phba, cmdiocb);
2709 return;
2710}
2711
e59058c4 2712/**
3621a710 2713 * lpfc_issue_els_scr - Issue a scr to a node on a vport
e59058c4
JS
2714 * @vport: pointer to a host virtual N_Port data structure.
2715 * @nportid: N_Port identifier to the remote node.
2716 * @retry: number of retries to the command IOCB.
2717 *
2718 * This routine issues a State Change Request (SCR) to a fabric node
2719 * on a @vport. The remote node @nportid is passed into the function. It
2720 * first search the @vport node list to find the matching ndlp. If no such
2721 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2722 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2723 * routine is invoked to send the SCR IOCB.
2724 *
2725 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2726 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2727 * will be stored into the context1 field of the IOCB for the completion
2728 * callback function to the SCR ELS command.
2729 *
2730 * Return code
2731 * 0 - Successfully issued scr command
2732 * 1 - Failed to issue scr command
2733 **/
dea3101e 2734int
2e0fef85 2735lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
dea3101e 2736{
2e0fef85 2737 struct lpfc_hba *phba = vport->phba;
dea3101e 2738 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2739 uint8_t *pcmd;
2740 uint16_t cmdsize;
2741 struct lpfc_nodelist *ndlp;
2742
92d7f7b0 2743 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
dea3101e 2744
e47c9093
JS
2745 ndlp = lpfc_findnode_did(vport, nportid);
2746 if (!ndlp) {
2747 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2748 if (!ndlp)
2749 return 1;
2750 lpfc_nlp_init(vport, ndlp, nportid);
2751 lpfc_enqueue_node(vport, ndlp);
2752 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2753 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2754 if (!ndlp)
2755 return 1;
2756 }
2e0fef85
JS
2757
2758 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2759 ndlp->nlp_DID, ELS_CMD_SCR);
dea3101e 2760
488d1469 2761 if (!elsiocb) {
fa4066b6
JS
2762 /* This will trigger the release of the node just
2763 * allocated
2764 */
329f9bc7 2765 lpfc_nlp_put(ndlp);
c9f8735b 2766 return 1;
dea3101e
JB
2767 }
2768
dea3101e
JB
2769 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2770
2771 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
92d7f7b0 2772 pcmd += sizeof(uint32_t);
dea3101e
JB
2773
2774 /* For SCR, remainder of payload is SCR parameter page */
92d7f7b0 2775 memset(pcmd, 0, sizeof(SCR));
dea3101e
JB
2776 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
2777
858c9f6c
JS
2778 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2779 "Issue SCR: did:x%x",
2780 ndlp->nlp_DID, 0, 0);
2781
dea3101e
JB
2782 phba->fc_stat.elsXmitSCR++;
2783 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3772a991
JS
2784 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2785 IOCB_ERROR) {
fa4066b6
JS
2786 /* The additional lpfc_nlp_put will cause the following
2787		 * lpfc_els_free_iocb routine to trigger the release of
2788 * the node.
2789 */
329f9bc7 2790 lpfc_nlp_put(ndlp);
dea3101e 2791 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2792 return 1;
dea3101e 2793 }
fa4066b6
JS
2794 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2795 * trigger the release of node.
2796 */
cff261f6 2797
329f9bc7 2798 lpfc_nlp_put(ndlp);
c9f8735b 2799 return 0;
dea3101e
JB
2800}
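/*
 * Editor's note -- illustrative sketch, not part of the upstream source:
 * SCR (and FARPR below) use the generic lpfc_cmpl_els_cmd() completion,
 * which never touches the node, so the extra node reference noted in the
 * comments above is dropped on both the failure and the success path:
 *
 *	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
 *	    IOCB_ERROR) {
 *		lpfc_nlp_put(ndlp);
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *	lpfc_nlp_put(ndlp);
 *	return 0;
 *
 * Either way, a node that was allocated purely for this request can then
 * be released once the iocb is freed or completes.
 */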
2801
e59058c4 2802/**
3621a710 2803 * lpfc_issue_els_farpr - Issue a farpr to a node on a vport
e59058c4
JS
2804 * @vport: pointer to a host virtual N_Port data structure.
2805 * @nportid: N_Port identifier to the remote node.
2806 * @retry: number of retries to the command IOCB.
2807 *
2808 * This routine issues a Fibre Channel Address Resolution Response
2809 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
2810 * is passed into the function. It first searches the @vport node list to find
2811 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
2812 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
2813 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
2814 *
2815 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2816 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2817 * will be stored into the context1 field of the IOCB for the completion
2818 * callback function to the FARPR ELS command.
2819 *
2820 * Return code
2821 * 0 - Successfully issued farpr command
2822 * 1 - Failed to issue farpr command
2823 **/
dea3101e 2824static int
2e0fef85 2825lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
dea3101e 2826{
2e0fef85 2827 struct lpfc_hba *phba = vport->phba;
dea3101e 2828 struct lpfc_iocbq *elsiocb;
dea3101e
JB
2829 FARP *fp;
2830 uint8_t *pcmd;
2831 uint32_t *lp;
2832 uint16_t cmdsize;
2833 struct lpfc_nodelist *ondlp;
2834 struct lpfc_nodelist *ndlp;
2835
92d7f7b0 2836 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
dea3101e 2837
e47c9093
JS
2838 ndlp = lpfc_findnode_did(vport, nportid);
2839 if (!ndlp) {
2840 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2841 if (!ndlp)
2842 return 1;
2843 lpfc_nlp_init(vport, ndlp, nportid);
2844 lpfc_enqueue_node(vport, ndlp);
2845 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2846 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2847 if (!ndlp)
2848 return 1;
2849 }
2e0fef85
JS
2850
2851 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2852 ndlp->nlp_DID, ELS_CMD_RNID);
488d1469 2853 if (!elsiocb) {
fa4066b6
JS
2854 /* This will trigger the release of the node just
2855 * allocated
2856 */
329f9bc7 2857 lpfc_nlp_put(ndlp);
c9f8735b 2858 return 1;
dea3101e
JB
2859 }
2860
dea3101e
JB
2861 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2862
2863 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
92d7f7b0 2864 pcmd += sizeof(uint32_t);
dea3101e
JB
2865
2866 /* Fill in FARPR payload */
2867 fp = (FARP *) (pcmd);
92d7f7b0 2868 memset(fp, 0, sizeof(FARP));
dea3101e
JB
2869 lp = (uint32_t *) pcmd;
2870 *lp++ = be32_to_cpu(nportid);
2e0fef85 2871 *lp++ = be32_to_cpu(vport->fc_myDID);
dea3101e
JB
2872 fp->Rflags = 0;
2873 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
2874
92d7f7b0
JS
2875 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
2876 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 2877 ondlp = lpfc_findnode_did(vport, nportid);
e47c9093 2878 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
dea3101e 2879 memcpy(&fp->OportName, &ondlp->nlp_portname,
92d7f7b0 2880 sizeof(struct lpfc_name));
dea3101e 2881 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
92d7f7b0 2882 sizeof(struct lpfc_name));
dea3101e
JB
2883 }
2884
858c9f6c
JS
2885 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2886 "Issue FARPR: did:x%x",
2887 ndlp->nlp_DID, 0, 0);
2888
dea3101e
JB
2889 phba->fc_stat.elsXmitFARPR++;
2890 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3772a991
JS
2891 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2892 IOCB_ERROR) {
fa4066b6
JS
2893 /* The additional lpfc_nlp_put will cause the following
2894 * lpfc_els_free_iocb routine to trigger the release of
2895 * the node.
2896 */
329f9bc7 2897 lpfc_nlp_put(ndlp);
dea3101e 2898 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2899 return 1;
dea3101e 2900 }
fa4066b6
JS
2901 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2902 * trigger the release of the node.
2903 */
329f9bc7 2904 lpfc_nlp_put(ndlp);
c9f8735b 2905 return 0;
dea3101e
JB
2906}
2907
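/*
 * Illustrative sketch only, not part of the original driver: shows how a
 * caller might drive lpfc_issue_els_farpr() above and report a failure with
 * the logging convention used throughout this file.  The helper name and the
 * remote_did parameter are hypothetical.
 */
static void
lpfc_example_send_farpr(struct lpfc_vport *vport, uint32_t remote_did)
{
	/* lpfc_issue_els_farpr() returns 0 on success, 1 on failure */
	if (lpfc_issue_els_farpr(vport, remote_did, 0))
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "FARPR issue failed for DID x%x\n",
				 remote_did);
}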
e59058c4 2908/**
3621a710 2909 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
e59058c4
JS
2910 * @vport: pointer to a host virtual N_Port data structure.
2911 * @nlp: pointer to a node-list data structure.
2912 *
2913 * This routine cancels the timer with a delayed IOCB-command retry for
2914 * a @vport's @nlp. It stops the timer for the delayed function retry and
2915 * removes the ELS retry event if one is pending. In addition, if the
2916 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
2917 * commands are sent for the @vport's nodes that require issuing discovery
2918 * ADISC.
2919 **/
fdcebe28 2920void
2e0fef85 2921lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
fdcebe28 2922{
2e0fef85 2923 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
e47c9093 2924 struct lpfc_work_evt *evtp;
2e0fef85 2925
0d2b6b83
JS
2926 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
2927 return;
2e0fef85 2928 spin_lock_irq(shost->host_lock);
fdcebe28 2929 nlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 2930 spin_unlock_irq(shost->host_lock);
fdcebe28
JS
2931 del_timer_sync(&nlp->nlp_delayfunc);
2932 nlp->nlp_last_elscmd = 0;
e47c9093 2933 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
fdcebe28 2934 list_del_init(&nlp->els_retry_evt.evt_listp);
e47c9093
JS
2935 /* Decrement nlp reference count held for the delayed retry */
2936 evtp = &nlp->els_retry_evt;
2937 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
2938 }
fdcebe28 2939 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
2e0fef85 2940 spin_lock_irq(shost->host_lock);
fdcebe28 2941 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2e0fef85
JS
2942 spin_unlock_irq(shost->host_lock);
2943 if (vport->num_disc_nodes) {
0d2b6b83
JS
2944 if (vport->port_state < LPFC_VPORT_READY) {
2945 /* Check if there are more ADISCs to be sent */
2946 lpfc_more_adisc(vport);
0d2b6b83
JS
2947 } else {
2948 /* Check if there are more PLOGIs to be sent */
2949 lpfc_more_plogi(vport);
90160e01
JS
2950 if (vport->num_disc_nodes == 0) {
2951 spin_lock_irq(shost->host_lock);
2952 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2953 spin_unlock_irq(shost->host_lock);
2954 lpfc_can_disctmo(vport);
2955 lpfc_end_rscn(vport);
2956 }
fdcebe28
JS
2957 }
2958 }
2959 }
2960 return;
2961}
2962
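/*
 * Illustrative sketch only (hypothetical helper): callers normally clear any
 * pending delayed ELS retry before reusing or tearing down a node so the
 * delayed-function timer cannot fire against a stale ndlp.  The flag test
 * mirrors the guard performed inside lpfc_cancel_retry_delay_tmo() itself.
 */
static void
lpfc_example_quiesce_delayed_retry(struct lpfc_vport *vport,
				   struct lpfc_nodelist *ndlp)
{
	if (ndlp->nlp_flag & NLP_DELAY_TMO)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
}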
e59058c4 2963/**
3621a710 2964 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
e59058c4
JS
2965 * @ptr: holder for the pointer to the timer function associated data (ndlp).
2966 *
2967 * This routine is invoked by the ndlp delayed-function timer to check
2968 * whether there are any pending ELS retry events for the node. If not, it
2969 * simply returns. Otherwise, if there is at least one ELS delayed event, it
2970 * adds the delayed event to the HBA work list and invokes the
2971 * lpfc_worker_wake_up() routine to wake up the worker thread to process the
2972 * event. Note that lpfc_nlp_get() is called before posting the event to
2973 * the work list to hold a reference count on the ndlp, which guarantees the
2974 * reference to ndlp will still be available when the worker thread gets
2975 * to the event associated with the ndlp.
2976 **/
dea3101e
JB
2977void
2978lpfc_els_retry_delay(unsigned long ptr)
2979{
2e0fef85
JS
2980 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2981 struct lpfc_vport *vport = ndlp->vport;
2e0fef85 2982 struct lpfc_hba *phba = vport->phba;
92d7f7b0 2983 unsigned long flags;
2e0fef85 2984 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
dea3101e 2985
92d7f7b0 2986 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 2987 if (!list_empty(&evtp->evt_listp)) {
92d7f7b0 2988 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e
JB
2989 return;
2990 }
2991
fa4066b6
JS
2992 /* We need to hold the node by incrementing the reference
2993 * count until the queued work is done
2994 */
2995 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
5e9d9b82
JS
2996 if (evtp->evt_arg1) {
2997 evtp->evt = LPFC_EVT_ELS_RETRY;
2998 list_add_tail(&evtp->evt_listp, &phba->work_list);
92d7f7b0 2999 lpfc_worker_wake_up(phba);
5e9d9b82 3000 }
92d7f7b0 3001 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e
JB
3002 return;
3003}
3004
e59058c4 3005/**
3621a710 3006 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
e59058c4
JS
3007 * @ndlp: pointer to a node-list data structure.
3008 *
3009 * This routine is the worker-thread handler for processing the @ndlp delayed
3010 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
3011 * the last ELS command from the associated ndlp and invokes the proper ELS
3012 * function according to the delayed ELS command to retry the command.
3013 **/
dea3101e
JB
3014void
3015lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
3016{
2e0fef85
JS
3017 struct lpfc_vport *vport = ndlp->vport;
3018 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
eb016566 3019 uint32_t cmd, retry;
dea3101e 3020
2e0fef85 3021 spin_lock_irq(shost->host_lock);
5024ab17
JW
3022 cmd = ndlp->nlp_last_elscmd;
3023 ndlp->nlp_last_elscmd = 0;
dea3101e
JB
3024
3025 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2e0fef85 3026 spin_unlock_irq(shost->host_lock);
dea3101e
JB
3027 return;
3028 }
3029
3030 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 3031 spin_unlock_irq(shost->host_lock);
1a169689
JS
3032 /*
3033 * If a discovery event readded nlp_delayfunc after timer
3034 * firing and before processing the timer, cancel the
3035 * nlp_delayfunc.
3036 */
3037 del_timer_sync(&ndlp->nlp_delayfunc);
dea3101e 3038 retry = ndlp->nlp_retry;
4d9ab994 3039 ndlp->nlp_retry = 0;
dea3101e
JB
3040
3041 switch (cmd) {
3042 case ELS_CMD_FLOGI:
2e0fef85 3043 lpfc_issue_els_flogi(vport, ndlp, retry);
dea3101e
JB
3044 break;
3045 case ELS_CMD_PLOGI:
2e0fef85 3046 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
5024ab17 3047 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 3048 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6ad42535 3049 }
dea3101e
JB
3050 break;
3051 case ELS_CMD_ADISC:
2e0fef85 3052 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
5024ab17 3053 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 3054 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
6ad42535 3055 }
dea3101e
JB
3056 break;
3057 case ELS_CMD_PRLI:
2e0fef85 3058 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
5024ab17 3059 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 3060 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
6ad42535 3061 }
dea3101e
JB
3062 break;
3063 case ELS_CMD_LOGO:
2e0fef85 3064 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
5024ab17 3065 ndlp->nlp_prev_state = ndlp->nlp_state;
086a345f 3066 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
6ad42535 3067 }
dea3101e 3068 break;
92d7f7b0 3069 case ELS_CMD_FDISC:
fedd3b7b
JS
3070 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
3071 lpfc_issue_els_fdisc(vport, ndlp, retry);
92d7f7b0 3072 break;
dea3101e
JB
3073 }
3074 return;
3075}
3076
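/*
 * Illustrative sketch only: shows how a delayed ELS retry is armed so that
 * lpfc_els_retry_delay() and lpfc_els_retry_delay_handler() above will later
 * reissue the command.  The sequence mirrors the delayed-retry arming done in
 * lpfc_els_retry() below; the helper name and the fixed one-second delay are
 * hypothetical.
 */
static void
lpfc_example_arm_delayed_plogi(struct lpfc_vport *vport,
			       struct lpfc_nodelist *ndlp, uint8_t retry)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp->nlp_retry = retry;
	/* delay is specified in milliseconds */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	/* lpfc_els_retry_delay_handler() reissues this command later */
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
}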
e59058c4 3077/**
3621a710 3078 * lpfc_els_retry - Make retry decision on an els command iocb
e59058c4
JS
3079 * @phba: pointer to lpfc hba data structure.
3080 * @cmdiocb: pointer to lpfc command iocb data structure.
3081 * @rspiocb: pointer to lpfc response iocb data structure.
3082 *
3083 * This routine makes a retry decision on an ELS command IOCB, which has
3084 * failed. The following ELS IOCBs use this function for retrying the command
3085 * when a previously issued command responded with error status: FLOGI, PLOGI,
3086 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
3087 * returned error status, it makes the decision whether a retry shall be
3088 * issued for the command, and whether a retry shall be made immediately or
3089 * delayed. In the former case, the corresponding ELS command issuing-function
3090 * is called to retry the command. In the latter case, the ELS command shall
3091 * be posted to the ndlp delayed event and the delayed function timer set on
3092 * the ndlp for the delayed command issuing.
3093 *
3094 * Return code
3095 * 0 - No retry of els command is made
3096 * 1 - Immediate or delayed retry of els command is made
3097 **/
dea3101e 3098static int
2e0fef85
JS
3099lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3100 struct lpfc_iocbq *rspiocb)
dea3101e 3101{
2e0fef85
JS
3102 struct lpfc_vport *vport = cmdiocb->vport;
3103 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3104 IOCB_t *irsp = &rspiocb->iocb;
3105 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3106 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
dea3101e
JB
3107 uint32_t *elscmd;
3108 struct ls_rjt stat;
2e0fef85 3109 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
98c9ea5c 3110 int logerr = 0;
2e0fef85 3111 uint32_t cmd = 0;
488d1469 3112 uint32_t did;
dea3101e 3113
488d1469 3114
dea3101e
JB
3115 /* Note: context2 may be 0 for internal driver abort
3116 * of a delayed ELS command.
3117 */
3118
3119 if (pcmd && pcmd->virt) {
3120 elscmd = (uint32_t *) (pcmd->virt);
3121 cmd = *elscmd++;
3122 }
3123
e47c9093 3124 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
488d1469
JS
3125 did = ndlp->nlp_DID;
3126 else {
3127 /* We should only hit this case for retrying PLOGI */
3128 did = irsp->un.elsreq64.remoteID;
2e0fef85 3129 ndlp = lpfc_findnode_did(vport, did);
e47c9093
JS
3130 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
3131 && (cmd != ELS_CMD_PLOGI))
488d1469
JS
3132 return 1;
3133 }
3134
858c9f6c
JS
3135 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3136 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
3137 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
3138
dea3101e
JB
3139 switch (irsp->ulpStatus) {
3140 case IOSTAT_FCP_RSP_ERROR:
1151e3ec 3141 break;
dea3101e 3142 case IOSTAT_REMOTE_STOP:
1151e3ec
JS
3143 if (phba->sli_rev == LPFC_SLI_REV4) {
3144 /* This IO was aborted by the target, we don't
3145 * know the rxid and because we did not send the
3146 * ABTS we cannot generate an RRQ.
3147 */
3148 lpfc_set_rrq_active(phba, ndlp,
ee0f4fe1 3149 cmdiocb->sli4_lxritag, 0, 0);
1151e3ec 3150 }
dea3101e 3151 break;
dea3101e 3152 case IOSTAT_LOCAL_REJECT:
e3d2b802 3153 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
dea3101e 3154 case IOERR_LOOP_OPEN_FAILURE:
eaf15d5b
JS
3155 if (cmd == ELS_CMD_FLOGI) {
3156 if (PCI_DEVICE_ID_HORNET ==
3157 phba->pcidev->device) {
76a95d75 3158 phba->fc_topology = LPFC_TOPOLOGY_LOOP;
eaf15d5b
JS
3159 phba->pport->fc_myDID = 0;
3160 phba->alpa_map[0] = 0;
3161 phba->alpa_map[1] = 0;
3162 }
3163 }
2e0fef85 3164 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
92d7f7b0 3165 delay = 1000;
dea3101e
JB
3166 retry = 1;
3167 break;
3168
92d7f7b0 3169 case IOERR_ILLEGAL_COMMAND:
7f5f3d0d
JS
3170 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3171 "0124 Retry illegal cmd x%x "
3172 "retry:x%x delay:x%x\n",
3173 cmd, cmdiocb->retry, delay);
3174 retry = 1;
3175 /* All command's retry policy */
3176 maxretry = 8;
3177 if (cmdiocb->retry > 2)
3178 delay = 1000;
92d7f7b0
JS
3179 break;
3180
dea3101e 3181 case IOERR_NO_RESOURCES:
98c9ea5c 3182 logerr = 1; /* HBA out of resources */
858c9f6c
JS
3183 retry = 1;
3184 if (cmdiocb->retry > 100)
3185 delay = 100;
3186 maxretry = 250;
3187 break;
3188
3189 case IOERR_ILLEGAL_FRAME:
92d7f7b0 3190 delay = 100;
dea3101e
JB
3191 retry = 1;
3192 break;
3193
858c9f6c 3194 case IOERR_SEQUENCE_TIMEOUT:
dea3101e 3195 case IOERR_INVALID_RPI:
5b5b36a9
JS
3196 if (cmd == ELS_CMD_PLOGI &&
3197 did == NameServer_DID) {
3198 /* Continue forever if plogi to */
3199 /* the nameserver fails */
3200 maxretry = 0;
3201 delay = 100;
3202 }
dea3101e
JB
3203 retry = 1;
3204 break;
3205 }
3206 break;
3207
3208 case IOSTAT_NPORT_RJT:
3209 case IOSTAT_FABRIC_RJT:
3210 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
3211 retry = 1;
3212 break;
3213 }
3214 break;
3215
3216 case IOSTAT_NPORT_BSY:
3217 case IOSTAT_FABRIC_BSY:
98c9ea5c 3218 logerr = 1; /* Fabric / Remote NPort out of resources */
dea3101e
JB
3219 retry = 1;
3220 break;
3221
3222 case IOSTAT_LS_RJT:
3223 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
3224 /* Added for Vendor specific support
3225 * Just keep retrying for these Rsn / Exp codes
3226 */
3227 switch (stat.un.b.lsRjtRsnCode) {
3228 case LSRJT_UNABLE_TPC:
3229 if (stat.un.b.lsRjtRsnCodeExp ==
3230 LSEXP_CMD_IN_PROGRESS) {
3231 if (cmd == ELS_CMD_PLOGI) {
92d7f7b0 3232 delay = 1000;
dea3101e
JB
3233 maxretry = 48;
3234 }
3235 retry = 1;
3236 break;
3237 }
ffc95493
JS
3238 if (stat.un.b.lsRjtRsnCodeExp ==
3239 LSEXP_CANT_GIVE_DATA) {
3240 if (cmd == ELS_CMD_PLOGI) {
3241 delay = 1000;
3242 maxretry = 48;
3243 }
3244 retry = 1;
3245 break;
3246 }
4c1b64ba
JS
3247 if ((cmd == ELS_CMD_PLOGI) ||
3248 (cmd == ELS_CMD_PRLI)) {
92d7f7b0 3249 delay = 1000;
dea3101e
JB
3250 maxretry = lpfc_max_els_tries + 1;
3251 retry = 1;
3252 break;
3253 }
92d7f7b0
JS
3254 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3255 (cmd == ELS_CMD_FDISC) &&
3256 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
e8b62011
JS
3257 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3258 "0125 FDISC Failed (x%x). "
3259 "Fabric out of resources\n",
3260 stat.un.lsRjtError);
92d7f7b0
JS
3261 lpfc_vport_set_state(vport,
3262 FC_VPORT_NO_FABRIC_RSCS);
3263 }
dea3101e
JB
3264 break;
3265
3266 case LSRJT_LOGICAL_BSY:
858c9f6c
JS
3267 if ((cmd == ELS_CMD_PLOGI) ||
3268 (cmd == ELS_CMD_PRLI)) {
92d7f7b0 3269 delay = 1000;
dea3101e 3270 maxretry = 48;
92d7f7b0 3271 } else if (cmd == ELS_CMD_FDISC) {
51ef4c26
JS
3272 /* FDISC retry policy */
3273 maxretry = 48;
3274 if (cmdiocb->retry >= 32)
3275 delay = 1000;
dea3101e
JB
3276 }
3277 retry = 1;
3278 break;
92d7f7b0
JS
3279
3280 case LSRJT_LOGICAL_ERR:
7f5f3d0d
JS
3281 /* There are some cases where switches return this
3282 * error when they are not ready and should be returning
3283 * Logical Busy. We should delay every time.
3284 */
3285 if (cmd == ELS_CMD_FDISC &&
3286 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
3287 maxretry = 3;
3288 delay = 1000;
3289 retry = 1;
3290 break;
3291 }
92d7f7b0
JS
3292 case LSRJT_PROTOCOL_ERR:
3293 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3294 (cmd == ELS_CMD_FDISC) &&
3295 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
3296 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
3297 ) {
e8b62011 3298 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
d7c255b2 3299 "0122 FDISC Failed (x%x). "
e8b62011
JS
3300 "Fabric Detected Bad WWN\n",
3301 stat.un.lsRjtError);
92d7f7b0
JS
3302 lpfc_vport_set_state(vport,
3303 FC_VPORT_FABRIC_REJ_WWN);
3304 }
3305 break;
7bdedb34
JS
3306 case LSRJT_VENDOR_UNIQUE:
3307 if ((stat.un.b.vendorUnique == 0x45) &&
3308 (cmd == ELS_CMD_FLOGI)) {
3309 goto out_retry;
3310 }
3311 break;
dea3101e
JB
3312 }
3313 break;
3314
3315 case IOSTAT_INTERMED_RSP:
3316 case IOSTAT_BA_RJT:
3317 break;
3318
3319 default:
3320 break;
3321 }
3322
488d1469 3323 if (did == FDMI_DID)
dea3101e 3324 retry = 1;
dea3101e 3325
df9e1b59 3326 if ((cmd == ELS_CMD_FLOGI) &&
76a95d75 3327 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
1b32f6aa 3328 !lpfc_error_lost_link(irsp)) {
98c9ea5c
JS
3329 /* FLOGI retry policy */
3330 retry = 1;
df9e1b59 3331 /* retry FLOGI forever */
6eae4303
JS
3332 if (phba->link_flag != LS_LOOPBACK_MODE)
3333 maxretry = 0;
3334 else
3335 maxretry = 2;
3336
6669f9bb
JS
3337 if (cmdiocb->retry >= 100)
3338 delay = 5000;
3339 else if (cmdiocb->retry >= 32)
98c9ea5c 3340 delay = 1000;
df9e1b59
JS
3341 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
3342 /* retry FDISCs every second up to devloss */
3343 retry = 1;
3344 maxretry = vport->cfg_devloss_tmo;
3345 delay = 1000;
98c9ea5c
JS
3346 }
3347
6669f9bb
JS
3348 cmdiocb->retry++;
3349 if (maxretry && (cmdiocb->retry >= maxretry)) {
dea3101e
JB
3350 phba->fc_stat.elsRetryExceeded++;
3351 retry = 0;
3352 }
3353
ed957684
JS
3354 if ((vport->load_flag & FC_UNLOADING) != 0)
3355 retry = 0;
3356
7bdedb34 3357out_retry:
dea3101e 3358 if (retry) {
38b92ef8
JS
3359 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
3360 /* Stop retrying PLOGI and FDISC if in FCF discovery */
3361 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3362 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3363 "2849 Stop retry ELS command "
3364 "x%x to remote NPORT x%x, "
3365 "Data: x%x x%x\n", cmd, did,
3366 cmdiocb->retry, delay);
3367 return 0;
3368 }
3369 }
dea3101e
JB
3370
3371 /* Retry ELS command <elsCmd> to remote NPORT <did> */
e8b62011
JS
3372 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3373 "0107 Retry ELS command x%x to remote "
3374 "NPORT x%x Data: x%x x%x\n",
3375 cmd, did, cmdiocb->retry, delay);
dea3101e 3376
858c9f6c
JS
3377 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
3378 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
e3d2b802
JS
3379 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
3380 IOERR_NO_RESOURCES))) {
858c9f6c
JS
3381 /* Don't reset timer for no resources */
3382
dea3101e 3383 /* If discovery / RSCN timer is running, reset it */
2e0fef85 3384 if (timer_pending(&vport->fc_disctmo) ||
92d7f7b0 3385 (vport->fc_flag & FC_RSCN_MODE))
2e0fef85 3386 lpfc_set_disctmo(vport);
dea3101e
JB
3387 }
3388
3389 phba->fc_stat.elsXmitRetry++;
58da1ffb 3390 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
dea3101e
JB
3391 phba->fc_stat.elsDelayRetry++;
3392 ndlp->nlp_retry = cmdiocb->retry;
3393
92d7f7b0
JS
3394 /* delay is specified in milliseconds */
3395 mod_timer(&ndlp->nlp_delayfunc,
3396 jiffies + msecs_to_jiffies(delay));
2e0fef85 3397 spin_lock_irq(shost->host_lock);
dea3101e 3398 ndlp->nlp_flag |= NLP_DELAY_TMO;
2e0fef85 3399 spin_unlock_irq(shost->host_lock);
dea3101e 3400
5024ab17 3401 ndlp->nlp_prev_state = ndlp->nlp_state;
858c9f6c
JS
3402 if (cmd == ELS_CMD_PRLI)
3403 lpfc_nlp_set_state(vport, ndlp,
4c1b64ba 3404 NLP_STE_PRLI_ISSUE);
858c9f6c
JS
3405 else
3406 lpfc_nlp_set_state(vport, ndlp,
3407 NLP_STE_NPR_NODE);
dea3101e
JB
3408 ndlp->nlp_last_elscmd = cmd;
3409
c9f8735b 3410 return 1;
dea3101e
JB
3411 }
3412 switch (cmd) {
3413 case ELS_CMD_FLOGI:
2e0fef85 3414 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
c9f8735b 3415 return 1;
92d7f7b0
JS
3416 case ELS_CMD_FDISC:
3417 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
3418 return 1;
dea3101e 3419 case ELS_CMD_PLOGI:
58da1ffb 3420 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
488d1469 3421 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 3422 lpfc_nlp_set_state(vport, ndlp,
de0c5b32 3423 NLP_STE_PLOGI_ISSUE);
488d1469 3424 }
2e0fef85 3425 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
c9f8735b 3426 return 1;
dea3101e 3427 case ELS_CMD_ADISC:
5024ab17 3428 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
3429 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3430 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
c9f8735b 3431 return 1;
dea3101e 3432 case ELS_CMD_PRLI:
5024ab17 3433 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
3434 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3435 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
c9f8735b 3436 return 1;
dea3101e 3437 case ELS_CMD_LOGO:
5024ab17 3438 ndlp->nlp_prev_state = ndlp->nlp_state;
086a345f 3439 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
2e0fef85 3440 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
c9f8735b 3441 return 1;
dea3101e
JB
3442 }
3443 }
dea3101e 3444 /* No retry ELS command <elsCmd> to remote NPORT <did> */
98c9ea5c
JS
3445 if (logerr) {
3446 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3447 "0137 No retry ELS command x%x to remote "
3448 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
3449 cmd, did, irsp->ulpStatus,
3450 irsp->un.ulpWord[4]);
3451 }
3452 else {
3453 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
a58cbd52
JS
3454 "0108 No retry ELS command x%x to remote "
3455 "NPORT x%x Retried:%d Error:x%x/%x\n",
3456 cmd, did, cmdiocb->retry, irsp->ulpStatus,
3457 irsp->un.ulpWord[4]);
98c9ea5c 3458 }
c9f8735b 3459 return 0;
dea3101e
JB
3460}
3461
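/*
 * Illustrative sketch only (hypothetical callback): shows the common shape of
 * the ELS completion handlers in this file, which consult lpfc_els_retry() on
 * failure before treating the error as final.  A real handler performs
 * command-specific state machine work where the comments indicate.
 */
static void
lpfc_example_cmpl_els(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		      struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	if (irsp->ulpStatus) {
		/* A new attempt was issued or scheduled; just free this IOCB */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;
		/* ... final-failure handling would go here ... */
	}
	/* ... command-specific success handling would go here ... */
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}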
e59058c4 3462/**
3621a710 3463 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
e59058c4
JS
3464 * @phba: pointer to lpfc hba data structure.
3465 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
3466 *
3467 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
3468 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
3469 * checks to see whether there is an lpfc DMA buffer associated with the
3470 * response of the command IOCB. If so, it will be released before releasing
3471 * the lpfc DMA buffer associated with the IOCB itself.
3472 *
3473 * Return code
3474 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
3475 **/
09372820 3476static int
87af33fe
JS
3477lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
3478{
3479 struct lpfc_dmabuf *buf_ptr;
3480
e59058c4 3481 /* Free the response before processing the command. */
87af33fe
JS
3482 if (!list_empty(&buf_ptr1->list)) {
3483 list_remove_head(&buf_ptr1->list, buf_ptr,
3484 struct lpfc_dmabuf,
3485 list);
3486 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3487 kfree(buf_ptr);
3488 }
3489 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
3490 kfree(buf_ptr1);
3491 return 0;
3492}
3493
e59058c4 3494/**
3621a710 3495 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
e59058c4
JS
3496 * @phba: pointer to lpfc hba data structure.
3497 * @buf_ptr: pointer to the lpfc dma buffer data structure.
3498 *
3499 * This routine releases the lpfc Direct Memory Access (DMA) buffer
3500 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
3501 * pool.
3502 *
3503 * Return code
3504 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
3505 **/
09372820 3506static int
87af33fe
JS
3507lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
3508{
3509 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3510 kfree(buf_ptr);
3511 return 0;
3512}
3513
e59058c4 3514/**
3621a710 3515 * lpfc_els_free_iocb - Free a command iocb and its associated resources
e59058c4
JS
3516 * @phba: pointer to lpfc hba data structure.
3517 * @elsiocb: pointer to lpfc els command iocb data structure.
3518 *
3519 * This routine frees a command IOCB and its associated resources. The
3520 * command IOCB data structure contains references to various associated
3521 * resources; these fields must be set to NULL if the associated reference is
3522 * not present:
3523 * context1 - reference to ndlp
3524 * context2 - reference to cmd
3525 * context2->next - reference to rsp
3526 * context3 - reference to bpl
3527 *
3528 * It first properly decrements the reference count held on ndlp for the
3529 * IOCB completion callback function. If the LPFC_DELAY_MEM_FREE flag is not
3530 * set, it invokes the lpfc_els_free_data() routine to release the Direct
3531 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
3532 * adds the DMA buffer to the @phba data structure for delayed release.
3533 * If reference to the Buffer Pointer List (BPL) is present, the
3534 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
3535 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
3536 * invoked to release the IOCB data structure back to @phba IOCBQ list.
3537 *
3538 * Return code
3539 * 0 - Success (currently, always return 0)
3540 **/
dea3101e 3541int
329f9bc7 3542lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
dea3101e
JB
3543{
3544 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
a8adb832
JS
3545 struct lpfc_nodelist *ndlp;
3546
3547 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
3548 if (ndlp) {
3549 if (ndlp->nlp_flag & NLP_DEFER_RM) {
3550 lpfc_nlp_put(ndlp);
dea3101e 3551
a8adb832
JS
3552 /* If the ndlp is not being used by another discovery
3553 * thread, free it.
3554 */
3555 if (!lpfc_nlp_not_used(ndlp)) {
3556 /* If ndlp is being used by another discovery
3557 * thread, just clear NLP_DEFER_RM
3558 */
3559 ndlp->nlp_flag &= ~NLP_DEFER_RM;
3560 }
3561 }
3562 else
3563 lpfc_nlp_put(ndlp);
329f9bc7
JS
3564 elsiocb->context1 = NULL;
3565 }
dea3101e
JB
3566 /* context2 = cmd, context2->next = rsp, context3 = bpl */
3567 if (elsiocb->context2) {
0ff10d46
JS
3568 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
3569 /* Firmware could still be in progress of DMAing
3570 * payload, so don't free data buffer till after
3571 * a hbeat.
3572 */
3573 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
3574 buf_ptr = elsiocb->context2;
3575 elsiocb->context2 = NULL;
3576 if (buf_ptr) {
3577 buf_ptr1 = NULL;
3578 spin_lock_irq(&phba->hbalock);
3579 if (!list_empty(&buf_ptr->list)) {
3580 list_remove_head(&buf_ptr->list,
3581 buf_ptr1, struct lpfc_dmabuf,
3582 list);
3583 INIT_LIST_HEAD(&buf_ptr1->list);
3584 list_add_tail(&buf_ptr1->list,
3585 &phba->elsbuf);
3586 phba->elsbuf_cnt++;
3587 }
3588 INIT_LIST_HEAD(&buf_ptr->list);
3589 list_add_tail(&buf_ptr->list, &phba->elsbuf);
3590 phba->elsbuf_cnt++;
3591 spin_unlock_irq(&phba->hbalock);
3592 }
3593 } else {
3594 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3595 lpfc_els_free_data(phba, buf_ptr1);
8667f515 3596 elsiocb->context2 = NULL;
0ff10d46 3597 }
dea3101e
JB
3598 }
3599
3600 if (elsiocb->context3) {
3601 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
87af33fe 3602 lpfc_els_free_bpl(phba, buf_ptr);
8667f515 3603 elsiocb->context3 = NULL;
dea3101e 3604 }
604a3e30 3605 lpfc_sli_release_iocbq(phba, elsiocb);
dea3101e
JB
3606 return 0;
3607}
3608
e59058c4 3609/**
3621a710 3610 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
e59058c4
JS
3611 * @phba: pointer to lpfc hba data structure.
3612 * @cmdiocb: pointer to lpfc command iocb data structure.
3613 * @rspiocb: pointer to lpfc response iocb data structure.
3614 *
3615 * This routine is the completion callback function for the Logout (LOGO)
3616 * Accept (ACC) Response ELS command. This routine is invoked to indicate
3617 * the completion of the LOGO process. It invokes lpfc_nlp_not_used() to
3618 * release the ndlp if it holds the last remaining reference (reference count
3619 * is 1). If that succeeds (meaning the ndlp was released), it sets the IOCB
3620 * context1 field to NULL to inform the following lpfc_els_free_iocb() routine
3621 * that no ndlp reference count needs to be decremented. Otherwise, the ndlp
3622 * reference use-count shall be decremented by the lpfc_els_free_iocb()
3623 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
3624 * IOCB data structure.
3625 **/
dea3101e 3626static void
2e0fef85
JS
3627lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3628 struct lpfc_iocbq *rspiocb)
dea3101e 3629{
2e0fef85
JS
3630 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3631 struct lpfc_vport *vport = cmdiocb->vport;
858c9f6c
JS
3632 IOCB_t *irsp;
3633
3634 irsp = &rspiocb->iocb;
3635 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3636 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
3637 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
dea3101e 3638 /* ACC to LOGO completes to NPort <nlp_DID> */
e8b62011
JS
3639 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3640 "0109 ACC to LOGO completes to NPort x%x "
3641 "Data: x%x x%x x%x\n",
3642 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3643 ndlp->nlp_rpi);
87af33fe
JS
3644
3645 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
3646 /* NPort Recovery mode or node is just allocated */
3647 if (!lpfc_nlp_not_used(ndlp)) {
3648 /* If the ndlp is being used by another discovery
3649 * thread, just unregister the RPI.
3650 */
3651 lpfc_unreg_rpi(vport, ndlp);
fa4066b6
JS
3652 } else {
3653 /* Indicate the node has already released, should
3654 * not reference to it from within lpfc_els_free_iocb.
3655 */
3656 cmdiocb->context1 = NULL;
87af33fe 3657 }
dea3101e 3658 }
73d91e50
JS
3659
3660 /*
3661 * The driver received a LOGO from the rport and has ACK'd it.
df9e1b59 3662 * At this point, the driver is done so release the IOCB
73d91e50 3663 */
dea3101e 3664 lpfc_els_free_iocb(phba, cmdiocb);
dea3101e
JB
3665}
3666
e59058c4 3667/**
3621a710 3668 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
e59058c4
JS
3669 * @phba: pointer to lpfc hba data structure.
3670 * @pmb: pointer to the driver internal queue element for mailbox command.
3671 *
3672 * This routine is the completion callback function for unregister default
3673 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
3674 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
3675 * decrements the ndlp reference count held for this completion callback
3677 * function. After that, it invokes the lpfc_nlp_not_used() routine to check
3677 * whether there is only one reference left on the ndlp. If so, it will
3678 * perform one more decrement and trigger the release of the ndlp.
3679 **/
858c9f6c
JS
3680void
3681lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3682{
3683 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3684 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3685
3686 pmb->context1 = NULL;
d439d286
JS
3687 pmb->context2 = NULL;
3688
858c9f6c
JS
3689 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3690 kfree(mp);
3691 mempool_free(pmb, phba->mbox_mem_pool);
086a345f 3692 if (ndlp) {
be6bb941
JS
3693 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3694 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
3695 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
2c935bc5 3696 kref_read(&ndlp->kref),
be6bb941 3697 ndlp->nlp_usg_map, ndlp);
086a345f
JS
3698 if (NLP_CHK_NODE_ACT(ndlp)) {
3699 lpfc_nlp_put(ndlp);
3700 /* This is the end of the default RPI cleanup logic for
3701 * this ndlp. If no other discovery threads are using
3702 * this ndlp, free all resources associated with it.
3703 */
3704 lpfc_nlp_not_used(ndlp);
3705 } else {
3706 lpfc_drop_node(ndlp->vport, ndlp);
3707 }
a8adb832 3708 }
3772a991 3709
858c9f6c
JS
3710 return;
3711}
3712
e59058c4 3713/**
3621a710 3714 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
e59058c4
JS
3715 * @phba: pointer to lpfc hba data structure.
3716 * @cmdiocb: pointer to lpfc command iocb data structure.
3717 * @rspiocb: pointer to lpfc response iocb data structure.
3718 *
3719 * This routine is the completion callback function for ELS Response IOCB
3720 * command. In the normal case, this callback function just properly sets
3721 * the nlp_flag bitmap in the ndlp data structure; if the mbox command
3722 * reference field in the command IOCB is not NULL, the referred mailbox
3723 * command will be sent out, and then the lpfc_els_free_iocb() routine is
3724 * invoked to release the IOCB. Under error conditions, such as when an
3725 * LS_RJT is returned or a link down event occurred during discovery, the
3726 * lpfc_nlp_not_used() routine shall be invoked to try to release the ndlp
3727 * if no other threads are currently referring to it.
3728 **/
dea3101e 3729static void
858c9f6c 3730lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
329f9bc7 3731 struct lpfc_iocbq *rspiocb)
dea3101e 3732{
2e0fef85
JS
3733 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3734 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3735 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
87af33fe
JS
3736 IOCB_t *irsp;
3737 uint8_t *pcmd;
dea3101e 3738 LPFC_MBOXQ_t *mbox = NULL;
2e0fef85 3739 struct lpfc_dmabuf *mp = NULL;
87af33fe 3740 uint32_t ls_rjt = 0;
dea3101e 3741
33ccf8d1
JS
3742 irsp = &rspiocb->iocb;
3743
dea3101e
JB
3744 if (cmdiocb->context_un.mbox)
3745 mbox = cmdiocb->context_un.mbox;
3746
fa4066b6
JS
3747 /* First determine if this is an LS_RJT cmpl. Note, this callback
3748 * function can have the cmdiocb->context1 (ndlp) field set to NULL.
3749 */
87af33fe 3750 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
58da1ffb
JS
3751 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3752 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
fa4066b6 3753 /* An LS_RJT associated with Default RPI cleanup has its own
3ad2f3fb 3754 * separate code path.
87af33fe
JS
3755 */
3756 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3757 ls_rjt = 1;
3758 }
3759
dea3101e 3760 /* Check to see if link went down during discovery */
58da1ffb 3761 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
dea3101e 3762 if (mbox) {
14691150
JS
3763 mp = (struct lpfc_dmabuf *) mbox->context1;
3764 if (mp) {
3765 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3766 kfree(mp);
3767 }
329f9bc7 3768 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e 3769 }
58da1ffb
JS
3770 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3771 (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
fa4066b6 3772 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3773 ndlp = NULL;
fa4066b6
JS
3774 /* Indicate the node has already released,
3775 * should not reference to it from within
3776 * the routine lpfc_els_free_iocb.
3777 */
3778 cmdiocb->context1 = NULL;
3779 }
dea3101e
JB
3780 goto out;
3781 }
3782
858c9f6c 3783 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
51ef4c26 3784 "ELS rsp cmpl: status:x%x/x%x did:x%x",
858c9f6c 3785 irsp->ulpStatus, irsp->un.ulpWord[4],
51ef4c26 3786 cmdiocb->iocb.un.elsreq64.remoteID);
dea3101e 3787 /* ELS response tag <ulpIoTag> completes */
e8b62011
JS
3788 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3789 "0110 ELS response tag x%x completes "
3790 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3791 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3792 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3793 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3794 ndlp->nlp_rpi);
dea3101e
JB
3795 if (mbox) {
3796 if ((rspiocb->iocb.ulpStatus == 0)
3797 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
2e0fef85 3798 lpfc_unreg_rpi(vport, ndlp);
e47c9093
JS
3799 /* Increment reference count to ndlp to hold the
3800 * reference to ndlp for the callback function.
3801 */
329f9bc7 3802 mbox->context2 = lpfc_nlp_get(ndlp);
2e0fef85 3803 mbox->vport = vport;
858c9f6c
JS
3804 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
3805 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3806 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3807 }
3808 else {
3809 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
3810 ndlp->nlp_prev_state = ndlp->nlp_state;
3811 lpfc_nlp_set_state(vport, ndlp,
2e0fef85 3812 NLP_STE_REG_LOGIN_ISSUE);
858c9f6c 3813 }
4b7789b7
JS
3814
3815 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
0b727fea 3816 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
e47c9093 3817 != MBX_NOT_FINISHED)
dea3101e 3818 goto out;
4b7789b7
JS
3819
3820 /* Decrement the ndlp reference count we
3821 * set for this failed mailbox command.
3822 */
3823 lpfc_nlp_put(ndlp);
3824 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
98c9ea5c
JS
3825
3826 /* ELS rsp: Cannot issue reg_login for <NPortid> */
3827 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3828 "0138 ELS rsp: Cannot issue reg_login for x%x "
3829 "Data: x%x x%x x%x\n",
3830 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3831 ndlp->nlp_rpi);
3832
fa4066b6 3833 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3834 ndlp = NULL;
fa4066b6
JS
3835 /* Indicate node has already been released,
3836 * should not reference to it from within
3837 * the routine lpfc_els_free_iocb.
3838 */
3839 cmdiocb->context1 = NULL;
3840 }
dea3101e 3841 } else {
858c9f6c
JS
3842 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
3843 if (!lpfc_error_lost_link(irsp) &&
3844 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
fa4066b6 3845 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3846 ndlp = NULL;
fa4066b6
JS
3847 /* Indicate node has already been
3848 * released, should not reference
3849 * to it from within the routine
3850 * lpfc_els_free_iocb.
3851 */
3852 cmdiocb->context1 = NULL;
3853 }
dea3101e
JB
3854 }
3855 }
14691150
JS
3856 mp = (struct lpfc_dmabuf *) mbox->context1;
3857 if (mp) {
3858 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3859 kfree(mp);
3860 }
3861 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e
JB
3862 }
3863out:
58da1ffb 3864 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2e0fef85 3865 spin_lock_irq(shost->host_lock);
858c9f6c 3866 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
2e0fef85 3867 spin_unlock_irq(shost->host_lock);
87af33fe
JS
3868
3869 /* If the node is not being used by another discovery thread,
3870 * and we are sending a reject, we are done with it.
3871 * Release driver reference count here and free associated
3872 * resources.
3873 */
3874 if (ls_rjt)
fa4066b6
JS
3875 if (lpfc_nlp_not_used(ndlp))
3876 /* Indicate node has already been released,
3877 * should not reference to it from within
3878 * the routine lpfc_els_free_iocb.
3879 */
3880 cmdiocb->context1 = NULL;
4b7789b7 3881
dea3101e 3882 }
87af33fe 3883
dea3101e
JB
3884 lpfc_els_free_iocb(phba, cmdiocb);
3885 return;
3886}
3887
e59058c4 3888/**
3621a710 3889 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
e59058c4
JS
3890 * @vport: pointer to a host virtual N_Port data structure.
3891 * @flag: the els command code to be accepted.
3892 * @oldiocb: pointer to the original lpfc command iocb data structure.
3893 * @ndlp: pointer to a node-list data structure.
3894 * @mbox: pointer to the driver internal queue element for mailbox command.
3895 *
3896 * This routine prepares and issues an Accept (ACC) response IOCB
3897 * command. It uses the @flag to properly set up the IOCB field for the
3898 * specific ACC response command to be issued and invokes the
3899 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
3900 * @mbox pointer is passed in, it will be put into the context_un.mbox
3901 * field of the IOCB for the completion callback function to issue the
3902 * mailbox command to the HBA later when callback is invoked.
3903 *
3904 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3905 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3906 * will be stored into the context1 field of the IOCB for the completion
3907 * callback function to the corresponding response ELS IOCB command.
3908 *
3909 * Return code
3910 * 0 - Successfully issued acc response
3911 * 1 - Failed to issue acc response
3912 **/
dea3101e 3913int
2e0fef85
JS
3914lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3915 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
51ef4c26 3916 LPFC_MBOXQ_t *mbox)
dea3101e 3917{
2e0fef85
JS
3918 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3919 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
3920 IOCB_t *icmd;
3921 IOCB_t *oldcmd;
3922 struct lpfc_iocbq *elsiocb;
dea3101e 3923 uint8_t *pcmd;
d6de08cc 3924 struct serv_parm *sp;
dea3101e
JB
3925 uint16_t cmdsize;
3926 int rc;
82d9a2a2 3927 ELS_PKT *els_pkt_ptr;
dea3101e 3928
dea3101e
JB
3929 oldcmd = &oldiocb->iocb;
3930
3931 switch (flag) {
3932 case ELS_CMD_ACC:
92d7f7b0 3933 cmdsize = sizeof(uint32_t);
2e0fef85
JS
3934 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3935 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3936 if (!elsiocb) {
2e0fef85 3937 spin_lock_irq(shost->host_lock);
5024ab17 3938 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2e0fef85 3939 spin_unlock_irq(shost->host_lock);
c9f8735b 3940 return 1;
dea3101e 3941 }
2e0fef85 3942
dea3101e 3943 icmd = &elsiocb->iocb;
7851fe2c
JS
3944 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3945 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
dea3101e
JB
3946 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3947 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 3948 pcmd += sizeof(uint32_t);
858c9f6c
JS
3949
3950 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3951 "Issue ACC: did:x%x flg:x%x",
3952 ndlp->nlp_DID, ndlp->nlp_flag, 0);
dea3101e 3953 break;
d6de08cc 3954 case ELS_CMD_FLOGI:
dea3101e 3955 case ELS_CMD_PLOGI:
92d7f7b0 3956 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
2e0fef85
JS
3957 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3958 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3959 if (!elsiocb)
c9f8735b 3960 return 1;
488d1469 3961
dea3101e 3962 icmd = &elsiocb->iocb;
7851fe2c
JS
3963 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3964 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
dea3101e
JB
3965 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3966
3967 if (mbox)
3968 elsiocb->context_un.mbox = mbox;
3969
3970 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 3971 pcmd += sizeof(uint32_t);
d6de08cc
JS
3972 sp = (struct serv_parm *)pcmd;
3973
3974 if (flag == ELS_CMD_FLOGI) {
3975 /* Copy the received service parameters back */
3976 memcpy(sp, &phba->fc_fabparam,
3977 sizeof(struct serv_parm));
3978
3979 /* Clear the F_Port bit */
3980 sp->cmn.fPort = 0;
3981
3982 /* Mark all class service parameters as invalid */
3983 sp->cls1.classValid = 0;
3984 sp->cls2.classValid = 0;
3985 sp->cls3.classValid = 0;
3986 sp->cls4.classValid = 0;
3987
3988 /* Copy our worldwide names */
3989 memcpy(&sp->portName, &vport->fc_sparam.portName,
3990 sizeof(struct lpfc_name));
3991 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
3992 sizeof(struct lpfc_name));
3993 } else {
3994 memcpy(pcmd, &vport->fc_sparam,
3995 sizeof(struct serv_parm));
e0165f20
JS
3996
3997 sp->cmn.valid_vendor_ver_level = 0;
3998 memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
d6de08cc 3999 }
858c9f6c
JS
4000
4001 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
d6de08cc 4002 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
858c9f6c 4003 ndlp->nlp_DID, ndlp->nlp_flag, 0);
dea3101e 4004 break;
82d9a2a2 4005 case ELS_CMD_PRLO:
92d7f7b0 4006 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
2e0fef85 4007 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
82d9a2a2
JS
4008 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
4009 if (!elsiocb)
4010 return 1;
4011
4012 icmd = &elsiocb->iocb;
7851fe2c
JS
4013 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4014 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
82d9a2a2
JS
4015 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4016
4017 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
92d7f7b0 4018 sizeof(uint32_t) + sizeof(PRLO));
82d9a2a2
JS
4019 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
4020 els_pkt_ptr = (ELS_PKT *) pcmd;
4021 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
858c9f6c
JS
4022
4023 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4024 "Issue ACC PRLO: did:x%x flg:x%x",
4025 ndlp->nlp_DID, ndlp->nlp_flag, 0);
82d9a2a2 4026 break;
dea3101e 4027 default:
c9f8735b 4028 return 1;
dea3101e 4029 }
dea3101e 4030 /* Xmit ELS ACC response tag <ulpIoTag> */
e8b62011
JS
4031 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4032 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
e6446439
JS
4033 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
4034 "fc_flag x%x\n",
e8b62011
JS
4035 elsiocb->iotag, elsiocb->iocb.ulpContext,
4036 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
e6446439 4037 ndlp->nlp_rpi, vport->fc_flag);
dea3101e 4038 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2e0fef85 4039 spin_lock_irq(shost->host_lock);
7c5e518c
JS
4040 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4041 ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
4042 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2e0fef85 4043 spin_unlock_irq(shost->host_lock);
dea3101e
JB
4044 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
4045 } else {
858c9f6c 4046 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e
JB
4047 }
4048
4049 phba->fc_stat.elsXmitACC++;
3772a991 4050 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
4051 if (rc == IOCB_ERROR) {
4052 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 4053 return 1;
dea3101e 4054 }
c9f8735b 4055 return 0;
dea3101e
JB
4056}
4057
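/*
 * Illustrative sketch only: a typical unsolicited-PLOGI receive path accepts
 * the login with lpfc_els_rsp_acc() above, handing over the REG_LOGIN mailbox
 * it has prepared so the ACC completion can register the RPI.  The helper
 * name and its login_mbox parameter are hypothetical; the call shape follows
 * the @flag/@mbox description in the kernel-doc above.
 */
static int
lpfc_example_accept_plogi(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
			  struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *login_mbox)
{
	/* Returns 0 if the ACC was queued, 1 on failure */
	return lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, oldiocb, ndlp,
				login_mbox);
}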
e59058c4 4058/**
3621a710 4059 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
e59058c4
JS
4060 * @vport: pointer to a virtual N_Port data structure.
4061 * @rejectError: reject reason word to be placed in the LS_RJT payload.
4062 * @oldiocb: pointer to the original lpfc command iocb data structure.
4063 * @ndlp: pointer to a node-list data structure.
4064 * @mbox: pointer to the driver internal queue element for mailbox command.
4065 *
4066 * This routine prepares and issues a Reject (RJT) response IOCB
4067 * command. If a @mbox pointer is passed in, it will be put into the
4068 * context_un.mbox field of the IOCB for the completion callback function
4069 * to issue to the HBA later.
4070 *
4071 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4072 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4073 * will be stored into the context1 field of the IOCB for the completion
4074 * callback function to the reject response ELS IOCB command.
4075 *
4076 * Return code
4077 * 0 - Successfully issued reject response
4078 * 1 - Failed to issue reject response
4079 **/
dea3101e 4080int
2e0fef85 4081lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
858c9f6c
JS
4082 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
4083 LPFC_MBOXQ_t *mbox)
dea3101e 4084{
2e0fef85 4085 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
4086 IOCB_t *icmd;
4087 IOCB_t *oldcmd;
4088 struct lpfc_iocbq *elsiocb;
dea3101e
JB
4089 uint8_t *pcmd;
4090 uint16_t cmdsize;
4091 int rc;
4092
92d7f7b0 4093 cmdsize = 2 * sizeof(uint32_t);
2e0fef85
JS
4094 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4095 ndlp->nlp_DID, ELS_CMD_LS_RJT);
488d1469 4096 if (!elsiocb)
c9f8735b 4097 return 1;
dea3101e
JB
4098
4099 icmd = &elsiocb->iocb;
4100 oldcmd = &oldiocb->iocb;
7851fe2c
JS
4101 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4102 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
dea3101e
JB
4103 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4104
4105 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
92d7f7b0 4106 pcmd += sizeof(uint32_t);
dea3101e
JB
4107 *((uint32_t *) (pcmd)) = rejectError;
4108
51ef4c26 4109 if (mbox)
858c9f6c 4110 elsiocb->context_un.mbox = mbox;
858c9f6c 4111
dea3101e 4112 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
e8b62011
JS
4113 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4114 "0129 Xmit ELS RJT x%x response tag x%x "
4115 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
4116 "rpi x%x\n",
4117 rejectError, elsiocb->iotag,
4118 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
4119 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
858c9f6c
JS
4120 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4121 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
4122 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
4123
dea3101e 4124 phba->fc_stat.elsXmitLSRJT++;
858c9f6c 4125 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3772a991 4126 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
51ef4c26 4127
dea3101e
JB
4128 if (rc == IOCB_ERROR) {
4129 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 4130 return 1;
dea3101e 4131 }
c9f8735b 4132 return 0;
dea3101e
JB
4133}
4134
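/*
 * Illustrative sketch only (hypothetical helper): shows how a reject word is
 * normally built with the ls_rjt structure, as done earlier in
 * lpfc_els_retry(), and handed to lpfc_els_rsp_reject() above.  The
 * explanation code is left at zero to avoid assuming additional macro names.
 */
static int
lpfc_example_reject_els(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
			struct lpfc_nodelist *ndlp)
{
	struct ls_rjt stat;

	memset(&stat, 0, sizeof(struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;	/* unable to perform */
	return lpfc_els_rsp_reject(vport, stat.un.lsRjtError, oldiocb, ndlp,
				   NULL);
}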
e59058c4 4135/**
3621a710 4136 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
e59058c4
JS
4137 * @vport: pointer to a virtual N_Port data structure.
4138 * @oldiocb: pointer to the original lpfc command iocb data structure.
4139 * @ndlp: pointer to a node-list data structure.
4140 *
4141 * This routine prepares and issues an Accept (ACC) response to an Address
4142 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
4143 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4144 *
4145 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4146 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4147 * will be stored into the context1 field of the IOCB for the completion
4148 * callback function to the ADISC Accept response ELS IOCB command.
4149 *
4150 * Return code
4151 * 0 - Successfully issued acc adisc response
4152 * 1 - Failed to issue adisc acc response
4153 **/
dea3101e 4154int
2e0fef85
JS
4155lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
4156 struct lpfc_nodelist *ndlp)
dea3101e 4157{
2e0fef85 4158 struct lpfc_hba *phba = vport->phba;
dea3101e 4159 ADISC *ap;
2e0fef85 4160 IOCB_t *icmd, *oldcmd;
dea3101e 4161 struct lpfc_iocbq *elsiocb;
dea3101e
JB
4162 uint8_t *pcmd;
4163 uint16_t cmdsize;
4164 int rc;
4165
92d7f7b0 4166 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
2e0fef85
JS
4167 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4168 ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 4169 if (!elsiocb)
c9f8735b 4170 return 1;
dea3101e 4171
5b8bd0c9
JS
4172 icmd = &elsiocb->iocb;
4173 oldcmd = &oldiocb->iocb;
7851fe2c
JS
4174 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4175 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5b8bd0c9 4176
dea3101e 4177 /* Xmit ADISC ACC response tag <ulpIoTag> */
e8b62011
JS
4178 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4179 "0130 Xmit ADISC ACC response iotag x%x xri: "
4180 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
4181 elsiocb->iotag, elsiocb->iocb.ulpContext,
4182 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4183 ndlp->nlp_rpi);
dea3101e
JB
4184 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4185
4186 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 4187 pcmd += sizeof(uint32_t);
dea3101e
JB
4188
4189 ap = (ADISC *) (pcmd);
4190 ap->hardAL_PA = phba->fc_pref_ALPA;
92d7f7b0
JS
4191 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4192 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 4193 ap->DID = be32_to_cpu(vport->fc_myDID);
dea3101e 4194
858c9f6c
JS
4195 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4196 "Issue ACC ADISC: did:x%x flg:x%x",
4197 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4198
dea3101e 4199 phba->fc_stat.elsXmitACC++;
858c9f6c 4200 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3772a991 4201 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
4202 if (rc == IOCB_ERROR) {
4203 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 4204 return 1;
dea3101e 4205 }
c9f8735b 4206 return 0;
dea3101e
JB
4207}
4208
e59058c4 4209/**
3621a710 4210 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
e59058c4
JS
4211 * @vport: pointer to a virtual N_Port data structure.
4212 * @oldiocb: pointer to the original lpfc command iocb data structure.
4213 * @ndlp: pointer to a node-list data structure.
4214 *
4215 * This routine prepares and issues an Accept (ACC) response to a Process
4216 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
4217 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4218 *
4219 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4220 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4221 * will be stored into the context1 field of the IOCB for the completion
4222 * callback function to the PRLI Accept response ELS IOCB command.
4223 *
4224 * Return code
4225 * 0 - Successfully issued acc prli response
4226 * 1 - Failed to issue acc prli response
4227 **/
dea3101e 4228int
2e0fef85 4229lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
5b8bd0c9 4230 struct lpfc_nodelist *ndlp)
dea3101e 4231{
2e0fef85 4232 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
4233 PRLI *npr;
4234 lpfc_vpd_t *vpd;
4235 IOCB_t *icmd;
4236 IOCB_t *oldcmd;
4237 struct lpfc_iocbq *elsiocb;
dea3101e
JB
4238 uint8_t *pcmd;
4239 uint16_t cmdsize;
4240 int rc;
4241
92d7f7b0 4242 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
2e0fef85 4243 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
92d7f7b0 4244 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
c9f8735b
JW
4245 if (!elsiocb)
4246 return 1;
dea3101e 4247
5b8bd0c9
JS
4248 icmd = &elsiocb->iocb;
4249 oldcmd = &oldiocb->iocb;
7851fe2c
JS
4250 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4251 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4252
dea3101e 4253 /* Xmit PRLI ACC response tag <ulpIoTag> */
e8b62011
JS
4254 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4255 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
4256 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
4257 elsiocb->iotag, elsiocb->iocb.ulpContext,
4258 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4259 ndlp->nlp_rpi);
dea3101e
JB
4260 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4261
4262 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
92d7f7b0 4263 pcmd += sizeof(uint32_t);
dea3101e
JB
4264
4265 /* For PRLI, remainder of payload is PRLI parameter page */
92d7f7b0 4266 memset(pcmd, 0, sizeof(PRLI));
dea3101e
JB
4267
4268 npr = (PRLI *) pcmd;
4269 vpd = &phba->vpd;
4270 /*
0d2b6b83
JS
4271 * If the remote port is a target and our firmware version is 3.20 or
4272 * later, set the following bits for FC-TAPE support.
dea3101e 4273 */
0d2b6b83
JS
4274 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4275 (vpd->rev.feaLevelHigh >= 0x02)) {
dea3101e
JB
4276 npr->ConfmComplAllowed = 1;
4277 npr->Retry = 1;
4278 npr->TaskRetryIdReq = 1;
4279 }
4280
4281 npr->acceptRspCode = PRLI_REQ_EXECUTED;
4282 npr->estabImagePair = 1;
4283 npr->readXferRdyDis = 1;
4284 npr->ConfmComplAllowed = 1;
4285
4286 npr->prliType = PRLI_FCP_TYPE;
4287 npr->initiatorFunc = 1;
4288
858c9f6c
JS
4289 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4290 "Issue ACC PRLI: did:x%x flg:x%x",
4291 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4292
dea3101e 4293 phba->fc_stat.elsXmitACC++;
858c9f6c 4294 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e 4295
3772a991 4296 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
4297 if (rc == IOCB_ERROR) {
4298 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 4299 return 1;
dea3101e 4300 }
c9f8735b 4301 return 0;
dea3101e
JB
4302}
4303
e59058c4 4304/**
3621a710 4305 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
e59058c4
JS
4306 * @vport: pointer to a virtual N_Port data structure.
4307 * @format: rnid command format.
4308 * @oldiocb: pointer to the original lpfc command iocb data structure.
4309 * @ndlp: pointer to a node-list data structure.
4310 *
4311 * This routine issues a Request Node Identification Data (RNID) Accept
4312 * (ACC) response. It constructs the RNID ACC response command according to
4313 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
4314 * issue the response. Note that this command does not need to hold the ndlp
4315 * reference count for the callback. So, the ndlp reference count taken by
4316 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
4317 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
4318 * there is no ndlp reference available.
4319 *
4320 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4321 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4322 * will be stored into the context1 field of the IOCB for the completion
4323 * callback function. However, for the RNID Accept Response ELS command,
4324 * this is undone later by this routine after the IOCB is allocated.
4325 *
4326 * Return code
4327 * 0 - Successfully issued acc rnid response
4328 * 1 - Failed to issue acc rnid response
4329 **/
dea3101e 4330static int
2e0fef85 4331lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
329f9bc7 4332 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
dea3101e 4333{
2e0fef85 4334 struct lpfc_hba *phba = vport->phba;
dea3101e 4335 RNID *rn;
2e0fef85 4336 IOCB_t *icmd, *oldcmd;
dea3101e 4337 struct lpfc_iocbq *elsiocb;
dea3101e
JB
4338 uint8_t *pcmd;
4339 uint16_t cmdsize;
4340 int rc;
4341
92d7f7b0
JS
4342 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
4343 + (2 * sizeof(struct lpfc_name));
dea3101e 4344 if (format)
92d7f7b0 4345 cmdsize += sizeof(RNID_TOP_DISC);
dea3101e 4346
2e0fef85
JS
4347 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4348 ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 4349 if (!elsiocb)
c9f8735b 4350 return 1;
dea3101e 4351
5b8bd0c9
JS
4352 icmd = &elsiocb->iocb;
4353 oldcmd = &oldiocb->iocb;
7851fe2c
JS
4354 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4355 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4356
dea3101e 4357 /* Xmit RNID ACC response tag <ulpIoTag> */
e8b62011
JS
4358 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4359 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
4360 elsiocb->iotag, elsiocb->iocb.ulpContext);
dea3101e 4361 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
dea3101e 4362 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 4363 pcmd += sizeof(uint32_t);
dea3101e 4364
92d7f7b0 4365 memset(pcmd, 0, sizeof(RNID));
dea3101e
JB
4366 rn = (RNID *) (pcmd);
4367 rn->Format = format;
92d7f7b0
JS
4368 rn->CommonLen = (2 * sizeof(struct lpfc_name));
4369 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4370 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
dea3101e
JB
4371 switch (format) {
4372 case 0:
4373 rn->SpecificLen = 0;
4374 break;
4375 case RNID_TOPOLOGY_DISC:
92d7f7b0 4376 rn->SpecificLen = sizeof(RNID_TOP_DISC);
dea3101e 4377 memcpy(&rn->un.topologyDisc.portName,
92d7f7b0 4378 &vport->fc_portname, sizeof(struct lpfc_name));
dea3101e
JB
4379 rn->un.topologyDisc.unitType = RNID_HBA;
4380 rn->un.topologyDisc.physPort = 0;
4381 rn->un.topologyDisc.attachedNodes = 0;
4382 break;
4383 default:
4384 rn->CommonLen = 0;
4385 rn->SpecificLen = 0;
4386 break;
4387 }
4388
858c9f6c
JS
4389 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4390 "Issue ACC RNID: did:x%x flg:x%x",
4391 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4392
dea3101e 4393 phba->fc_stat.elsXmitACC++;
858c9f6c 4394 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e 4395
3772a991 4396 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e
JB
4397 if (rc == IOCB_ERROR) {
4398 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 4399 return 1;
dea3101e 4400 }
c9f8735b 4401 return 0;
dea3101e
JB
4402}
4403
19ca7609
JS
4404/**
4405 * lpfc_els_clear_rrq - Clear the exchange that this RRQ describes.
4406 * @vport: pointer to a virtual N_Port data structure.
4407 * @iocb: pointer to the lpfc command iocb data structure.
4408 * @ndlp: pointer to a node-list data structure.
4409 *
4410 * Return: none
4411 **/
4412static void
4413lpfc_els_clear_rrq(struct lpfc_vport *vport,
4414 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
4415{
4416 struct lpfc_hba *phba = vport->phba;
4417 uint8_t *pcmd;
4418 struct RRQ *rrq;
4419 uint16_t rxid;
1151e3ec 4420 uint16_t xri;
19ca7609
JS
4421 struct lpfc_node_rrq *prrq;
4422
4423
4424 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
4425 pcmd += sizeof(uint32_t);
4426 rrq = (struct RRQ *)pcmd;
1151e3ec 4427 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
9589b062 4428 rxid = bf_get(rrq_rxid, rrq);
19ca7609
JS
4429
4430 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4431 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
4432 " x%x x%x\n",
1151e3ec 4433 be32_to_cpu(bf_get(rrq_did, rrq)),
9589b062 4434 bf_get(rrq_oxid, rrq),
19ca7609
JS
4435 rxid,
4436 iocb->iotag, iocb->iocb.ulpContext);
4437
4438 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4439 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
4440 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
1151e3ec 4441 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
9589b062 4442 xri = bf_get(rrq_oxid, rrq);
1151e3ec
JS
4443 else
4444 xri = rxid;
4445 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
19ca7609 4446 if (prrq)
1151e3ec 4447 lpfc_clr_rrq_active(phba, xri, prrq);
19ca7609
JS
4448 return;
4449}
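/*
 * Illustrative standalone sketch (not part of the driver): lpfc_els_clear_rrq()
 * above picks which exchange ID to clear from the RRQ payload -- when the
 * local port's DID is the RRQ originator DID, the local end owns the OX_ID,
 * otherwise the RX_ID names the local exchange. A minimal userspace rendition
 * of that selection, with hypothetical field names:
 */
#include <stdint.h>

struct rrq_sketch {
	uint32_t did;	/* S_ID of the exchange originator */
	uint16_t oxid;	/* originator exchange id */
	uint16_t rxid;	/* responder exchange id */
};

/* Originator side clears by OX_ID, responder side by RX_ID. */
static uint16_t pick_local_xri(uint32_t my_did, const struct rrq_sketch *rrq)
{
	return (my_did == rrq->did) ? rrq->oxid : rrq->rxid;
}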
4450
12265f68
JS
4451/**
4452 * lpfc_els_rsp_echo_acc - Issue echo acc response
4453 * @vport: pointer to a virtual N_Port data structure.
4454 * @data: pointer to echo data to return in the accept.
4455 * @oldiocb: pointer to the original lpfc command iocb data structure.
4456 * @ndlp: pointer to a node-list data structure.
4457 *
4458 * Return code
4459 * 0 - Successfully issued acc echo response
4460 * 1 - Failed to issue acc echo response
4461 **/
4462static int
4463lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4464 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4465{
4466 struct lpfc_hba *phba = vport->phba;
4467 struct lpfc_iocbq *elsiocb;
12265f68
JS
4468 uint8_t *pcmd;
4469 uint16_t cmdsize;
4470 int rc;
4471
12265f68
JS
4472 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
4473
bf08611b
JS
4474 /* The accumulated length can exceed the BPL_SIZE. For
4475 * now, use this as the limit
4476 */
4477 if (cmdsize > LPFC_BPL_SIZE)
4478 cmdsize = LPFC_BPL_SIZE;
12265f68
JS
4479 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4480 ndlp->nlp_DID, ELS_CMD_ACC);
4481 if (!elsiocb)
4482 return 1;
4483
7851fe2c
JS
4484 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
4485 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
4486
12265f68
JS
4487 /* Xmit ECHO ACC response tag <ulpIoTag> */
4488 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4489 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
4490 elsiocb->iotag, elsiocb->iocb.ulpContext);
4491 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4492 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4493 pcmd += sizeof(uint32_t);
4494 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
4495
4496 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4497 "Issue ACC ECHO: did:x%x flg:x%x",
4498 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4499
4500 phba->fc_stat.elsXmitACC++;
4501 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
12265f68
JS
4502
4503 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4504 if (rc == IOCB_ERROR) {
4505 lpfc_els_free_iocb(phba, elsiocb);
4506 return 1;
4507 }
4508 return 0;
4509}
4510
e59058c4 4511/**
3621a710 4512 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
e59058c4
JS
4513 * @vport: pointer to a host virtual N_Port data structure.
4514 *
4515 * This routine issues Address Discover (ADISC) ELS commands to those
4516 * N_Ports which are in node port recovery state and ADISC has not been issued
4517 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
4518 * lpfc_issue_els_adisc() routine, the per-@vport discovery count
4519 * (num_disc_nodes) is incremented. If num_disc_nodes reaches the
4520 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
4521 * set in the @vport fc_flag and issuing of the remaining ADISC IOCBs is
4522 * deferred to a later pass. On the other hand, if the walk through all the
4523 * ndlps on the @vport issues no ADISC IOCB at all, the FC_NLP_MORE bit is
4524 * cleared from the @vport fc_flag, indicating that no more ADISCs need
4525 * to be sent.
4526 *
4527 * Return code
4528 * The number of N_Ports with adisc issued.
4529 **/
dea3101e 4530int
2e0fef85 4531lpfc_els_disc_adisc(struct lpfc_vport *vport)
dea3101e 4532{
2e0fef85 4533 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 4534 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 4535 int sentadisc = 0;
dea3101e 4536
685f0bf7 4537 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2e0fef85 4538 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
4539 if (!NLP_CHK_NODE_ACT(ndlp))
4540 continue;
685f0bf7
JS
4541 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4542 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4543 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
2e0fef85 4544 spin_lock_irq(shost->host_lock);
685f0bf7 4545 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2e0fef85 4546 spin_unlock_irq(shost->host_lock);
685f0bf7 4547 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
4548 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4549 lpfc_issue_els_adisc(vport, ndlp, 0);
685f0bf7 4550 sentadisc++;
2e0fef85
JS
4551 vport->num_disc_nodes++;
4552 if (vport->num_disc_nodes >=
3de2a653 4553 vport->cfg_discovery_threads) {
2e0fef85
JS
4554 spin_lock_irq(shost->host_lock);
4555 vport->fc_flag |= FC_NLP_MORE;
4556 spin_unlock_irq(shost->host_lock);
685f0bf7 4557 break;
dea3101e
JB
4558 }
4559 }
4560 }
4561 if (sentadisc == 0) {
2e0fef85
JS
4562 spin_lock_irq(shost->host_lock);
4563 vport->fc_flag &= ~FC_NLP_MORE;
4564 spin_unlock_irq(shost->host_lock);
dea3101e 4565 }
2fe165b6 4566 return sentadisc;
dea3101e
JB
4567}
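/*
 * Illustrative standalone sketch (not part of the driver): lpfc_els_disc_adisc()
 * above and lpfc_els_disc_plogi() below share the same throttling pattern --
 * issue at most cfg_discovery_threads commands per pass and set a "more work
 * pending" flag when the budget is exhausted so a later pass can resume.
 * A simplified version with hypothetical types and names:
 */
#include <stdbool.h>

struct disc_budget_sketch {
	int issued;		/* commands issued so far in this pass */
	int threshold;		/* cfg_discovery_threads analogue */
	bool more_pending;	/* FC_NLP_MORE analogue */
};

/* Returns true if the caller may issue one more command in this pass. */
static bool disc_budget_take(struct disc_budget_sketch *b)
{
	if (b->issued >= b->threshold) {
		b->more_pending = true;	/* resume on a later pass */
		return false;
	}
	b->issued++;
	return true;
}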
4568
e59058c4 4569/**
3621a710 4570 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
e59058c4
JS
4571 * @vport: pointer to a host virtual N_Port data structure.
4572 *
4573 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
4574 * which are in node port recovery state on a @vport. Each time an ELS
4575 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
4576 * the per-@vport discovery count (num_disc_nodes) is
4577 * incremented. If num_disc_nodes reaches the pre-configured threshold
4578 * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport
4579 * fc_flag and issuing of the remaining PLOGI IOCBs is deferred to a
4580 * later pass. On the other hand, if the walk through all the ndlps on
4581 * the @vport issues no PLOGI IOCB at all, the FC_NLP_MORE bit is
4582 * cleared from the @vport fc_flag, indicating that no more
4583 * PLOGIs need to be sent.
4584 *
4585 * Return code
4586 * The number of N_Ports with plogi issued.
4587 **/
dea3101e 4588int
2e0fef85 4589lpfc_els_disc_plogi(struct lpfc_vport *vport)
dea3101e 4590{
2e0fef85 4591 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 4592 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 4593 int sentplogi = 0;
dea3101e 4594
2e0fef85
JS
4595 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
4596 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
4597 if (!NLP_CHK_NODE_ACT(ndlp))
4598 continue;
685f0bf7 4599 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
8b017a30
JS
4600 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4601 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
4602 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
685f0bf7 4603 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
4604 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4605 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
685f0bf7 4606 sentplogi++;
2e0fef85
JS
4607 vport->num_disc_nodes++;
4608 if (vport->num_disc_nodes >=
8b017a30 4609 vport->cfg_discovery_threads) {
2e0fef85
JS
4610 spin_lock_irq(shost->host_lock);
4611 vport->fc_flag |= FC_NLP_MORE;
4612 spin_unlock_irq(shost->host_lock);
685f0bf7 4613 break;
dea3101e
JB
4614 }
4615 }
4616 }
87af33fe
JS
4617 	if (sentplogi) {
4618 		lpfc_set_disctmo(vport);
4619 	} else {
2e0fef85
JS
4621 spin_lock_irq(shost->host_lock);
4622 vport->fc_flag &= ~FC_NLP_MORE;
4623 spin_unlock_irq(shost->host_lock);
dea3101e 4624 }
2fe165b6 4625 return sentplogi;
dea3101e
JB
4626}
4627
bd4b3e5c 4628static uint32_t
86478875
JS
4629lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
4630 uint32_t word0)
4631{
4632
4633 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
4634 desc->payload.els_req = word0;
4635 desc->length = cpu_to_be32(sizeof(desc->payload));
6c92d1d0
JS
4636
4637 return sizeof(struct fc_rdp_link_service_desc);
86478875
JS
4638}
4639
bd4b3e5c 4640static uint32_t
86478875
JS
4641lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
4642 uint8_t *page_a0, uint8_t *page_a2)
4643{
4644 uint16_t wavelength;
4645 uint16_t temperature;
4646 uint16_t rx_power;
4647 uint16_t tx_bias;
4648 uint16_t tx_power;
4649 uint16_t vcc;
4650 uint16_t flag = 0;
4651 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
4652 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
4653
4654 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
4655
4656 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
4657 &page_a0[SSF_TRANSCEIVER_CODE_B4];
4658 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
4659 &page_a0[SSF_TRANSCEIVER_CODE_B5];
4660
4661 if ((trasn_code_byte4->fc_sw_laser) ||
4662 (trasn_code_byte5->fc_sw_laser_sl) ||
4663 	    (trasn_code_byte5->fc_sw_laser_sn)) { /* check if it's short WL */
4664 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
4665 } else if (trasn_code_byte4->fc_lw_laser) {
4666 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
4667 page_a0[SSF_WAVELENGTH_B0];
4668 if (wavelength == SFP_WAVELENGTH_LC1310)
4669 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
4670 if (wavelength == SFP_WAVELENGTH_LL1550)
4671 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
4672 }
4673 	/* check if it's SFP+ */
4674 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
4675 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
4676 << SFP_FLAG_CT_SHIFT;
4677
4678 	/* check if it's OPTICAL */
4679 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
4680 SFP_FLAG_IS_OPTICAL_PORT : 0)
4681 << SFP_FLAG_IS_OPTICAL_SHIFT;
4682
4683 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
4684 page_a2[SFF_TEMPERATURE_B0]);
4685 vcc = (page_a2[SFF_VCC_B1] << 8 |
4686 page_a2[SFF_VCC_B0]);
4687 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
4688 page_a2[SFF_TXPOWER_B0]);
4689 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
4690 page_a2[SFF_TX_BIAS_CURRENT_B0]);
4691 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
4692 page_a2[SFF_RXPOWER_B0]);
4693 desc->sfp_info.temperature = cpu_to_be16(temperature);
4694 desc->sfp_info.rx_power = cpu_to_be16(rx_power);
4695 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
4696 desc->sfp_info.tx_power = cpu_to_be16(tx_power);
4697 desc->sfp_info.vcc = cpu_to_be16(vcc);
4698
4699 desc->sfp_info.flags = cpu_to_be16(flag);
4700 desc->length = cpu_to_be32(sizeof(desc->sfp_info));
6c92d1d0
JS
4701
4702 return sizeof(struct fc_rdp_sfp_desc);
86478875
JS
4703}
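/*
 * Illustrative standalone sketch (not part of the driver): each diagnostic
 * value above (temperature, vcc, tx_power, ...) is read from page A2 as a
 * byte pair, combined into a host 16-bit value and then converted back to
 * big-endian for the descriptor. A userspace equivalent, assuming msb_off is
 * a hypothetical offset of the high byte:
 */
#include <stdint.h>
#include <arpa/inet.h>	/* htons() stands in for cpu_to_be16() */

static uint16_t a2_word_to_wire(const uint8_t *page_a2, unsigned int msb_off)
{
	/* page_a2[msb_off] is the high byte, page_a2[msb_off + 1] the low byte */
	uint16_t host_val = (uint16_t)((page_a2[msb_off] << 8) |
				       page_a2[msb_off + 1]);
	return htons(host_val);	/* descriptor fields are big-endian */
}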
4704
bd4b3e5c 4705static uint32_t
86478875
JS
4706lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
4707 READ_LNK_VAR *stat)
4708{
4709 uint32_t type;
4710
4711 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
4712
4713 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
4714
4715 desc->info.port_type = cpu_to_be32(type);
4716
4717 desc->info.link_status.link_failure_cnt =
4718 cpu_to_be32(stat->linkFailureCnt);
4719 desc->info.link_status.loss_of_synch_cnt =
4720 cpu_to_be32(stat->lossSyncCnt);
4721 desc->info.link_status.loss_of_signal_cnt =
4722 cpu_to_be32(stat->lossSignalCnt);
4723 desc->info.link_status.primitive_seq_proto_err =
4724 cpu_to_be32(stat->primSeqErrCnt);
4725 desc->info.link_status.invalid_trans_word =
4726 cpu_to_be32(stat->invalidXmitWord);
4727 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
4728
4729 desc->length = cpu_to_be32(sizeof(desc->info));
6c92d1d0
JS
4730
4731 return sizeof(struct fc_rdp_link_error_status_desc);
86478875
JS
4732}
4733
bd4b3e5c 4734static uint32_t
56204984
JS
4735lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
4736 struct lpfc_vport *vport)
4737{
3aaaa314
JS
4738 uint32_t bbCredit;
4739
56204984
JS
4740 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
4741
3aaaa314
JS
4742 bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
4743 (vport->fc_sparam.cmn.bbCreditMsb << 8);
4744 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
4745 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
4746 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
4747 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
4748 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
4749 } else {
56204984 4750 desc->bbc_info.attached_port_bbc = 0;
3aaaa314 4751 }
56204984
JS
4752
4753 desc->bbc_info.rtt = 0;
4754 desc->length = cpu_to_be32(sizeof(desc->bbc_info));
6c92d1d0
JS
4755
4756 return sizeof(struct fc_rdp_bbc_desc);
56204984
JS
4757}
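/*
 * Illustrative standalone sketch (not part of the driver): the
 * buffer-to-buffer credit in the FC common service parameters is split
 * across two bytes, so lpfc_rdp_res_bbc_desc() above reassembles it as
 * lsb | (msb << 8) before byte-swapping it into the descriptor. A minimal
 * rendition with hypothetical field names:
 */
#include <stdint.h>

struct csp_sketch {
	uint8_t bb_credit_msb;
	uint8_t bb_credit_lsb;
};

static uint32_t bb_credit_from_csp(const struct csp_sketch *csp)
{
	return (uint32_t)csp->bb_credit_lsb |
	       ((uint32_t)csp->bb_credit_msb << 8);
}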
4758
bd4b3e5c 4759static uint32_t
310429ef
JS
4760lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
4761 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
56204984 4762{
310429ef 4763 uint32_t flags = 0;
56204984
JS
4764
4765 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
4766
3aaaa314
JS
4767 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
4768 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
4769 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
4770 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
310429ef
JS
4771
4772 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
4773 flags |= RDP_OET_HIGH_ALARM;
4774 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
4775 flags |= RDP_OET_LOW_ALARM;
4776 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
4777 flags |= RDP_OET_HIGH_WARNING;
4778 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
4779 flags |= RDP_OET_LOW_WARNING;
4780
56204984
JS
4781 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
4782 desc->oed_info.function_flags = cpu_to_be32(flags);
4783 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6c92d1d0 4784 return sizeof(struct fc_rdp_oed_sfp_desc);
56204984
JS
4785}
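/*
 * Illustrative standalone sketch (not part of the driver): the OED
 * descriptor builders (temperature above, and the voltage/bias/power
 * variants that follow) all compose the same function_flags word -- the
 * high/low alarm and warning bits OR'ed together plus a 4-bit function type
 * shifted into place. A minimal version; the bit positions below are
 * hypothetical and only illustrate the layout:
 */
#include <stdint.h>
#include <stdbool.h>

#define SK_OET_HIGH_ALARM	0x1u
#define SK_OET_LOW_ALARM	0x2u
#define SK_OET_HIGH_WARNING	0x4u
#define SK_OET_LOW_WARNING	0x8u
#define SK_OED_TYPE_SHIFT	28	/* hypothetical: 4-bit type in the top nibble */

static uint32_t oed_flags_sketch(unsigned int func_type, bool hi_alarm,
				 bool lo_alarm, bool hi_warn, bool lo_warn)
{
	uint32_t flags = 0;

	if (hi_alarm)
		flags |= SK_OET_HIGH_ALARM;
	if (lo_alarm)
		flags |= SK_OET_LOW_ALARM;
	if (hi_warn)
		flags |= SK_OET_HIGH_WARNING;
	if (lo_warn)
		flags |= SK_OET_LOW_WARNING;

	flags |= (func_type & 0xfu) << SK_OED_TYPE_SHIFT;
	return flags;
}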
4786
bd4b3e5c 4787static uint32_t
310429ef
JS
4788lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
4789 struct fc_rdp_oed_sfp_desc *desc,
56204984
JS
4790 uint8_t *page_a2)
4791{
310429ef 4792 uint32_t flags = 0;
56204984
JS
4793
4794 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
4795
3aaaa314
JS
4796 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
4797 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
4798 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
4799 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
310429ef
JS
4800
4801 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
4802 flags |= RDP_OET_HIGH_ALARM;
4803 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
4804 flags |= RDP_OET_LOW_ALARM;
4805 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
4806 flags |= RDP_OET_HIGH_WARNING;
4807 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
4808 flags |= RDP_OET_LOW_WARNING;
4809
56204984
JS
4810 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
4811 desc->oed_info.function_flags = cpu_to_be32(flags);
4812 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6c92d1d0 4813 return sizeof(struct fc_rdp_oed_sfp_desc);
56204984
JS
4814}
4815
bd4b3e5c 4816static uint32_t
310429ef
JS
4817lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
4818 struct fc_rdp_oed_sfp_desc *desc,
56204984
JS
4819 uint8_t *page_a2)
4820{
310429ef 4821 uint32_t flags = 0;
56204984
JS
4822
4823 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
4824
3aaaa314
JS
4825 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
4826 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
4827 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
4828 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
310429ef
JS
4829
4830 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
4831 flags |= RDP_OET_HIGH_ALARM;
4832 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
4833 flags |= RDP_OET_LOW_ALARM;
4834 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
4835 flags |= RDP_OET_HIGH_WARNING;
4836 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
4837 flags |= RDP_OET_LOW_WARNING;
4838
56204984
JS
4839 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
4840 desc->oed_info.function_flags = cpu_to_be32(flags);
4841 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6c92d1d0 4842 return sizeof(struct fc_rdp_oed_sfp_desc);
56204984
JS
4843}
4844
bd4b3e5c 4845static uint32_t
310429ef
JS
4846lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
4847 struct fc_rdp_oed_sfp_desc *desc,
56204984
JS
4848 uint8_t *page_a2)
4849{
310429ef 4850 uint32_t flags = 0;
56204984
JS
4851
4852 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
4853
3aaaa314
JS
4854 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
4855 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
4856 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
4857 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
310429ef
JS
4858
4859 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
4860 flags |= RDP_OET_HIGH_ALARM;
4861 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
4862 flags |= RDP_OET_LOW_ALARM;
4863 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
4864 flags |= RDP_OET_HIGH_WARNING;
4865 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
4866 flags |= RDP_OET_LOW_WARNING;
4867
56204984
JS
4868 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
4869 desc->oed_info.function_flags = cpu_to_be32(flags);
4870 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6c92d1d0 4871 return sizeof(struct fc_rdp_oed_sfp_desc);
56204984
JS
4872}
4873
4874
bd4b3e5c 4875static uint32_t
310429ef
JS
4876lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
4877 struct fc_rdp_oed_sfp_desc *desc,
56204984
JS
4878 uint8_t *page_a2)
4879{
310429ef 4880 uint32_t flags = 0;
56204984
JS
4881
4882 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
4883
3aaaa314
JS
4884 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
4885 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
4886 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
4887 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
310429ef
JS
4888
4889 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
4890 flags |= RDP_OET_HIGH_ALARM;
4891 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
4892 flags |= RDP_OET_LOW_ALARM;
4893 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
4894 flags |= RDP_OET_HIGH_WARNING;
4895 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
4896 flags |= RDP_OET_LOW_WARNING;
4897
56204984
JS
4898 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
4899 desc->oed_info.function_flags = cpu_to_be32(flags);
4900 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6c92d1d0 4901 return sizeof(struct fc_rdp_oed_sfp_desc);
56204984
JS
4902}
4903
bd4b3e5c 4904static uint32_t
56204984
JS
4905lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
4906 uint8_t *page_a0, struct lpfc_vport *vport)
4907{
4908 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
4909 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
4910 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
4911 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
4912 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2);
4913 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
4914 desc->length = cpu_to_be32(sizeof(desc->opd_info));
6c92d1d0 4915 return sizeof(struct fc_rdp_opd_sfp_desc);
56204984
JS
4916}
4917
bd4b3e5c 4918static uint32_t
4258e98e
JS
4919lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
4920{
4921 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
4922 return 0;
4923 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG);
4924
4925 desc->info.CorrectedBlocks =
4926 cpu_to_be32(stat->fecCorrBlkCount);
4927 desc->info.UncorrectableBlocks =
4928 cpu_to_be32(stat->fecUncorrBlkCount);
4929
4930 desc->length = cpu_to_be32(sizeof(desc->info));
4931
4932 return sizeof(struct fc_fec_rdp_desc);
4933}
4934
bd4b3e5c 4935static uint32_t
86478875
JS
4936lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
4937{
4938 uint16_t rdp_cap = 0;
4939 uint16_t rdp_speed;
4940
4941 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
4942
81e75177
JS
4943 switch (phba->fc_linkspeed) {
4944 case LPFC_LINK_SPEED_1GHZ:
86478875
JS
4945 rdp_speed = RDP_PS_1GB;
4946 break;
81e75177 4947 case LPFC_LINK_SPEED_2GHZ:
86478875
JS
4948 rdp_speed = RDP_PS_2GB;
4949 break;
81e75177 4950 case LPFC_LINK_SPEED_4GHZ:
86478875
JS
4951 rdp_speed = RDP_PS_4GB;
4952 break;
81e75177 4953 case LPFC_LINK_SPEED_8GHZ:
86478875
JS
4954 rdp_speed = RDP_PS_8GB;
4955 break;
81e75177 4956 case LPFC_LINK_SPEED_10GHZ:
86478875
JS
4957 rdp_speed = RDP_PS_10GB;
4958 break;
81e75177 4959 case LPFC_LINK_SPEED_16GHZ:
86478875
JS
4960 rdp_speed = RDP_PS_16GB;
4961 break;
a085e87c
JS
4962 case LPFC_LINK_SPEED_32GHZ:
4963 rdp_speed = RDP_PS_32GB;
4964 break;
86478875
JS
4965 default:
4966 rdp_speed = RDP_PS_UNKNOWN;
4967 break;
4968 }
4969
4970 desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
4971
d38dd52c
JS
4972 if (phba->lmt & LMT_32Gb)
4973 rdp_cap |= RDP_PS_32GB;
86478875
JS
4974 if (phba->lmt & LMT_16Gb)
4975 rdp_cap |= RDP_PS_16GB;
4976 if (phba->lmt & LMT_10Gb)
4977 rdp_cap |= RDP_PS_10GB;
4978 if (phba->lmt & LMT_8Gb)
4979 rdp_cap |= RDP_PS_8GB;
4980 if (phba->lmt & LMT_4Gb)
4981 rdp_cap |= RDP_PS_4GB;
4982 if (phba->lmt & LMT_2Gb)
4983 rdp_cap |= RDP_PS_2GB;
4984 if (phba->lmt & LMT_1Gb)
4985 rdp_cap |= RDP_PS_1GB;
4986
4987 if (rdp_cap == 0)
4988 rdp_cap = RDP_CAP_UNKNOWN;
56204984
JS
4989 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
4990 rdp_cap |= RDP_CAP_USER_CONFIGURED;
86478875
JS
4991
4992 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
4993 desc->length = cpu_to_be32(sizeof(desc->info));
6c92d1d0 4994 return sizeof(struct fc_rdp_port_speed_desc);
86478875
JS
4995}
4996
bd4b3e5c 4997static uint32_t
86478875
JS
4998lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
4999 struct lpfc_hba *phba)
5000{
5001
5002 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
5003
5004 memcpy(desc->port_names.wwnn, phba->wwnn,
5005 sizeof(desc->port_names.wwnn));
5006
5007 memcpy(desc->port_names.wwpn, &phba->wwpn,
5008 sizeof(desc->port_names.wwpn));
5009
5010 desc->length = cpu_to_be32(sizeof(desc->port_names));
6c92d1d0 5011 return sizeof(struct fc_rdp_port_name_desc);
86478875
JS
5012}
5013
bd4b3e5c 5014static uint32_t
86478875
JS
5015lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
5016 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5017{
5018
5019 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
5020 if (vport->fc_flag & FC_FABRIC) {
5021 memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
5022 sizeof(desc->port_names.wwnn));
5023
5024 memcpy(desc->port_names.wwpn, &vport->fabric_portname,
5025 sizeof(desc->port_names.wwpn));
5026 } else { /* Point to Point */
5027 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
5028 sizeof(desc->port_names.wwnn));
5029
5030 		memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
5031 sizeof(desc->port_names.wwpn));
5032 }
5033
5034 desc->length = cpu_to_be32(sizeof(desc->port_names));
6c92d1d0 5035 return sizeof(struct fc_rdp_port_name_desc);
86478875
JS
5036}
5037
bd4b3e5c 5038static void
86478875
JS
5039lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
5040 int status)
5041{
5042 struct lpfc_nodelist *ndlp = rdp_context->ndlp;
5043 struct lpfc_vport *vport = ndlp->vport;
5044 struct lpfc_iocbq *elsiocb;
eb8d68c9 5045 struct ulp_bde64 *bpl;
86478875
JS
5046 IOCB_t *icmd;
5047 uint8_t *pcmd;
5048 struct ls_rjt *stat;
5049 struct fc_rdp_res_frame *rdp_res;
6c92d1d0 5050 uint32_t cmdsize, len;
310429ef 5051 uint16_t *flag_ptr;
6c92d1d0 5052 int rc;
86478875
JS
5053
5054 if (status != SUCCESS)
5055 goto error;
eb8d68c9
JS
5056
5057 /* This will change once we know the true size of the RDP payload */
86478875
JS
5058 cmdsize = sizeof(struct fc_rdp_res_frame);
5059
5060 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
5061 lpfc_max_els_tries, rdp_context->ndlp,
5062 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
5063 lpfc_nlp_put(ndlp);
5064 if (!elsiocb)
5065 goto free_rdp_context;
5066
5067 icmd = &elsiocb->iocb;
5068 icmd->ulpContext = rdp_context->rx_id;
5069 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
5070
5071 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5072 "2171 Xmit RDP response tag x%x xri x%x, "
5073 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
5074 elsiocb->iotag, elsiocb->iocb.ulpContext,
5075 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5076 ndlp->nlp_rpi);
5077 rdp_res = (struct fc_rdp_res_frame *)
5078 (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5079 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5080 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
5081 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5082
310429ef
JS
5083 /* Update Alarm and Warning */
5084 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
5085 phba->sfp_alarm |= *flag_ptr;
5086 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
5087 phba->sfp_warning |= *flag_ptr;
5088
86478875 5089 /* For RDP payload */
6c92d1d0
JS
5090 len = 8;
5091 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
5092 (len + pcmd), ELS_CMD_RDP);
86478875 5093
6c92d1d0 5094 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
86478875 5095 rdp_context->page_a0, rdp_context->page_a2);
6c92d1d0
JS
5096 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
5097 phba);
5098 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
5099 (len + pcmd), &rdp_context->link_stat);
5100 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
5101 (len + pcmd), phba);
5102 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
5103 (len + pcmd), vport, ndlp);
5104 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
4258e98e 5105 &rdp_context->link_stat);
6c92d1d0
JS
5106 	/* Check if the nport is logged in, BZ190632 */
5107 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
5108 goto lpfc_skip_descriptor;
5109
5110 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
5111 &rdp_context->link_stat, vport);
5112 len += lpfc_rdp_res_oed_temp_desc(phba,
5113 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5114 rdp_context->page_a2);
5115 len += lpfc_rdp_res_oed_voltage_desc(phba,
5116 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5117 rdp_context->page_a2);
5118 len += lpfc_rdp_res_oed_txbias_desc(phba,
5119 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5120 rdp_context->page_a2);
5121 len += lpfc_rdp_res_oed_txpower_desc(phba,
5122 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5123 rdp_context->page_a2);
5124 len += lpfc_rdp_res_oed_rxpower_desc(phba,
5125 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5126 rdp_context->page_a2);
5127 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
5128 rdp_context->page_a0, vport);
5129
5130lpfc_skip_descriptor:
5131 rdp_res->length = cpu_to_be32(len - 8);
86478875
JS
5132 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5133
eb8d68c9
JS
5134 /* Now that we know the true size of the payload, update the BPL */
5135 bpl = (struct ulp_bde64 *)
5136 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
6c92d1d0 5137 bpl->tus.f.bdeSize = len;
eb8d68c9
JS
5138 bpl->tus.f.bdeFlags = 0;
5139 bpl->tus.w = le32_to_cpu(bpl->tus.w);
5140
86478875
JS
5141 phba->fc_stat.elsXmitACC++;
5142 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5143 if (rc == IOCB_ERROR)
5144 lpfc_els_free_iocb(phba, elsiocb);
5145
5146 kfree(rdp_context);
5147
5148 return;
5149error:
5150 cmdsize = 2 * sizeof(uint32_t);
5151 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
5152 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
5153 lpfc_nlp_put(ndlp);
5154 if (!elsiocb)
5155 goto free_rdp_context;
5156
5157 icmd = &elsiocb->iocb;
5158 icmd->ulpContext = rdp_context->rx_id;
5159 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
5160 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5161
5162 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
5163 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
5164 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5165
5166 phba->fc_stat.elsXmitLSRJT++;
5167 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5168 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5169
5170 if (rc == IOCB_ERROR)
5171 lpfc_els_free_iocb(phba, elsiocb);
5172free_rdp_context:
5173 kfree(rdp_context);
5174}
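/*
 * Illustrative standalone sketch (not part of the driver): lpfc_els_rdp_cmpl()
 * above fills the RDP ACC at a running byte offset. The 4-byte ACC command
 * word and the 4-byte frame length word come first (so len starts at 8),
 * each descriptor builder returns how many bytes it wrote, and the length
 * field finally carries only the descriptor total (len - 8). A minimal
 * sketch of that accounting with hypothetical builder stubs:
 */
#include <stdint.h>

static uint32_t put_desc_a(uint8_t *dst) { (void)dst; return 12; }	/* pretend sizes */
static uint32_t put_desc_b(uint8_t *dst) { (void)dst; return 56; }

static uint32_t build_rdp_acc_sketch(uint8_t *payload, uint32_t *frame_len)
{
	uint32_t len = 8;	/* ACC word + length word already accounted for */

	len += put_desc_a(payload + len);
	len += put_desc_b(payload + len);

	*frame_len = len - 8;	/* length field excludes the 8-byte header */
	return len;		/* total payload size to report in the BPL */
}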
5175
bd4b3e5c 5176static int
86478875
JS
5177lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
5178{
5179 LPFC_MBOXQ_t *mbox = NULL;
5180 int rc;
5181
5182 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5183 if (!mbox) {
5184 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
5185 "7105 failed to allocate mailbox memory");
5186 return 1;
5187 }
5188
5189 if (lpfc_sli4_dump_page_a0(phba, mbox))
5190 goto prep_mbox_fail;
5191 mbox->vport = rdp_context->ndlp->vport;
5192 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
5193 mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
5194 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5195 if (rc == MBX_NOT_FINISHED)
5196 goto issue_mbox_fail;
5197
5198 return 0;
5199
5200prep_mbox_fail:
5201issue_mbox_fail:
5202 mempool_free(mbox, phba->mbox_mem_pool);
5203 return 1;
5204}
5205
5206/*
5207 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
5208 * @vport: pointer to a host virtual N_Port data structure.
5209 * @cmdiocb: pointer to lpfc command iocb data structure.
5210 * @ndlp: pointer to a node-list data structure.
5211 *
5212 * This routine processes an unsolicited RDP (Read Diagnostic Parameters)
5213 * IOCB. First, the payload of the unsolicited RDP is checked.
5214 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3
5215 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
5216 * (3) send MBX_READ_LNK_STAT to get the link statistics, and (4) call
5217 * lpfc_els_rdp_cmpl to gather all the data and send the RDP response.
5218 *
5219 * Return code
5220 * 0 - Sent the acc response
5221 * 1 - Sent the reject response.
5222 */
5223static int
5224lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5225 struct lpfc_nodelist *ndlp)
5226{
5227 struct lpfc_hba *phba = vport->phba;
5228 struct lpfc_dmabuf *pcmd;
5229 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
5230 struct fc_rdp_req_frame *rdp_req;
5231 struct lpfc_rdp_context *rdp_context;
5232 IOCB_t *cmd = NULL;
5233 struct ls_rjt stat;
5234
5235 if (phba->sli_rev < LPFC_SLI_REV4 ||
5236 (bf_get(lpfc_sli_intf_if_type,
5237 &phba->sli4_hba.sli_intf) !=
5238 LPFC_SLI_INTF_IF_TYPE_2)) {
5239 rjt_err = LSRJT_UNABLE_TPC;
5240 rjt_expl = LSEXP_REQ_UNSUPPORTED;
5241 goto error;
5242 }
5243
5244 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) {
5245 rjt_err = LSRJT_UNABLE_TPC;
5246 rjt_expl = LSEXP_REQ_UNSUPPORTED;
5247 goto error;
5248 }
5249
5250 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5251 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
5252
5253
5254 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5255 "2422 ELS RDP Request "
5256 "dec len %d tag x%x port_id %d len %d\n",
5257 be32_to_cpu(rdp_req->rdp_des_length),
5258 be32_to_cpu(rdp_req->nport_id_desc.tag),
5259 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
5260 be32_to_cpu(rdp_req->nport_id_desc.length));
5261
7d933313
JS
5262 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
5263 !phba->cfg_enable_SmartSAN) {
5264 rjt_err = LSRJT_UNABLE_TPC;
5265 rjt_expl = LSEXP_PORT_LOGIN_REQ;
5266 goto error;
5267 }
86478875
JS
5268 if (sizeof(struct fc_rdp_nport_desc) !=
5269 be32_to_cpu(rdp_req->rdp_des_length))
5270 goto rjt_logerr;
5271 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
5272 goto rjt_logerr;
5273 if (RDP_NPORT_ID_SIZE !=
5274 be32_to_cpu(rdp_req->nport_id_desc.length))
5275 goto rjt_logerr;
699acd62 5276 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
86478875
JS
5277 if (!rdp_context) {
5278 rjt_err = LSRJT_UNABLE_TPC;
5279 goto error;
5280 }
5281
86478875
JS
5282 cmd = &cmdiocb->iocb;
5283 rdp_context->ndlp = lpfc_nlp_get(ndlp);
5284 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
5285 rdp_context->rx_id = cmd->ulpContext;
5286 rdp_context->cmpl = lpfc_els_rdp_cmpl;
5287 if (lpfc_get_rdp_info(phba, rdp_context)) {
5288 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
5289 "2423 Unable to send mailbox");
5290 kfree(rdp_context);
5291 rjt_err = LSRJT_UNABLE_TPC;
5292 lpfc_nlp_put(ndlp);
5293 goto error;
5294 }
5295
5296 return 0;
5297
5298rjt_logerr:
5299 rjt_err = LSRJT_LOGICAL_ERR;
5300
5301error:
5302 memset(&stat, 0, sizeof(stat));
5303 stat.un.b.lsRjtRsnCode = rjt_err;
5304 stat.un.b.lsRjtRsnCodeExp = rjt_expl;
5305 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5306 return 1;
5307}
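/*
 * Illustrative standalone sketch (not part of the driver): the receive path
 * above accepts an RDP request only if the descriptor list is exactly one
 * N_Port ID descriptor with the expected tag and length, and answers with a
 * logical-error LS_RJT otherwise. A minimal predicate over those checks,
 * with hypothetical constants and field names:
 */
#include <stdint.h>
#include <stdbool.h>
#include <arpa/inet.h>	/* ntohl() stands in for be32_to_cpu() */

#define SK_NPORT_DESC_TAG	0x00000001u	/* hypothetical tag value */
#define SK_NPORT_ID_SIZE	4u		/* hypothetical payload size */

struct rdp_req_sketch {
	uint32_t desc_list_len;	/* big-endian on the wire */
	uint32_t nport_tag;	/* big-endian */
	uint32_t nport_len;	/* big-endian */
};

static bool rdp_req_is_valid(const struct rdp_req_sketch *req,
			     uint32_t one_desc_size)
{
	return ntohl(req->desc_list_len) == one_desc_size &&
	       ntohl(req->nport_tag) == SK_NPORT_DESC_TAG &&
	       ntohl(req->nport_len) == SK_NPORT_ID_SIZE;
}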
5308
5309
8b017a30
JS
5310static void
5311lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5312{
5313 MAILBOX_t *mb;
5314 IOCB_t *icmd;
5315 uint8_t *pcmd;
5316 struct lpfc_iocbq *elsiocb;
5317 struct lpfc_nodelist *ndlp;
5318 struct ls_rjt *stat;
481ad967 5319 union lpfc_sli4_cfg_shdr *shdr;
8b017a30
JS
5320 struct lpfc_lcb_context *lcb_context;
5321 struct fc_lcb_res_frame *lcb_res;
481ad967 5322 uint32_t cmdsize, shdr_status, shdr_add_status;
8b017a30
JS
5323 int rc;
5324
5325 mb = &pmb->u.mb;
8b017a30
JS
5326 lcb_context = (struct lpfc_lcb_context *)pmb->context1;
5327 ndlp = lcb_context->ndlp;
5328 pmb->context1 = NULL;
5329 pmb->context2 = NULL;
5330
481ad967
JS
5331 shdr = (union lpfc_sli4_cfg_shdr *)
5332 &pmb->u.mqe.un.beacon_config.header.cfg_shdr;
5333 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5334 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5335
5336 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
5337 "0194 SET_BEACON_CONFIG mailbox "
5338 "completed with status x%x add_status x%x,"
5339 " mbx status x%x\n",
5340 shdr_status, shdr_add_status, mb->mbxStatus);
5341
5342 if (mb->mbxStatus && !(shdr_status &&
5343 shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) {
8b017a30
JS
5344 mempool_free(pmb, phba->mbox_mem_pool);
5345 goto error;
5346 }
5347
5348 mempool_free(pmb, phba->mbox_mem_pool);
8b017a30
JS
5349 cmdsize = sizeof(struct fc_lcb_res_frame);
5350 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5351 lpfc_max_els_tries, ndlp,
5352 ndlp->nlp_DID, ELS_CMD_ACC);
5353
5354 /* Decrement the ndlp reference count from previous mbox command */
5355 lpfc_nlp_put(ndlp);
5356
5357 if (!elsiocb)
5358 goto free_lcb_context;
5359
5360 lcb_res = (struct fc_lcb_res_frame *)
5361 (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5362
5363 icmd = &elsiocb->iocb;
5364 icmd->ulpContext = lcb_context->rx_id;
5365 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
5366
5367 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5368 *((uint32_t *)(pcmd)) = ELS_CMD_ACC;
5369 lcb_res->lcb_sub_command = lcb_context->sub_command;
5370 lcb_res->lcb_type = lcb_context->type;
5371 lcb_res->lcb_frequency = lcb_context->frequency;
5372 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5373 phba->fc_stat.elsXmitACC++;
5374 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5375 if (rc == IOCB_ERROR)
5376 lpfc_els_free_iocb(phba, elsiocb);
5377
5378 kfree(lcb_context);
5379 return;
5380
5381error:
5382 cmdsize = sizeof(struct fc_lcb_res_frame);
5383 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5384 lpfc_max_els_tries, ndlp,
5385 ndlp->nlp_DID, ELS_CMD_LS_RJT);
5386 lpfc_nlp_put(ndlp);
5387 if (!elsiocb)
5388 goto free_lcb_context;
5389
5390 icmd = &elsiocb->iocb;
5391 icmd->ulpContext = lcb_context->rx_id;
5392 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
5393 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5394
5395 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
5396 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
5397 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5398
5399 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5400 phba->fc_stat.elsXmitLSRJT++;
5401 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5402 if (rc == IOCB_ERROR)
5403 lpfc_els_free_iocb(phba, elsiocb);
5404free_lcb_context:
5405 kfree(lcb_context);
5406}
5407
5408static int
5409lpfc_sli4_set_beacon(struct lpfc_vport *vport,
5410 struct lpfc_lcb_context *lcb_context,
5411 uint32_t beacon_state)
5412{
5413 struct lpfc_hba *phba = vport->phba;
5414 LPFC_MBOXQ_t *mbox = NULL;
5415 uint32_t len;
5416 int rc;
5417
5418 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5419 if (!mbox)
5420 return 1;
5421
5422 len = sizeof(struct lpfc_mbx_set_beacon_config) -
5423 sizeof(struct lpfc_sli4_cfg_mhdr);
5424 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5425 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
5426 LPFC_SLI4_MBX_EMBED);
5427 mbox->context1 = (void *)lcb_context;
5428 mbox->vport = phba->pport;
5429 mbox->mbox_cmpl = lpfc_els_lcb_rsp;
5430 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
5431 phba->sli4_hba.physical_port);
5432 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
5433 beacon_state);
5434 bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1);
5435 bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0);
5436 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5437 if (rc == MBX_NOT_FINISHED) {
5438 mempool_free(mbox, phba->mbox_mem_pool);
5439 return 1;
5440 }
5441
5442 return 0;
5443}
5444
5445
5446/**
5447 * lpfc_els_rcv_lcb - Process an unsolicited LCB
5448 * @vport: pointer to a host virtual N_Port data structure.
5449 * @cmdiocb: pointer to lpfc command iocb data structure.
5450 * @ndlp: pointer to a node-list data structure.
5451 *
5452 * This routine processes an unsolicited LCB (Link Cable Beacon) IOCB.
5453 * First, the payload of the unsolicited LCB is checked.
5454 * Then, based on the subcommand, the beacon is either turned on or off.
5455 *
5456 * Return code
5457 * 0 - Sent the acc response
5458 * 1 - Sent the reject response.
5459 **/
5460static int
5461lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5462 struct lpfc_nodelist *ndlp)
5463{
5464 struct lpfc_hba *phba = vport->phba;
5465 struct lpfc_dmabuf *pcmd;
8b017a30
JS
5466 uint8_t *lp;
5467 struct fc_lcb_request_frame *beacon;
5468 struct lpfc_lcb_context *lcb_context;
5469 uint8_t state, rjt_err;
5470 struct ls_rjt stat;
5471
8b017a30
JS
5472 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
5473 lp = (uint8_t *)pcmd->virt;
5474 beacon = (struct fc_lcb_request_frame *)pcmd->virt;
5475
5476 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5477 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
5478 "type x%x frequency %x duration x%x\n",
5479 lp[0], lp[1], lp[2],
5480 beacon->lcb_command,
5481 beacon->lcb_sub_command,
5482 beacon->lcb_type,
5483 beacon->lcb_frequency,
5484 be16_to_cpu(beacon->lcb_duration));
5485
5486 if (phba->sli_rev < LPFC_SLI_REV4 ||
5487 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
5488 LPFC_SLI_INTF_IF_TYPE_2)) {
5489 rjt_err = LSRJT_CMD_UNSUPPORTED;
5490 goto rjt;
5491 }
8b017a30
JS
5492
5493 if (phba->hba_flag & HBA_FCOE_MODE) {
5494 rjt_err = LSRJT_CMD_UNSUPPORTED;
5495 goto rjt;
5496 }
5497 if (beacon->lcb_frequency == 0) {
5498 rjt_err = LSRJT_CMD_UNSUPPORTED;
5499 goto rjt;
5500 }
5501 if ((beacon->lcb_type != LPFC_LCB_GREEN) &&
5502 (beacon->lcb_type != LPFC_LCB_AMBER)) {
5503 rjt_err = LSRJT_CMD_UNSUPPORTED;
5504 goto rjt;
5505 }
5506 if ((beacon->lcb_sub_command != LPFC_LCB_ON) &&
5507 (beacon->lcb_sub_command != LPFC_LCB_OFF)) {
5508 rjt_err = LSRJT_CMD_UNSUPPORTED;
5509 goto rjt;
5510 }
5511 if ((beacon->lcb_sub_command == LPFC_LCB_ON) &&
5512 (beacon->lcb_type != LPFC_LCB_GREEN) &&
5513 (beacon->lcb_type != LPFC_LCB_AMBER)) {
5514 rjt_err = LSRJT_CMD_UNSUPPORTED;
5515 goto rjt;
5516 }
5517 if (be16_to_cpu(beacon->lcb_duration) != 0) {
5518 rjt_err = LSRJT_CMD_UNSUPPORTED;
5519 goto rjt;
5520 }
5521
e7950423
SM
5522 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL);
5523 if (!lcb_context) {
5524 rjt_err = LSRJT_UNABLE_TPC;
5525 goto rjt;
5526 }
5527
8b017a30
JS
5528 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
5529 lcb_context->sub_command = beacon->lcb_sub_command;
5530 lcb_context->type = beacon->lcb_type;
5531 lcb_context->frequency = beacon->lcb_frequency;
5532 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
5533 lcb_context->rx_id = cmdiocb->iocb.ulpContext;
5534 lcb_context->ndlp = lpfc_nlp_get(ndlp);
5535 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
5536 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
5537 LOG_ELS, "0193 failed to send mail box");
e7950423 5538 kfree(lcb_context);
8b017a30
JS
5539 lpfc_nlp_put(ndlp);
5540 rjt_err = LSRJT_UNABLE_TPC;
5541 goto rjt;
5542 }
5543 return 0;
5544rjt:
5545 memset(&stat, 0, sizeof(stat));
5546 stat.un.b.lsRjtRsnCode = rjt_err;
5547 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5548 return 1;
5549}
5550
5551
e59058c4 5552/**
3621a710 5553 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
e59058c4
JS
5554 * @vport: pointer to a host virtual N_Port data structure.
5555 *
5556 * This routine cleans up any Registration State Change Notification
5557 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
5558 * @vport, together with the host_lock, is used to prevent multiple threads
5559 * from accessing the RSCN array of the same @vport at the same time.
5560 **/
92d7f7b0 5561void
2e0fef85 5562lpfc_els_flush_rscn(struct lpfc_vport *vport)
dea3101e 5563{
2e0fef85
JS
5564 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5565 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
5566 int i;
5567
7f5f3d0d
JS
5568 spin_lock_irq(shost->host_lock);
5569 if (vport->fc_rscn_flush) {
5570 /* Another thread is walking fc_rscn_id_list on this vport */
5571 spin_unlock_irq(shost->host_lock);
5572 return;
5573 }
5574 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
5575 vport->fc_rscn_flush = 1;
5576 spin_unlock_irq(shost->host_lock);
5577
2e0fef85 5578 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
92d7f7b0 5579 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
2e0fef85 5580 vport->fc_rscn_id_list[i] = NULL;
dea3101e 5581 }
2e0fef85
JS
5582 spin_lock_irq(shost->host_lock);
5583 vport->fc_rscn_id_cnt = 0;
5584 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
5585 spin_unlock_irq(shost->host_lock);
5586 lpfc_can_disctmo(vport);
7f5f3d0d
JS
5587 /* Indicate we are done walking this fc_rscn_id_list */
5588 vport->fc_rscn_flush = 0;
dea3101e
JB
5589}
5590
e59058c4 5591/**
3621a710 5592 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
e59058c4
JS
5593 * @vport: pointer to a host virtual N_Port data structure.
5594 * @did: remote destination port identifier.
5595 *
5596 * This routine checks whether there is any pending Registration State
5597 * Change Notification (RSCN) to a @did on the @vport.
5598 *
5599 * Return code
5600 * Non-zero - The @did matched a pending RSCN
5601 * 0 - The @did did not match any pending RSCN
5602 **/
dea3101e 5603int
2e0fef85 5604lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
dea3101e
JB
5605{
5606 D_ID ns_did;
5607 D_ID rscn_did;
dea3101e 5608 uint32_t *lp;
92d7f7b0 5609 uint32_t payload_len, i;
7f5f3d0d 5610 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e
JB
5611
5612 ns_did.un.word = did;
dea3101e
JB
5613
5614 /* Never match fabric nodes for RSCNs */
5615 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
2e0fef85 5616 return 0;
dea3101e
JB
5617
5618 /* If we are doing a FULL RSCN rediscovery, match everything */
2e0fef85 5619 if (vport->fc_flag & FC_RSCN_DISCOVERY)
c9f8735b 5620 return did;
dea3101e 5621
7f5f3d0d
JS
5622 spin_lock_irq(shost->host_lock);
5623 if (vport->fc_rscn_flush) {
5624 /* Another thread is walking fc_rscn_id_list on this vport */
5625 spin_unlock_irq(shost->host_lock);
5626 return 0;
5627 }
5628 /* Indicate we are walking fc_rscn_id_list on this vport */
5629 vport->fc_rscn_flush = 1;
5630 spin_unlock_irq(shost->host_lock);
2e0fef85 5631 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
92d7f7b0
JS
5632 lp = vport->fc_rscn_id_list[i]->virt;
5633 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
5634 payload_len -= sizeof(uint32_t); /* take off word 0 */
dea3101e 5635 while (payload_len) {
92d7f7b0
JS
5636 rscn_did.un.word = be32_to_cpu(*lp++);
5637 payload_len -= sizeof(uint32_t);
eaf15d5b
JS
5638 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
5639 case RSCN_ADDRESS_FORMAT_PORT:
6fb120a7
JS
5640 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
5641 && (ns_did.un.b.area == rscn_did.un.b.area)
5642 && (ns_did.un.b.id == rscn_did.un.b.id))
7f5f3d0d 5643 goto return_did_out;
dea3101e 5644 break;
eaf15d5b 5645 case RSCN_ADDRESS_FORMAT_AREA:
dea3101e
JB
5646 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
5647 && (ns_did.un.b.area == rscn_did.un.b.area))
7f5f3d0d 5648 goto return_did_out;
dea3101e 5649 break;
eaf15d5b 5650 case RSCN_ADDRESS_FORMAT_DOMAIN:
dea3101e 5651 if (ns_did.un.b.domain == rscn_did.un.b.domain)
7f5f3d0d 5652 goto return_did_out;
dea3101e 5653 break;
eaf15d5b 5654 case RSCN_ADDRESS_FORMAT_FABRIC:
7f5f3d0d 5655 goto return_did_out;
dea3101e
JB
5656 }
5657 }
92d7f7b0 5658 }
7f5f3d0d
JS
5659 /* Indicate we are done with walking fc_rscn_id_list on this vport */
5660 vport->fc_rscn_flush = 0;
92d7f7b0 5661 return 0;
7f5f3d0d
JS
5662return_did_out:
5663 /* Indicate we are done with walking fc_rscn_id_list on this vport */
5664 vport->fc_rscn_flush = 0;
5665 return did;
dea3101e
JB
5666}
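/*
 * Illustrative standalone sketch (not part of the driver): an RSCN entry
 * carries an address-format field saying how much of the 24-bit FC address
 * is significant -- the full port address, the domain+area, the domain only,
 * or the whole fabric. A minimal comparison over raw 24-bit IDs, with
 * hypothetical format codes mirroring the switch above:
 */
#include <stdint.h>
#include <stdbool.h>

enum rscn_fmt_sketch {
	SK_FMT_PORT,	/* compare all 24 bits */
	SK_FMT_AREA,	/* compare domain + area (upper 16 bits) */
	SK_FMT_DOMAIN,	/* compare domain only (upper 8 bits) */
	SK_FMT_FABRIC,	/* matches every local port */
};

static bool rscn_did_matches(uint32_t did, uint32_t rscn_did,
			     enum rscn_fmt_sketch fmt)
{
	switch (fmt) {
	case SK_FMT_PORT:
		return (did & 0xffffffu) == (rscn_did & 0xffffffu);
	case SK_FMT_AREA:
		return (did & 0xffff00u) == (rscn_did & 0xffff00u);
	case SK_FMT_DOMAIN:
		return (did & 0xff0000u) == (rscn_did & 0xff0000u);
	case SK_FMT_FABRIC:
		return true;
	}
	return false;
}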
5667
e59058c4 5668/**
3621a710 5669 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
e59058c4
JS
5670 * @vport: pointer to a host virtual N_Port data structure.
5671 *
5672 * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the
5673 * state machine for each of the @vport's nodes that has a pending RSCN
5674 * (Registration State Change Notification).
5675 *
5676 * Return code
5677 * 0 - Successful (currently always returns 0)
5678 **/
dea3101e 5679static int
2e0fef85 5680lpfc_rscn_recovery_check(struct lpfc_vport *vport)
dea3101e 5681{
685f0bf7 5682 struct lpfc_nodelist *ndlp = NULL;
dea3101e 5683
0d2b6b83 5684 	/* Move all nodes affected by pending RSCNs to the NPR state. */
2e0fef85 5685 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093 5686 if (!NLP_CHK_NODE_ACT(ndlp) ||
0d2b6b83
JS
5687 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
5688 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
685f0bf7 5689 continue;
2e0fef85 5690 lpfc_disc_state_machine(vport, ndlp, NULL,
0d2b6b83
JS
5691 NLP_EVT_DEVICE_RECOVERY);
5692 lpfc_cancel_retry_delay_tmo(vport, ndlp);
dea3101e 5693 }
c9f8735b 5694 return 0;
dea3101e
JB
5695}
5696
ddcc50f0 5697/**
3621a710 5698 * lpfc_send_rscn_event - Send an RSCN event to management application
ddcc50f0
JS
5699 * @vport: pointer to a host virtual N_Port data structure.
5700 * @cmdiocb: pointer to lpfc command iocb data structure.
5701 *
5702 * lpfc_send_rscn_event sends an RSCN netlink event to management
5703 * applications.
5704 */
5705static void
5706lpfc_send_rscn_event(struct lpfc_vport *vport,
5707 struct lpfc_iocbq *cmdiocb)
5708{
5709 struct lpfc_dmabuf *pcmd;
5710 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5711 uint32_t *payload_ptr;
5712 uint32_t payload_len;
5713 struct lpfc_rscn_event_header *rscn_event_data;
5714
5715 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5716 payload_ptr = (uint32_t *) pcmd->virt;
5717 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
5718
5719 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
5720 payload_len, GFP_KERNEL);
5721 if (!rscn_event_data) {
5722 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5723 "0147 Failed to allocate memory for RSCN event\n");
5724 return;
5725 }
5726 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
5727 rscn_event_data->payload_length = payload_len;
5728 memcpy(rscn_event_data->rscn_payload, payload_ptr,
5729 payload_len);
5730
5731 fc_host_post_vendor_event(shost,
5732 fc_get_event_number(),
6599eaaa 5733 sizeof(struct lpfc_rscn_event_header) + payload_len,
ddcc50f0
JS
5734 (char *)rscn_event_data,
5735 LPFC_NL_VENDOR_ID);
5736
5737 kfree(rscn_event_data);
5738}
5739
e59058c4 5740/**
3621a710 5741 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
e59058c4
JS
5742 * @vport: pointer to a host virtual N_Port data structure.
5743 * @cmdiocb: pointer to lpfc command iocb data structure.
5744 * @ndlp: pointer to a node-list data structure.
5745 *
5746 * This routine processes an unsolicited RSCN (Registration State Change
5747 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
5748 * to invoke fc_host_post_event() routine to the FC transport layer. If the
5749 * discover state machine is about to begin discovery, it just accepts the
5750 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
5751 * contains N_Port IDs for other vports on this HBA, it just accepts the
5752 * RSCN without processing it. If the state machine is in the recovery
5753 * state, the fc_rscn_id_list of this @vport is walked and the
5754 * lpfc_rscn_recovery_check() routine is invoked to send a recovery event to
5755 * all nodes that match the RSCN payload. Otherwise, the lpfc_els_handle_rscn()
5756 * routine is invoked to handle the RSCN event.
5757 *
5758 * Return code
5759 * 0 - Just sent the acc response
5760 * 1 - Sent the acc response and waited for name server completion
5761 **/
dea3101e 5762static int
2e0fef85 5763lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
51ef4c26 5764 struct lpfc_nodelist *ndlp)
dea3101e 5765{
2e0fef85
JS
5766 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5767 struct lpfc_hba *phba = vport->phba;
dea3101e 5768 struct lpfc_dmabuf *pcmd;
92d7f7b0 5769 uint32_t *lp, *datap;
92d7f7b0 5770 uint32_t payload_len, length, nportid, *cmd;
7f5f3d0d 5771 int rscn_cnt;
92d7f7b0 5772 int rscn_id = 0, hba_id = 0;
d2873e4c 5773 int i;
dea3101e 5774
dea3101e
JB
5775 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5776 lp = (uint32_t *) pcmd->virt;
5777
92d7f7b0
JS
5778 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
5779 payload_len -= sizeof(uint32_t); /* take off word 0 */
dea3101e 5780 /* RSCN received */
e8b62011
JS
5781 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5782 "0214 RSCN received Data: x%x x%x x%x x%x\n",
7f5f3d0d
JS
5783 vport->fc_flag, payload_len, *lp,
5784 vport->fc_rscn_id_cnt);
ddcc50f0
JS
5785
5786 /* Send an RSCN event to the management application */
5787 lpfc_send_rscn_event(vport, cmdiocb);
5788
d2873e4c 5789 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
2e0fef85 5790 fc_host_post_event(shost, fc_get_event_number(),
d2873e4c
JS
5791 FCH_EVT_RSCN, lp[i]);
5792
dea3101e
JB
5793 /* If we are about to begin discovery, just ACC the RSCN.
5794 * Discovery processing will satisfy it.
5795 */
2e0fef85 5796 if (vport->port_state <= LPFC_NS_QRY) {
858c9f6c
JS
5797 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5798 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
5799 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
5800
51ef4c26 5801 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
c9f8735b 5802 return 0;
dea3101e
JB
5803 }
5804
92d7f7b0
JS
5805 /* If this RSCN just contains NPortIDs for other vports on this HBA,
5806 * just ACC and ignore it.
5807 */
5808 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3de2a653 5809 !(vport->cfg_peer_port_login)) {
92d7f7b0
JS
5810 i = payload_len;
5811 datap = lp;
5812 while (i > 0) {
5813 nportid = *datap++;
5814 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
5815 i -= sizeof(uint32_t);
5816 rscn_id++;
549e55cd
JS
5817 if (lpfc_find_vport_by_did(phba, nportid))
5818 hba_id++;
92d7f7b0
JS
5819 }
5820 if (rscn_id == hba_id) {
5821 /* ALL NPortIDs in RSCN are on HBA */
e8b62011 5822 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
d7c255b2 5823 "0219 Ignore RSCN "
e8b62011
JS
5824 "Data: x%x x%x x%x x%x\n",
5825 vport->fc_flag, payload_len,
7f5f3d0d 5826 *lp, vport->fc_rscn_id_cnt);
858c9f6c
JS
5827 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5828 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
5829 ndlp->nlp_DID, vport->port_state,
5830 ndlp->nlp_flag);
5831
92d7f7b0 5832 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
51ef4c26 5833 ndlp, NULL);
92d7f7b0
JS
5834 return 0;
5835 }
5836 }
5837
7f5f3d0d
JS
5838 spin_lock_irq(shost->host_lock);
5839 if (vport->fc_rscn_flush) {
5840 /* Another thread is walking fc_rscn_id_list on this vport */
7f5f3d0d 5841 vport->fc_flag |= FC_RSCN_DISCOVERY;
97957244 5842 spin_unlock_irq(shost->host_lock);
58da1ffb
JS
5843 /* Send back ACC */
5844 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7f5f3d0d
JS
5845 return 0;
5846 }
5847 /* Indicate we are walking fc_rscn_id_list on this vport */
5848 vport->fc_rscn_flush = 1;
5849 spin_unlock_irq(shost->host_lock);
af901ca1 5850 	/* Get the array count now that we successfully hold the token */
7f5f3d0d 5851 rscn_cnt = vport->fc_rscn_id_cnt;
dea3101e
JB
5852 /* If we are already processing an RSCN, save the received
5853 * RSCN payload buffer, cmdiocb->context2 to process later.
5854 */
2e0fef85 5855 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
858c9f6c
JS
5856 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5857 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
5858 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
5859
09372820 5860 spin_lock_irq(shost->host_lock);
92d7f7b0
JS
5861 vport->fc_flag |= FC_RSCN_DEFERRED;
5862 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
2e0fef85 5863 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
2e0fef85
JS
5864 vport->fc_flag |= FC_RSCN_MODE;
5865 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
5866 if (rscn_cnt) {
5867 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
5868 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
5869 }
5870 if ((rscn_cnt) &&
5871 (payload_len + length <= LPFC_BPL_SIZE)) {
5872 *cmd &= ELS_CMD_MASK;
7f5f3d0d 5873 *cmd |= cpu_to_be32(payload_len + length);
92d7f7b0
JS
5874 memcpy(((uint8_t *)cmd) + length, lp,
5875 payload_len);
5876 } else {
5877 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
5878 vport->fc_rscn_id_cnt++;
5879 /* If we zero cmdiocb->context2, the calling
5880 * routine will not try to free it.
5881 */
5882 cmdiocb->context2 = NULL;
5883 }
dea3101e 5884 /* Deferred RSCN */
e8b62011
JS
5885 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5886 "0235 Deferred RSCN "
5887 "Data: x%x x%x x%x\n",
5888 vport->fc_rscn_id_cnt, vport->fc_flag,
5889 vport->port_state);
dea3101e 5890 } else {
2e0fef85
JS
5891 vport->fc_flag |= FC_RSCN_DISCOVERY;
5892 spin_unlock_irq(shost->host_lock);
dea3101e 5893 /* ReDiscovery RSCN */
e8b62011
JS
5894 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5895 "0234 ReDiscovery RSCN "
5896 "Data: x%x x%x x%x\n",
5897 vport->fc_rscn_id_cnt, vport->fc_flag,
5898 vport->port_state);
dea3101e 5899 }
7f5f3d0d
JS
5900 /* Indicate we are done walking fc_rscn_id_list on this vport */
5901 vport->fc_rscn_flush = 0;
dea3101e 5902 /* Send back ACC */
51ef4c26 5903 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e 5904 /* send RECOVERY event for ALL nodes that match RSCN payload */
2e0fef85 5905 lpfc_rscn_recovery_check(vport);
09372820 5906 spin_lock_irq(shost->host_lock);
92d7f7b0 5907 vport->fc_flag &= ~FC_RSCN_DEFERRED;
09372820 5908 spin_unlock_irq(shost->host_lock);
c9f8735b 5909 return 0;
dea3101e 5910 }
858c9f6c
JS
5911 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5912 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
5913 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
5914
2e0fef85
JS
5915 spin_lock_irq(shost->host_lock);
5916 vport->fc_flag |= FC_RSCN_MODE;
5917 spin_unlock_irq(shost->host_lock);
5918 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
7f5f3d0d
JS
5919 /* Indicate we are done walking fc_rscn_id_list on this vport */
5920 vport->fc_rscn_flush = 0;
dea3101e
JB
5921 /*
5922 * If we zero cmdiocb->context2, the calling routine will
5923 * not try to free it.
5924 */
5925 cmdiocb->context2 = NULL;
2e0fef85 5926 lpfc_set_disctmo(vport);
dea3101e 5927 /* Send back ACC */
51ef4c26 5928 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e 5929 /* send RECOVERY event for ALL nodes that match RSCN payload */
2e0fef85 5930 lpfc_rscn_recovery_check(vport);
2e0fef85 5931 return lpfc_els_handle_rscn(vport);
dea3101e
JB
5932}
5933
e59058c4 5934/**
3621a710 5935 * lpfc_els_handle_rscn - Handle rscn for a vport
e59058c4
JS
5936 * @vport: pointer to a host virtual N_Port data structure.
5937 *
5938 * This routine handles the Registration State Change Notification
5939 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
5940 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
5941 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
5942 * NameServer shall be issued. If CT command to the NameServer fails to be
5943 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
5944 * RSCN activities with the @vport.
5945 *
5946 * Return code
5947 * 0 - Cleaned up rscn on the @vport
5948 * 1 - Wait for plogi to name server before proceed
5949 **/
dea3101e 5950int
2e0fef85 5951lpfc_els_handle_rscn(struct lpfc_vport *vport)
dea3101e
JB
5952{
5953 struct lpfc_nodelist *ndlp;
2e0fef85 5954 struct lpfc_hba *phba = vport->phba;
dea3101e 5955
92d7f7b0
JS
5956 /* Ignore RSCN if the port is being torn down. */
5957 if (vport->load_flag & FC_UNLOADING) {
5958 lpfc_els_flush_rscn(vport);
5959 return 0;
5960 }
5961
dea3101e 5962 /* Start timer for RSCN processing */
2e0fef85 5963 lpfc_set_disctmo(vport);
dea3101e
JB
5964
5965 /* RSCN processed */
e8b62011
JS
5966 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5967 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
5968 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
5969 vport->port_state);
dea3101e
JB
5970
5971 /* To process RSCN, first compare RSCN data with NameServer */
2e0fef85 5972 vport->fc_ns_retry = 0;
0ff10d46
JS
5973 vport->num_disc_nodes = 0;
5974
2e0fef85 5975 ndlp = lpfc_findnode_did(vport, NameServer_DID);
e47c9093
JS
5976 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
5977 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
dea3101e 5978 /* Good ndlp, issue CT Request to NameServer */
92d7f7b0 5979 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
dea3101e
JB
5980 /* Wait for NameServer query cmpl before we can
5981 continue */
c9f8735b 5982 return 1;
dea3101e
JB
5983 } else {
5984 /* If login to NameServer does not exist, issue one */
5985 /* Good status, issue PLOGI to NameServer */
2e0fef85 5986 ndlp = lpfc_findnode_did(vport, NameServer_DID);
e47c9093 5987 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
dea3101e
JB
5988 /* Wait for NameServer login cmpl before we can
5989 continue */
c9f8735b 5990 return 1;
2e0fef85 5991
e47c9093
JS
5992 if (ndlp) {
5993 ndlp = lpfc_enable_node(vport, ndlp,
5994 NLP_STE_PLOGI_ISSUE);
5995 if (!ndlp) {
5996 lpfc_els_flush_rscn(vport);
5997 return 0;
5998 }
5999 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
dea3101e 6000 } else {
e47c9093
JS
6001 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
6002 if (!ndlp) {
6003 lpfc_els_flush_rscn(vport);
6004 return 0;
6005 }
2e0fef85 6006 lpfc_nlp_init(vport, ndlp, NameServer_DID);
5024ab17 6007 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 6008 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
dea3101e 6009 }
e47c9093
JS
6010 ndlp->nlp_type |= NLP_FABRIC;
6011 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
6012 /* Wait for NameServer login cmpl before we can
6013 * continue
6014 */
6015 return 1;
dea3101e
JB
6016 }
6017
2e0fef85 6018 lpfc_els_flush_rscn(vport);
c9f8735b 6019 return 0;
dea3101e
JB
6020}
6021
e59058c4 6022/**
3621a710 6023 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
e59058c4
JS
6024 * @vport: pointer to a host virtual N_Port data structure.
6025 * @cmdiocb: pointer to lpfc command iocb data structure.
6026 * @ndlp: pointer to a node-list data structure.
6027 *
6028 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
6029 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
6030 * point topology. As an unsolicited FLOGI should not be received in a loop
6031 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
6032 * lpfc_check_sparm() routine is invoked to check the parameters in the
6033 * unsolicited FLOGI. If parameters validation failed, the routine
6034 * lpfc_els_rsp_reject() shall be called with reject reason code set to
6035 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
6036 * FLOGI shall be compared with the Port WWN of the @vport to determine who
6037 * will initiate PLOGI. The higher lexicographical value party shall has
6038 * higher priority (as the winning port) and will initiate PLOGI and
6039 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
6040 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
6041 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
6042 *
6043 * Return code
6044 * 0 - Successfully processed the unsolicited flogi
6045 * 1 - Failed to process the unsolicited flogi
6046 **/
dea3101e 6047static int
2e0fef85 6048lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
51ef4c26 6049 struct lpfc_nodelist *ndlp)
dea3101e 6050{
2e0fef85
JS
6051 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6052 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
6053 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6054 uint32_t *lp = (uint32_t *) pcmd->virt;
6055 IOCB_t *icmd = &cmdiocb->iocb;
6056 struct serv_parm *sp;
6057 LPFC_MBOXQ_t *mbox;
dea3101e
JB
6058 uint32_t cmd, did;
6059 int rc;
e74c03c8
JS
6060 uint32_t fc_flag = 0;
6061 uint32_t port_state = 0;
dea3101e
JB
6062
6063 cmd = *lp++;
6064 sp = (struct serv_parm *) lp;
6065
6066 /* FLOGI received */
6067
2e0fef85 6068 lpfc_set_disctmo(vport);
dea3101e 6069
76a95d75 6070 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
dea3101e
JB
6071 /* We should never receive a FLOGI in loop mode, ignore it */
6072 did = icmd->un.elsreq64.remoteID;
6073
6074 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
6075 Loop Mode */
e8b62011
JS
6076 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6077 "0113 An FLOGI ELS command x%x was "
6078 "received from DID x%x in Loop Mode\n",
6079 cmd, did);
c9f8735b 6080 return 1;
dea3101e
JB
6081 }
6082
d6de08cc 6083 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
dea3101e 6084
dea3101e 6085
d6de08cc
JS
6086 /*
6087 * If our portname is greater than the remote portname,
6088 * then we initiate Nport login.
6089 */
939723a4 6090
d6de08cc
JS
6091 rc = memcmp(&vport->fc_portname, &sp->portName,
6092 sizeof(struct lpfc_name));
939723a4 6093
d6de08cc
JS
6094 if (!rc) {
6095 if (phba->sli_rev < LPFC_SLI_REV4) {
6096 mbox = mempool_alloc(phba->mbox_mem_pool,
6097 GFP_KERNEL);
6098 if (!mbox)
6099 return 1;
6100 lpfc_linkdown(phba);
6101 lpfc_init_link(phba, mbox,
6102 phba->cfg_topology,
6103 phba->cfg_link_speed);
6104 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
6105 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6106 mbox->vport = vport;
6107 rc = lpfc_sli_issue_mbox(phba, mbox,
6108 MBX_NOWAIT);
6109 lpfc_set_loopback_flag(phba);
6110 if (rc == MBX_NOT_FINISHED)
6111 mempool_free(mbox, phba->mbox_mem_pool);
6112 return 1;
6113 }
6114
6115 /* abort the flogi coming back to ourselves
6116 * due to external loopback on the port.
939723a4 6117 */
d6de08cc
JS
6118 lpfc_els_abort_flogi(phba);
6119 return 0;
6120
6121 } else if (rc > 0) { /* greater than */
2e0fef85 6122 spin_lock_irq(shost->host_lock);
d6de08cc 6123 vport->fc_flag |= FC_PT2PT_PLOGI;
2e0fef85 6124 spin_unlock_irq(shost->host_lock);
939723a4 6125
d6de08cc
JS
6126 /* If we have the high WWPN we can assign our own
6127 * myDID; otherwise, we have to WAIT for a PLOGI
6128 * from the remote NPort to find out what it
6129 * will be.
939723a4 6130 */
d6de08cc 6131 vport->fc_myDID = PT2PT_LocalID;
dea3101e 6132 } else {
d6de08cc
JS
6133 vport->fc_myDID = PT2PT_RemoteID;
6134 }
939723a4 6135
d6de08cc
JS
6136 /*
6137 * The vport state should go to LPFC_FLOGI only
6138 * AFTER we issue a FLOGI, not receive one.
6139 */
6140 spin_lock_irq(shost->host_lock);
6141 fc_flag = vport->fc_flag;
6142 port_state = vport->port_state;
6143 vport->fc_flag |= FC_PT2PT;
6144 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6145 spin_unlock_irq(shost->host_lock);
6146 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6147 "3311 Rcv Flogi PS x%x new PS x%x "
6148 "fc_flag x%x new fc_flag x%x\n",
6149 port_state, vport->port_state,
6150 fc_flag, vport->fc_flag);
939723a4 6151
d6de08cc
JS
6152 /*
6153 * We temporarily set fc_myDID to make it look like we are
6154 * a Fabric. This is done just so we end up with the right
6155 * did / sid on the FLOGI ACC rsp.
6156 */
6157 did = vport->fc_myDID;
6158 vport->fc_myDID = Fabric_DID;
dea3101e 6159
d6de08cc 6160 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
eec3d312 6161
dea3101e 6162 /* Send back ACC */
d6de08cc 6163 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
dea3101e 6164
939723a4
JS
6165 /* Now let's put fc_myDID back to what it's supposed to be */
6166 vport->fc_myDID = did;
6167
c9f8735b 6168 return 0;
dea3101e
JB
6169}
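/*
 * [Editor's sketch -- not part of lpfc_els.c.]  The pt2pt arbitration in
 * lpfc_els_rcv_flogi() above reduces to a bytewise comparison of the two
 * 8-byte Port WWNs: the side whose WWPN compares higher assigns itself
 * PT2PT_LocalID and initiates PLOGI; the other side waits for that PLOGI
 * to learn its own address.  A minimal stand-alone illustration, assuming
 * plain 8-byte WWPN buffers (wwpn_initiates_plogi() is hypothetical):
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool wwpn_initiates_plogi(const uint8_t local_wwpn[8],
				 const uint8_t remote_wwpn[8])
{
	/* WWPN bytes are stored most-significant first, so memcmp()
	 * gives their numeric ordering directly.
	 */
	return memcmp(local_wwpn, remote_wwpn, 8) > 0;
}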
6170
e59058c4 6171/**
3621a710 6172 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
e59058c4
JS
6173 * @vport: pointer to a host virtual N_Port data structure.
6174 * @cmdiocb: pointer to lpfc command iocb data structure.
6175 * @ndlp: pointer to a node-list data structure.
6176 *
6177 * This routine processes Request Node Identification Data (RNID) IOCB
6178 * received as an ELS unsolicited event. Only when the RNID specified format
6179 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
6180 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to
6181 * Accept (ACC) the RNID ELS command. All the other RNID formats are
6182 * rejected by invoking the lpfc_els_rsp_reject() routine.
6183 *
6184 * Return code
6185 * 0 - Successfully processed rnid iocb (currently always return 0)
6186 **/
dea3101e 6187static int
2e0fef85
JS
6188lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6189 struct lpfc_nodelist *ndlp)
dea3101e
JB
6190{
6191 struct lpfc_dmabuf *pcmd;
6192 uint32_t *lp;
dea3101e
JB
6193 RNID *rn;
6194 struct ls_rjt stat;
eb016566 6195 uint32_t cmd;
dea3101e 6196
dea3101e
JB
6197 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6198 lp = (uint32_t *) pcmd->virt;
6199
6200 cmd = *lp++;
6201 rn = (RNID *) lp;
6202
6203 /* RNID received */
6204
6205 switch (rn->Format) {
6206 case 0:
6207 case RNID_TOPOLOGY_DISC:
6208 /* Send back ACC */
2e0fef85 6209 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
dea3101e
JB
6210 break;
6211 default:
6212 /* Reject this request because format not supported */
6213 stat.un.b.lsRjtRsvd0 = 0;
6214 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6215 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6216 stat.un.b.vendorUnique = 0;
858c9f6c
JS
6217 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
6218 NULL);
dea3101e 6219 }
c9f8735b 6220 return 0;
dea3101e
JB
6221}
6222
12265f68
JS
6223/**
6224 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
6225 * @vport: pointer to a host virtual N_Port data structure.
6226 * @cmdiocb: pointer to lpfc command iocb data structure.
6227 * @ndlp: pointer to a node-list data structure.
6228 *
6229 * Return code
6230 * 0 - Successfully processed echo iocb (currently always return 0)
6231 **/
6232static int
6233lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6234 struct lpfc_nodelist *ndlp)
6235{
6236 uint8_t *pcmd;
6237
6238 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
6239
6240 /* skip over first word of echo command to find echo data */
6241 pcmd += sizeof(uint32_t);
6242
6243 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
6244 return 0;
6245}
6246
e59058c4 6247/**
3621a710 6248 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
e59058c4
JS
6249 * @vport: pointer to a host virtual N_Port data structure.
6250 * @cmdiocb: pointer to lpfc command iocb data structure.
6251 * @ndlp: pointer to a node-list data structure.
6252 *
6253 * This routine processes a Link Incident Record Registration (LIRR) IOCB
6254 * received as an ELS unsolicited event. Currently, this function just invokes
6255 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
6256 *
6257 * Return code
6258 * 0 - Successfully processed lirr iocb (currently always return 0)
6259 **/
dea3101e 6260static int
2e0fef85
JS
6261lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6262 struct lpfc_nodelist *ndlp)
7bb3b137
JW
6263{
6264 struct ls_rjt stat;
6265
6266 /* For now, unconditionally reject this command */
6267 stat.un.b.lsRjtRsvd0 = 0;
6268 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6269 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6270 stat.un.b.vendorUnique = 0;
858c9f6c 6271 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7bb3b137
JW
6272 return 0;
6273}
6274
5ffc266e
JS
6275/**
6276 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
6277 * @vport: pointer to a host virtual N_Port data structure.
6278 * @cmdiocb: pointer to lpfc command iocb data structure.
6279 * @ndlp: pointer to a node-list data structure.
6280 *
6281 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
6282 * received as an ELS unsolicited event. A request to RRQ shall only
6283 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
6284 * Nx_Port N_Port_ID of the target Exchange is the same as the
6285 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
6286 * not accepted, an LS_RJT with reason code "Unable to perform
6287 * command request" and reason code explanation "Invalid Originator
6288 * S_ID" shall be returned. For now, we just unconditionally accept
6289 * RRQ from the target.
6290 **/
6291static void
6292lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6293 struct lpfc_nodelist *ndlp)
6294{
6295 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
19ca7609
JS
6296 if (vport->phba->sli_rev == LPFC_SLI_REV4)
6297 lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
5ffc266e
JS
6298}
6299
12265f68
JS
6300/**
6301 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
6302 * @phba: pointer to lpfc hba data structure.
6303 * @pmb: pointer to the driver internal queue element for mailbox command.
6304 *
6305 * This routine is the completion callback function for the MBX_READ_LNK_STAT
6306 * mailbox command. This callback function is to actually send the Accept
6307 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
6308 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
6309 * mailbox command, constructs the RLS response with the link statistics
6310 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
6311 * response to the RLS.
6312 *
6313 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6314 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6315 * will be stored into the context1 field of the IOCB for the completion
6316 * callback function to the RLS Accept Response ELS IOCB command.
6317 *
6318 **/
6319static void
6320lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6321{
6322 MAILBOX_t *mb;
6323 IOCB_t *icmd;
6324 struct RLS_RSP *rls_rsp;
6325 uint8_t *pcmd;
6326 struct lpfc_iocbq *elsiocb;
6327 struct lpfc_nodelist *ndlp;
7851fe2c
JS
6328 uint16_t oxid;
6329 uint16_t rxid;
12265f68
JS
6330 uint32_t cmdsize;
6331
6332 mb = &pmb->u.mb;
6333
6334 ndlp = (struct lpfc_nodelist *) pmb->context2;
7851fe2c
JS
6335 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
6336 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
12265f68
JS
6337 pmb->context1 = NULL;
6338 pmb->context2 = NULL;
6339
6340 if (mb->mbxStatus) {
6341 mempool_free(pmb, phba->mbox_mem_pool);
6342 return;
6343 }
6344
6345 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
12265f68
JS
6346 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6347 lpfc_max_els_tries, ndlp,
6348 ndlp->nlp_DID, ELS_CMD_ACC);
6349
6350 /* Decrement the ndlp reference count from previous mbox command */
6351 lpfc_nlp_put(ndlp);
6352
37db57e3
JS
6353 if (!elsiocb) {
6354 mempool_free(pmb, phba->mbox_mem_pool);
12265f68 6355 return;
37db57e3 6356 }
12265f68
JS
6357
6358 icmd = &elsiocb->iocb;
7851fe2c
JS
6359 icmd->ulpContext = rxid;
6360 icmd->unsli3.rcvsli3.ox_id = oxid;
12265f68
JS
6361
6362 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6363 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6364 pcmd += sizeof(uint32_t); /* Skip past command */
6365 rls_rsp = (struct RLS_RSP *)pcmd;
6366
6367 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
6368 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
6369 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
6370 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
6371 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
6372 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
37db57e3 6373 mempool_free(pmb, phba->mbox_mem_pool);
12265f68
JS
6374 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
6375 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
6376 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
6377 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
6378 elsiocb->iotag, elsiocb->iocb.ulpContext,
6379 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6380 ndlp->nlp_rpi);
6381 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6382 phba->fc_stat.elsXmitACC++;
6383 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
6384 lpfc_els_free_iocb(phba, elsiocb);
6385}
6386
e59058c4 6387/**
3621a710 6388 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
e59058c4
JS
6389 * @phba: pointer to lpfc hba data structure.
6390 * @pmb: pointer to the driver internal queue element for mailbox command.
6391 *
6392 * This routine is the completion callback function for the MBX_READ_LNK_STAT
6393 * mailbox command. This callback function is to actually send the Accept
6394 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
6395 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
6396 * mailbox command, constructs the RPS response with the link statistics
6397 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
6398 * response to the RPS.
6399 *
6400 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6401 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6402 * will be stored into the context1 field of the IOCB for the completion
6403 * callback function to the RPS Accept Response ELS IOCB command.
6404 *
6405 **/
082c0266 6406static void
329f9bc7 6407lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7bb3b137 6408{
7bb3b137
JW
6409 MAILBOX_t *mb;
6410 IOCB_t *icmd;
6411 RPS_RSP *rps_rsp;
6412 uint8_t *pcmd;
6413 struct lpfc_iocbq *elsiocb;
6414 struct lpfc_nodelist *ndlp;
7851fe2c
JS
6415 uint16_t status;
6416 uint16_t oxid;
6417 uint16_t rxid;
7bb3b137
JW
6418 uint32_t cmdsize;
6419
04c68496 6420 mb = &pmb->u.mb;
7bb3b137
JW
6421
6422 ndlp = (struct lpfc_nodelist *) pmb->context2;
7851fe2c
JS
6423 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
6424 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
041976fb
RD
6425 pmb->context1 = NULL;
6426 pmb->context2 = NULL;
7bb3b137
JW
6427
6428 if (mb->mbxStatus) {
329f9bc7 6429 mempool_free(pmb, phba->mbox_mem_pool);
7bb3b137
JW
6430 return;
6431 }
6432
6433 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
329f9bc7 6434 mempool_free(pmb, phba->mbox_mem_pool);
2e0fef85
JS
6435 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6436 lpfc_max_els_tries, ndlp,
6437 ndlp->nlp_DID, ELS_CMD_ACC);
fa4066b6
JS
6438
6439 /* Decrement the ndlp reference count from previous mbox command */
329f9bc7 6440 lpfc_nlp_put(ndlp);
fa4066b6 6441
c9f8735b 6442 if (!elsiocb)
7bb3b137 6443 return;
7bb3b137
JW
6444
6445 icmd = &elsiocb->iocb;
7851fe2c
JS
6446 icmd->ulpContext = rxid;
6447 icmd->unsli3.rcvsli3.ox_id = oxid;
7bb3b137
JW
6448
6449 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6450 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 6451 pcmd += sizeof(uint32_t); /* Skip past command */
7bb3b137
JW
6452 rps_rsp = (RPS_RSP *)pcmd;
6453
76a95d75 6454 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
7bb3b137
JW
6455 status = 0x10;
6456 else
6457 status = 0x8;
2e0fef85 6458 if (phba->pport->fc_flag & FC_FABRIC)
7bb3b137
JW
6459 status |= 0x4;
6460
6461 rps_rsp->rsvd1 = 0;
09372820
JS
6462 rps_rsp->portStatus = cpu_to_be16(status);
6463 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
6464 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
6465 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
6466 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
6467 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
6468 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
7bb3b137 6469 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
e8b62011
JS
6470 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
6471 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
6472 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
6473 elsiocb->iotag, elsiocb->iocb.ulpContext,
6474 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6475 ndlp->nlp_rpi);
858c9f6c 6476 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7bb3b137 6477 phba->fc_stat.elsXmitACC++;
3772a991 6478 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
7bb3b137 6479 lpfc_els_free_iocb(phba, elsiocb);
7bb3b137
JW
6480 return;
6481}
6482
e59058c4 6483/**
12265f68
JS
6484 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
6485 * @vport: pointer to a host virtual N_Port data structure.
6486 * @cmdiocb: pointer to lpfc command iocb data structure.
6487 * @ndlp: pointer to a node-list data structure.
6488 *
6489 * This routine processes a Read Link Status (RLS) IOCB received as an
6490 * ELS unsolicited event. It first checks the remote port state. If the
6491 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
6492 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
6493 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
6494 * for reading the HBA link statistics. The callback function,
6495 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
6496 * then actually sends out the RLS Accept (ACC) response.
6497 *
6498 * Return codes
6499 * 0 - Successfully processed rls iocb (currently always return 0)
6500 **/
6501static int
6502lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6503 struct lpfc_nodelist *ndlp)
6504{
6505 struct lpfc_hba *phba = vport->phba;
6506 LPFC_MBOXQ_t *mbox;
12265f68
JS
6507 struct ls_rjt stat;
6508
6509 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
6510 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
6511 /* reject the unsolicited RLS request and done with it */
6512 goto reject_out;
6513
12265f68
JS
6514 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
6515 if (mbox) {
6516 lpfc_read_lnk_stat(phba, mbox);
7851fe2c
JS
6517 mbox->context1 = (void *)((unsigned long)
6518 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
6519 cmdiocb->iocb.ulpContext)); /* rx_id */
12265f68
JS
6520 mbox->context2 = lpfc_nlp_get(ndlp);
6521 mbox->vport = vport;
6522 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
6523 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
6524 != MBX_NOT_FINISHED)
6525 /* Mbox completion will send ELS Response */
6526 return 0;
6527 /* Decrement reference count used for the failed mbox
6528 * command.
6529 */
6530 lpfc_nlp_put(ndlp);
6531 mempool_free(mbox, phba->mbox_mem_pool);
6532 }
6533reject_out:
6534 /* issue rejection response */
6535 stat.un.b.lsRjtRsvd0 = 0;
6536 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6537 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6538 stat.un.b.vendorUnique = 0;
6539 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6540 return 0;
6541}
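/*
 * [Editor's sketch -- not part of lpfc_els.c.]  lpfc_els_rcv_rls() above
 * (and lpfc_els_rcv_rps() below) carry the exchange identifiers across the
 * asynchronous MBX_READ_LNK_STAT completion by packing ox_id and rx_id into
 * the opaque mailbox context1 pointer; lpfc_els_rsp_rls_acc() unpacks them
 * the same way.  The helper names below are hypothetical (kernel integer
 * types assumed):
 */
static inline void *lpfc_pack_xchg_ids(uint16_t ox_id, uint16_t rx_id)
{
	/* high 16 bits: ox_id, low 16 bits: rx_id, stored in a pointer */
	return (void *)(((unsigned long)ox_id << 16) | rx_id);
}

static inline void lpfc_unpack_xchg_ids(void *context1,
					uint16_t *ox_id, uint16_t *rx_id)
{
	*rx_id = (uint16_t)((unsigned long)context1 & 0xffff);
	*ox_id = (uint16_t)(((unsigned long)context1 >> 16) & 0xffff);
}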
6542
6543/**
6544 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
6545 * @vport: pointer to a host virtual N_Port data structure.
6546 * @cmdiocb: pointer to lpfc command iocb data structure.
6547 * @ndlp: pointer to a node-list data structure.
6548 *
6549 * This routine processes a Read Timeout Value (RTV) IOCB received as an
6550 * ELS unsolicited event. It first checks the remote port state. If the
6551 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
6552 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
6553 * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
6554 * Value (RTV) unsolicited IOCB event.
6555 *
6556 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6557 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6558 * will be stored into the context1 field of the IOCB for the completion
6559 * callback function to the RTV Accept Response ELS IOCB command.
6560 *
6561 * Return codes
6562 * 0 - Successfully processed rtv iocb (currently always return 0)
6563 **/
6564static int
6565lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6566 struct lpfc_nodelist *ndlp)
6567{
6568 struct lpfc_hba *phba = vport->phba;
6569 struct ls_rjt stat;
6570 struct RTV_RSP *rtv_rsp;
6571 uint8_t *pcmd;
6572 struct lpfc_iocbq *elsiocb;
6573 uint32_t cmdsize;
6574
6575
6576 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
6577 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
6578 /* reject the unsolicited RTV request and done with it */
6579 goto reject_out;
6580
6581 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
6582 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6583 lpfc_max_els_tries, ndlp,
6584 ndlp->nlp_DID, ELS_CMD_ACC);
6585
6586 if (!elsiocb)
6587 return 1;
6588
6589 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6590 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6591 pcmd += sizeof(uint32_t); /* Skip past command */
6592
6593 /* use the command's xri in the response */
7851fe2c
JS
6594 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
6595 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
12265f68
JS
6596
6597 rtv_rsp = (struct RTV_RSP *)pcmd;
6598
6599 /* populate RTV payload */
6600 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
6601 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
6602 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
6603 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
6604 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
6605
6606 /* Xmit ELS RTV ACC response tag <ulpIoTag> */
6607 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
6608 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
6609 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
6610 "Data: x%x x%x x%x\n",
6611 elsiocb->iotag, elsiocb->iocb.ulpContext,
6612 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6613 ndlp->nlp_rpi,
6614 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
6615 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6616 phba->fc_stat.elsXmitACC++;
6617 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
6618 lpfc_els_free_iocb(phba, elsiocb);
6619 return 0;
6620
6621reject_out:
6622 /* issue rejection response */
6623 stat.un.b.lsRjtRsvd0 = 0;
6624 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6625 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6626 stat.un.b.vendorUnique = 0;
6627 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6628 return 0;
6629}
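/*
 * [Editor's sketch -- not part of lpfc_els.c.]  The RTV accept built above
 * reports R_A_TOV in milliseconds (fc_ratov is kept in seconds, hence the
 * "* 1000"), E_D_TOV as programmed, and a qualifier word whose bits carry
 * the E_D_TOV resolution flag set via bf_set() above.  A userspace sketch
 * of how the requesting port might decode the three payload words that
 * follow the ELS_CMD_ACC word; the word order is taken from the response
 * construction above and ntohl() stands in for be32_to_cpu():
 */
#include <arpa/inet.h>
#include <stdint.h>

struct rtv_values {
	uint32_t ratov_ms;	/* Resource Allocation Timeout Value, msec */
	uint32_t edtov;		/* Error Detect Timeout Value */
	uint32_t qtov_raw;	/* qualifier word: resolution flags, etc. */
};

static void decode_rtv_acc(const uint32_t *payload, struct rtv_values *out)
{
	out->ratov_ms = ntohl(payload[0]);
	out->edtov    = ntohl(payload[1]);
	out->qtov_raw = ntohl(payload[2]);
}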
6630
6631 /* lpfc_els_rcv_rps - Process an unsolicited rps iocb
e59058c4
JS
6632 * @vport: pointer to a host virtual N_Port data structure.
6633 * @cmdiocb: pointer to lpfc command iocb data structure.
6634 * @ndlp: pointer to a node-list data structure.
6635 *
6636 * This routine processes Read Port Status (RPS) IOCB received as an
6637 * ELS unsolicited event. It first checks the remote port state. If the
6638 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
6639 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
6640 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
6641 * for reading the HBA link statistics. It is for the callback function,
6642 * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
6643 * to actually sending out RPS Accept (ACC) response.
6644 *
6645 * Return codes
6646 * 0 - Successfully processed rps iocb (currently always return 0)
6647 **/
7bb3b137 6648static int
2e0fef85
JS
6649lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6650 struct lpfc_nodelist *ndlp)
dea3101e 6651{
2e0fef85 6652 struct lpfc_hba *phba = vport->phba;
dea3101e 6653 uint32_t *lp;
7bb3b137
JW
6654 uint8_t flag;
6655 LPFC_MBOXQ_t *mbox;
6656 struct lpfc_dmabuf *pcmd;
6657 RPS *rps;
6658 struct ls_rjt stat;
6659
2fe165b6 6660 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
90160e01
JS
6661 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
6662 /* reject the unsolicited RPS request and done with it */
6663 goto reject_out;
7bb3b137
JW
6664
6665 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6666 lp = (uint32_t *) pcmd->virt;
6667 flag = (be32_to_cpu(*lp++) & 0xf);
6668 rps = (RPS *) lp;
6669
6670 if ((flag == 0) ||
6671 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
2e0fef85 6672 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
92d7f7b0 6673 sizeof(struct lpfc_name)) == 0))) {
2e0fef85 6674
92d7f7b0
JS
6675 printk("Fix me....\n");
6676 dump_stack();
2e0fef85
JS
6677 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
6678 if (mbox) {
7bb3b137 6679 lpfc_read_lnk_stat(phba, mbox);
7851fe2c
JS
6680 mbox->context1 = (void *)((unsigned long)
6681 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
6682 cmdiocb->iocb.ulpContext)); /* rx_id */
329f9bc7 6683 mbox->context2 = lpfc_nlp_get(ndlp);
92d7f7b0 6684 mbox->vport = vport;
7bb3b137 6685 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
fa4066b6 6686 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
0b727fea 6687 != MBX_NOT_FINISHED)
7bb3b137
JW
6688 /* Mbox completion will send ELS Response */
6689 return 0;
fa4066b6
JS
6690 /* Decrement reference count used for the failed mbox
6691 * command.
6692 */
329f9bc7 6693 lpfc_nlp_put(ndlp);
7bb3b137
JW
6694 mempool_free(mbox, phba->mbox_mem_pool);
6695 }
6696 }
90160e01
JS
6697
6698reject_out:
6699 /* issue rejection response */
7bb3b137
JW
6700 stat.un.b.lsRjtRsvd0 = 0;
6701 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6702 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6703 stat.un.b.vendorUnique = 0;
858c9f6c 6704 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7bb3b137
JW
6705 return 0;
6706}
6707
19ca7609
JS
6708 /* lpfc_issue_els_rrq - Issue an ELS RRQ (Reinstate Recovery Qualifier) command
6709 * @vport: pointer to a host virtual N_Port data structure.
6710 * @ndlp: pointer to a node-list data structure.
6711 * @did: DID of the target.
6712 * @rrq: Pointer to the rrq struct.
6713 *
6714 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
6715 * successful, the completion handler will clear the RRQ.
6716 *
6717 * Return codes
6718 * 0 - Successfully sent rrq els iocb.
6719 * 1 - Failed to send rrq els iocb.
6720 **/
6721static int
6722lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6723 uint32_t did, struct lpfc_node_rrq *rrq)
6724{
6725 struct lpfc_hba *phba = vport->phba;
6726 struct RRQ *els_rrq;
19ca7609
JS
6727 struct lpfc_iocbq *elsiocb;
6728 uint8_t *pcmd;
6729 uint16_t cmdsize;
6730 int ret;
6731
6732
6733 if (ndlp != rrq->ndlp)
6734 ndlp = rrq->ndlp;
6735 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
6736 return 1;
6737
6738 /* If ndlp is not NULL, we will bump the reference count on it */
6739 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
6740 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
6741 ELS_CMD_RRQ);
6742 if (!elsiocb)
6743 return 1;
6744
19ca7609
JS
6745 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6746
6747 /* For RRQ request, remainder of payload is Exchange IDs */
6748 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
6749 pcmd += sizeof(uint32_t);
6750 els_rrq = (struct RRQ *) pcmd;
6751
ee0f4fe1 6752 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
19ca7609
JS
6753 bf_set(rrq_rxid, els_rrq, rrq->rxid);
6754 bf_set(rrq_did, els_rrq, vport->fc_myDID);
6755 els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
6756 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
6757
6758
6759 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6760 "Issue RRQ: did:x%x",
6761 did, rrq->xritag, rrq->rxid);
6762 elsiocb->context_un.rrq = rrq;
6763 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
6764 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6765
6766 if (ret == IOCB_ERROR) {
6767 lpfc_els_free_iocb(phba, elsiocb);
6768 return 1;
6769 }
6770 return 0;
6771}
6772
6773/**
6774 * lpfc_send_rrq - Sends ELS RRQ if needed.
6775 * @phba: pointer to lpfc hba data structure.
6776 * @rrq: pointer to the active rrq.
6777 *
6778 * This routine will call the lpfc_issue_els_rrq if the rrq is
6779 * still active for the xri. If this function returns a failure then
6780 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
6781 *
6782 * Returns 0 Success.
6783 * 1 Failure.
6784 **/
6785int
6786lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
6787{
6788 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
6789 rrq->nlp_DID);
6790 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
6791 return lpfc_issue_els_rrq(rrq->vport, ndlp,
6792 rrq->nlp_DID, rrq);
6793 else
6794 return 1;
6795}
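/*
 * [Editor's sketch -- not part of lpfc_els.c.]  Caller-side pattern for the
 * routine above: a non-zero return means no RRQ ELS was issued and the
 * active RRQ is still the caller's to clean up (the kernel-doc above names
 * lpfc_clr_active_rrq() for that; its exact signature is not shown here,
 * so it is only referenced in a comment).  example_send_rrq_or_clean() is
 * hypothetical:
 */
static void example_send_rrq_or_clean(struct lpfc_hba *phba,
				      struct lpfc_node_rrq *rrq)
{
	if (lpfc_send_rrq(phba, rrq)) {
		/*
		 * RRQ was not sent -- per the kernel-doc above, clean up
		 * the active RRQ here (e.g. via lpfc_clr_active_rrq()).
		 */
	}
	/* On success, lpfc_cmpl_els_rrq() clears the RRQ at completion. */
}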
6796
e59058c4 6797/**
3621a710 6798 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
e59058c4
JS
6799 * @vport: pointer to a host virtual N_Port data structure.
6800 * @cmdsize: size of the ELS command.
6801 * @oldiocb: pointer to the original lpfc command iocb data structure.
6802 * @ndlp: pointer to a node-list data structure.
6803 *
6804 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
6805 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
6806 *
6807 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6808 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6809 * will be stored into the context1 field of the IOCB for the completion
6810 * callback function to the RPL Accept Response ELS command.
6811 *
6812 * Return code
6813 * 0 - Successfully issued ACC RPL ELS command
6814 * 1 - Failed to issue ACC RPL ELS command
6815 **/
082c0266 6816static int
2e0fef85
JS
6817lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
6818 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
7bb3b137 6819{
2e0fef85
JS
6820 struct lpfc_hba *phba = vport->phba;
6821 IOCB_t *icmd, *oldcmd;
7bb3b137
JW
6822 RPL_RSP rpl_rsp;
6823 struct lpfc_iocbq *elsiocb;
7bb3b137 6824 uint8_t *pcmd;
dea3101e 6825
2e0fef85
JS
6826 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6827 ndlp->nlp_DID, ELS_CMD_ACC);
7bb3b137 6828
488d1469 6829 if (!elsiocb)
7bb3b137 6830 return 1;
488d1469 6831
7bb3b137
JW
6832 icmd = &elsiocb->iocb;
6833 oldcmd = &oldiocb->iocb;
7851fe2c
JS
6834 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6835 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
7bb3b137
JW
6836
6837 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6838 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 6839 pcmd += sizeof(uint16_t);
7bb3b137
JW
6840 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
6841 pcmd += sizeof(uint16_t);
6842
6843 /* Setup the RPL ACC payload */
6844 rpl_rsp.listLen = be32_to_cpu(1);
6845 rpl_rsp.index = 0;
6846 rpl_rsp.port_num_blk.portNum = 0;
2e0fef85
JS
6847 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
6848 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
7bb3b137 6849 sizeof(struct lpfc_name));
7bb3b137 6850 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
7bb3b137 6851 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
e8b62011
JS
6852 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6853 "0120 Xmit ELS RPL ACC response tag x%x "
6854 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
6855 "rpi x%x\n",
6856 elsiocb->iotag, elsiocb->iocb.ulpContext,
6857 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6858 ndlp->nlp_rpi);
858c9f6c 6859 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7bb3b137 6860 phba->fc_stat.elsXmitACC++;
3772a991
JS
6861 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
6862 IOCB_ERROR) {
7bb3b137
JW
6863 lpfc_els_free_iocb(phba, elsiocb);
6864 return 1;
6865 }
6866 return 0;
6867}
6868
e59058c4 6869/**
3621a710 6870 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
e59058c4
JS
6871 * @vport: pointer to a host virtual N_Port data structure.
6872 * @cmdiocb: pointer to lpfc command iocb data structure.
6873 * @ndlp: pointer to a node-list data structure.
6874 *
6875 * This routine processes Read Port List (RPL) IOCB received as an ELS
6876 * unsolicited event. It first checks the remote port state. If the remote
6877 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
6878 * invokes the lpfc_els_rsp_reject() routine to send reject response.
6879 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
6880 * to accept the RPL.
6881 *
6882 * Return code
6883 * 0 - Successfully processed rpl iocb (currently always return 0)
6884 **/
7bb3b137 6885static int
2e0fef85
JS
6886lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6887 struct lpfc_nodelist *ndlp)
7bb3b137
JW
6888{
6889 struct lpfc_dmabuf *pcmd;
6890 uint32_t *lp;
6891 uint32_t maxsize;
6892 uint16_t cmdsize;
6893 RPL *rpl;
6894 struct ls_rjt stat;
6895
2fe165b6
JW
6896 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
6897 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
90160e01 6898 /* issue rejection response */
7bb3b137
JW
6899 stat.un.b.lsRjtRsvd0 = 0;
6900 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6901 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6902 stat.un.b.vendorUnique = 0;
858c9f6c
JS
6903 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
6904 NULL);
90160e01
JS
6905 /* rejected the unsolicited RPL request and done with it */
6906 return 0;
7bb3b137
JW
6907 }
6908
dea3101e
JB
6909 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6910 lp = (uint32_t *) pcmd->virt;
7bb3b137 6911 rpl = (RPL *) (lp + 1);
7bb3b137 6912 maxsize = be32_to_cpu(rpl->maxsize);
dea3101e 6913
7bb3b137
JW
6914 /* We support only one port */
6915 if ((rpl->index == 0) &&
6916 ((maxsize == 0) ||
6917 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
6918 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
2fe165b6 6919 } else {
7bb3b137
JW
6920 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
6921 }
2e0fef85 6922 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
dea3101e
JB
6923
6924 return 0;
6925}
6926
e59058c4 6927/**
3621a710 6928 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
e59058c4
JS
6929 * @vport: pointer to a virtual N_Port data structure.
6930 * @cmdiocb: pointer to lpfc command iocb data structure.
6931 * @ndlp: pointer to a node-list data structure.
6932 *
6933 * This routine processes Fibre Channel Address Resolution Protocol
6934 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
6935 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
6936 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
6937 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
6938 * remote PortName is compared against the FC PortName stored in the @vport
6939 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
6940 * compared against the FC NodeName stored in the @vport data structure.
6941 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
6942 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
6943 * invoked to send out FARP Response to the remote node. Before sending the
6944 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP
6945 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
6946 * routine is invoked to log into the remote port first.
6947 *
6948 * Return code
6949 * 0 - Either the FARP Match Mode not supported or successfully processed
6950 **/
dea3101e 6951static int
2e0fef85
JS
6952lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6953 struct lpfc_nodelist *ndlp)
dea3101e
JB
6954{
6955 struct lpfc_dmabuf *pcmd;
6956 uint32_t *lp;
6957 IOCB_t *icmd;
6958 FARP *fp;
6959 uint32_t cmd, cnt, did;
6960
6961 icmd = &cmdiocb->iocb;
6962 did = icmd->un.elsreq64.remoteID;
6963 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6964 lp = (uint32_t *) pcmd->virt;
6965
6966 cmd = *lp++;
6967 fp = (FARP *) lp;
dea3101e 6968 /* FARP-REQ received from DID <did> */
e8b62011
JS
6969 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6970 "0601 FARP-REQ received from DID x%x\n", did);
dea3101e
JB
6971 /* We will only support match on WWPN or WWNN */
6972 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
c9f8735b 6973 return 0;
dea3101e
JB
6974 }
6975
6976 cnt = 0;
6977 /* If this FARP command is searching for my portname */
6978 if (fp->Mflags & FARP_MATCH_PORT) {
2e0fef85 6979 if (memcmp(&fp->RportName, &vport->fc_portname,
92d7f7b0 6980 sizeof(struct lpfc_name)) == 0)
dea3101e
JB
6981 cnt = 1;
6982 }
6983
6984 /* If this FARP command is searching for my nodename */
6985 if (fp->Mflags & FARP_MATCH_NODE) {
2e0fef85 6986 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
92d7f7b0 6987 sizeof(struct lpfc_name)) == 0)
dea3101e
JB
6988 cnt = 1;
6989 }
6990
6991 if (cnt) {
6992 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
6993 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
6994 /* Log back into the node before sending the FARP. */
6995 if (fp->Rflags & FARP_REQUEST_PLOGI) {
5024ab17 6996 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 6997 lpfc_nlp_set_state(vport, ndlp,
de0c5b32 6998 NLP_STE_PLOGI_ISSUE);
2e0fef85 6999 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
dea3101e
JB
7000 }
7001
7002 /* Send a FARP response to that node */
2e0fef85
JS
7003 if (fp->Rflags & FARP_REQUEST_FARPR)
7004 lpfc_issue_els_farpr(vport, did, 0);
dea3101e
JB
7005 }
7006 }
c9f8735b 7007 return 0;
dea3101e
JB
7008}
7009
e59058c4 7010/**
3621a710 7011 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
e59058c4
JS
7012 * @vport: pointer to a host virtual N_Port data structure.
7013 * @cmdiocb: pointer to lpfc command iocb data structure.
7014 * @ndlp: pointer to a node-list data structure.
7015 *
7016 * This routine processes Fibre Channel Address Resolution Protocol
7017 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
7018 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
7019 * the FARP response request.
7020 *
7021 * Return code
7022 * 0 - Successfully processed FARPR IOCB (currently always return 0)
7023 **/
dea3101e 7024static int
2e0fef85
JS
7025lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7026 struct lpfc_nodelist *ndlp)
dea3101e
JB
7027{
7028 struct lpfc_dmabuf *pcmd;
7029 uint32_t *lp;
7030 IOCB_t *icmd;
7031 uint32_t cmd, did;
7032
7033 icmd = &cmdiocb->iocb;
7034 did = icmd->un.elsreq64.remoteID;
7035 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7036 lp = (uint32_t *) pcmd->virt;
7037
7038 cmd = *lp++;
7039 /* FARP-RSP received from DID <did> */
e8b62011
JS
7040 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7041 "0600 FARP-RSP received from DID x%x\n", did);
dea3101e 7042 /* ACCEPT the Farp resp request */
51ef4c26 7043 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e
JB
7044
7045 return 0;
7046}
7047
e59058c4 7048/**
3621a710 7049 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
e59058c4
JS
7050 * @vport: pointer to a host virtual N_Port data structure.
7051 * @cmdiocb: pointer to lpfc command iocb data structure.
7052 * @fan_ndlp: pointer to a node-list data structure.
7053 *
7054 * This routine processes a Fabric Address Notification (FAN) IOCB
7055 * command received as an ELS unsolicited event. The FAN ELS command will
7056 * only be processed on a physical port (i.e., the @vport represents the
7057 * physical port). The fabric NodeName and PortName from the FAN IOCB are
7058 * compared against those in the phba data structure. If any of those is
7059 * different, the lpfc_initial_flogi() routine is invoked to initialize
7060 * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
7061 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
7062 * is invoked to register login to the fabric.
7063 *
7064 * Return code
7065 * 0 - Successfully processed fan iocb (currently always return 0).
7066 **/
dea3101e 7067static int
2e0fef85
JS
7068lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7069 struct lpfc_nodelist *fan_ndlp)
dea3101e 7070{
0d2b6b83 7071 struct lpfc_hba *phba = vport->phba;
dea3101e 7072 uint32_t *lp;
5024ab17 7073 FAN *fp;
dea3101e 7074
0d2b6b83
JS
7075 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
7076 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
7077 fp = (FAN *) ++lp;
5024ab17 7078 /* FAN received; Fan does not have a reply sequence */
0d2b6b83
JS
7079 if ((vport == phba->pport) &&
7080 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
5024ab17 7081 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
0d2b6b83 7082 sizeof(struct lpfc_name))) ||
5024ab17 7083 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
0d2b6b83
JS
7084 sizeof(struct lpfc_name)))) {
7085 /* This port has switched fabrics. FLOGI is required */
76a95d75 7086 lpfc_issue_init_vfi(vport);
0d2b6b83
JS
7087 } else {
7088 /* FAN verified - skip FLOGI */
7089 vport->fc_myDID = vport->fc_prevDID;
6fb120a7
JS
7090 if (phba->sli_rev < LPFC_SLI_REV4)
7091 lpfc_issue_fabric_reglogin(vport);
1b51197d
JS
7092 else {
7093 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7094 "3138 Need register VFI: (x%x/%x)\n",
7095 vport->fc_prevDID, vport->fc_myDID);
6fb120a7 7096 lpfc_issue_reg_vfi(vport);
1b51197d 7097 }
5024ab17 7098 }
dea3101e 7099 }
c9f8735b 7100 return 0;
dea3101e
JB
7101}
7102
e59058c4 7103/**
3621a710 7104 * lpfc_els_timeout - Handler funciton to the els timer
e59058c4
JS
7105 * @ptr: holder for the timer function associated data.
7106 *
7107 * This routine is invoked by the ELS timer after timeout. It posts the ELS
7108 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
7109 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
7110 * up the worker thread. It is for the worker thread to invoke the routine
7111 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
7112 **/
dea3101e
JB
7113void
7114lpfc_els_timeout(unsigned long ptr)
7115{
2e0fef85
JS
7116 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
7117 struct lpfc_hba *phba = vport->phba;
5e9d9b82 7118 uint32_t tmo_posted;
dea3101e
JB
7119 unsigned long iflag;
7120
2e0fef85 7121 spin_lock_irqsave(&vport->work_port_lock, iflag);
5e9d9b82 7122 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
06918ac5 7123 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
2e0fef85 7124 vport->work_port_events |= WORKER_ELS_TMO;
5e9d9b82 7125 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
92d7f7b0 7126
06918ac5 7127 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
5e9d9b82 7128 lpfc_worker_wake_up(phba);
dea3101e
JB
7129 return;
7130}
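/*
 * [Editor's sketch -- not part of lpfc_els.c.]  The callback above only
 * posts WORKER_ELS_TMO and wakes the worker thread; arming the timer is
 * done elsewhere in the driver during vport setup.  Roughly, with the
 * pre-timer_setup() API of this kernel generation, that looks like the
 * following (example_arm_els_timer() is hypothetical; the 2 * R_A_TOV
 * interval mirrors the rearm in lpfc_els_timeout_handler() below):
 */
static void example_arm_els_timer(struct lpfc_vport *vport)
{
	uint32_t timeout = (uint32_t)(vport->phba->fc_ratov << 1);

	setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
		    (unsigned long)vport);
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
}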
7131
2a9bf3d0 7132
e59058c4 7133/**
3621a710 7134 * lpfc_els_timeout_handler - Process an els timeout event
e59058c4
JS
7135 * @vport: pointer to a virtual N_Port data structure.
7136 *
7137 * This routine is the actual handler function that processes an ELS timeout
7138 * event. It walks the ELS ring to get and abort all the IOCBs (except the
7139 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
7140 * invoking the lpfc_sli_issue_abort_iotag() routine.
7141 **/
dea3101e 7142void
2e0fef85 7143lpfc_els_timeout_handler(struct lpfc_vport *vport)
dea3101e 7144{
2e0fef85 7145 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
7146 struct lpfc_sli_ring *pring;
7147 struct lpfc_iocbq *tmp_iocb, *piocb;
7148 IOCB_t *cmd = NULL;
7149 struct lpfc_dmabuf *pcmd;
2e0fef85 7150 uint32_t els_command = 0;
dea3101e 7151 uint32_t timeout;
2e0fef85 7152 uint32_t remote_ID = 0xffffffff;
2a9bf3d0
JS
7153 LIST_HEAD(abort_list);
7154
dea3101e 7155
dea3101e
JB
7156 timeout = (uint32_t)(phba->fc_ratov << 1);
7157
7158 pring = &phba->sli.ring[LPFC_ELS_RING];
06918ac5
JS
7159 if ((phba->pport->load_flag & FC_UNLOADING))
7160 return;
2a9bf3d0 7161 spin_lock_irq(&phba->hbalock);
0976e1a6
JS
7162 if (phba->sli_rev == LPFC_SLI_REV4)
7163 spin_lock(&pring->ring_lock);
2a9bf3d0 7164
06918ac5
JS
7165 if ((phba->pport->load_flag & FC_UNLOADING)) {
7166 if (phba->sli_rev == LPFC_SLI_REV4)
7167 spin_unlock(&pring->ring_lock);
7168 spin_unlock_irq(&phba->hbalock);
7169 return;
7170 }
7171
0976e1a6 7172 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
dea3101e
JB
7173 cmd = &piocb->iocb;
7174
2e0fef85
JS
7175 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
7176 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
7177 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
dea3101e 7178 continue;
2e0fef85
JS
7179
7180 if (piocb->vport != vport)
7181 continue;
7182
dea3101e 7183 pcmd = (struct lpfc_dmabuf *) piocb->context2;
2e0fef85
JS
7184 if (pcmd)
7185 els_command = *(uint32_t *) (pcmd->virt);
dea3101e 7186
92d7f7b0
JS
7187 if (els_command == ELS_CMD_FARP ||
7188 els_command == ELS_CMD_FARPR ||
7189 els_command == ELS_CMD_FDISC)
7190 continue;
7191
dea3101e 7192 if (piocb->drvrTimeout > 0) {
92d7f7b0 7193 if (piocb->drvrTimeout >= timeout)
dea3101e 7194 piocb->drvrTimeout -= timeout;
92d7f7b0 7195 else
dea3101e 7196 piocb->drvrTimeout = 0;
dea3101e
JB
7197 continue;
7198 }
7199
2e0fef85
JS
7200 remote_ID = 0xffffffff;
7201 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
dea3101e 7202 remote_ID = cmd->un.elsreq64.remoteID;
2e0fef85
JS
7203 else {
7204 struct lpfc_nodelist *ndlp;
7205 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
58da1ffb 7206 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
2e0fef85 7207 remote_ID = ndlp->nlp_DID;
dea3101e 7208 }
2a9bf3d0
JS
7209 list_add_tail(&piocb->dlist, &abort_list);
7210 }
0976e1a6
JS
7211 if (phba->sli_rev == LPFC_SLI_REV4)
7212 spin_unlock(&pring->ring_lock);
2a9bf3d0
JS
7213 spin_unlock_irq(&phba->hbalock);
7214
7215 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
15026c9e 7216 cmd = &piocb->iocb;
e8b62011 7217 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2a9bf3d0
JS
7218 "0127 ELS timeout Data: x%x x%x x%x "
7219 "x%x\n", els_command,
7220 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
7221 spin_lock_irq(&phba->hbalock);
7222 list_del_init(&piocb->dlist);
07951076 7223 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
2a9bf3d0 7224 spin_unlock_irq(&phba->hbalock);
dea3101e 7225 }
5a0e326d 7226
0e9bb8d7 7227 if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
06918ac5
JS
7228 if (!(phba->pport->load_flag & FC_UNLOADING))
7229 mod_timer(&vport->els_tmofunc,
7230 jiffies + msecs_to_jiffies(1000 * timeout));
dea3101e
JB
7231}
7232
e59058c4 7233/**
3621a710 7234 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
e59058c4
JS
7235 * @vport: pointer to a host virtual N_Port data structure.
7236 *
7237 * This routine is used to clean up all the outstanding ELS commands on a
7238 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
7239 * routine. After that, it walks the ELS transmit queue to remove all the
7240 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
7241 * the IOCBs with a non-NULL completion callback function, the callback
7242 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
7243 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
7244 * callback function, the IOCB will simply be released. Finally, it walks
7245 * the ELS transmit completion queue to issue an abort IOCB to any transmit
7246 * completion queue IOCB that is associated with the @vport and is not
7247 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
7248 * part of the discovery state machine) out to HBA by invoking the
7249 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
7251 * abort IOCB to any transmit completion queued IOCB; it does not guarantee
7251 * the IOCBs are aborted when this function returns.
7252 **/
dea3101e 7253void
2e0fef85 7254lpfc_els_flush_cmd(struct lpfc_vport *vport)
dea3101e 7255{
0976e1a6 7256 LIST_HEAD(abort_list);
2e0fef85 7257 struct lpfc_hba *phba = vport->phba;
329f9bc7 7258 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
dea3101e
JB
7259 struct lpfc_iocbq *tmp_iocb, *piocb;
7260 IOCB_t *cmd = NULL;
92d7f7b0
JS
7261
7262 lpfc_fabric_abort_vport(vport);
0976e1a6
JS
7263 /*
7264 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
7265 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
7266 * ultimately grabs the ring_lock, the driver must splice the list into
7267 * a working list and release the locks before calling the abort.
7268 */
7269 spin_lock_irq(&phba->hbalock);
7270 if (phba->sli_rev == LPFC_SLI_REV4)
7271 spin_lock(&pring->ring_lock);
7272
7273 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
7274 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
7275 continue;
7276
7277 if (piocb->vport != vport)
7278 continue;
7279 list_add_tail(&piocb->dlist, &abort_list);
7280 }
7281 if (phba->sli_rev == LPFC_SLI_REV4)
7282 spin_unlock(&pring->ring_lock);
7283 spin_unlock_irq(&phba->hbalock);
7284 /* Abort each iocb on the aborted list and remove the dlist links. */
7285 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
7286 spin_lock_irq(&phba->hbalock);
7287 list_del_init(&piocb->dlist);
7288 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
7289 spin_unlock_irq(&phba->hbalock);
7290 }
7291 if (!list_empty(&abort_list))
7292 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7293 "3387 abort list for txq not empty\n");
7294 INIT_LIST_HEAD(&abort_list);
dea3101e 7295
2e0fef85 7296 spin_lock_irq(&phba->hbalock);
0976e1a6
JS
7297 if (phba->sli_rev == LPFC_SLI_REV4)
7298 spin_lock(&pring->ring_lock);
7299
dea3101e
JB
7300 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
7301 cmd = &piocb->iocb;
7302
7303 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
7304 continue;
7305 }
7306
7307 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
329f9bc7
JS
7308 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
7309 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
7310 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
7311 cmd->ulpCommand == CMD_ABORT_XRI_CN)
dea3101e 7312 continue;
dea3101e 7313
2e0fef85
JS
7314 if (piocb->vport != vport)
7315 continue;
7316
0976e1a6
JS
7317 list_del_init(&piocb->list);
7318 list_add_tail(&piocb->list, &abort_list);
dea3101e 7319 }
0976e1a6
JS
7320 if (phba->sli_rev == LPFC_SLI_REV4)
7321 spin_unlock(&pring->ring_lock);
2e0fef85 7322 spin_unlock_irq(&phba->hbalock);
2534ba75 7323
a257bf90 7324 /* Cancel all the IOCBs from the completions list */
0976e1a6
JS
7325 lpfc_sli_cancel_iocbs(phba, &abort_list,
7326 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
2534ba75 7327
dea3101e
JB
7328 return;
7329}
7330
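/*
 * Illustrative sketch (not driver code): a caller that needs to flush the
 * outstanding ELS commands for every vport typically walks the vport work
 * array, the same way lpfc_cancel_all_vport_retry_delay_timer() does later
 * in this file.  The helper name example_flush_all_vports() is hypothetical.
 *
 *	static void example_flush_all_vports(struct lpfc_hba *phba)
 *	{
 *		struct lpfc_vport **vports;
 *		int i;
 *
 *		vports = lpfc_create_vport_work_array(phba);
 *		if (!vports)
 *			return;
 *		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
 *			lpfc_els_flush_cmd(vports[i]);
 *		lpfc_destroy_vport_work_array(phba, vports);
 *	}
 */
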
e59058c4 7331/**
3621a710 7332 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
e59058c4
JS
7333 * @phba: pointer to lpfc hba data structure.
7334 *
7335 * This routine is used to clean up all the outstanding ELS commands on a
7336 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
7337 * routine. After that, it walks the ELS transmit queue to remove all the
7338 * IOCBs associated with the @phba other than the QUE_RING and ABORT/CLOSE
7339 * IOCBs. For the IOCBs that have a completion callback function, the callback
7340 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
7341 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without a completion
7342 * callback function, the IOCB will simply be released. Finally, it walks
7343 * the ELS transmit completion queue and, for each IOCB that is not an IOCB
7344 * from libdfc (i.e., the management plane IOCBs that are not part of the
7345 * discovery state machine), issues an abort IOCB out to the HBA by invoking
7346 * the lpfc_sli_issue_abort_iotag() routine.
7347 **/
549e55cd
JS
7348void
7349lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
7350{
0976e1a6
JS
7351 struct lpfc_vport *vport;
7352 list_for_each_entry(vport, &phba->port_list, listentry)
7353 lpfc_els_flush_cmd(vport);
a257bf90 7354
549e55cd
JS
7355 return;
7356}
7357
ea2151b4 7358/**
3621a710 7359 * lpfc_send_els_failure_event - Posts an ELS command failure event
ea2151b4
JS
7360 * @phba: Pointer to hba context object.
7361 * @cmdiocbp: Pointer to command iocb which reported error.
7362 * @rspiocbp: Pointer to response iocb which reported error.
7363 *
7364 * This function sends an event when there is an ELS command
7365 * failure.
7366 **/
7367void
7368lpfc_send_els_failure_event(struct lpfc_hba *phba,
7369 struct lpfc_iocbq *cmdiocbp,
7370 struct lpfc_iocbq *rspiocbp)
7371{
7372 struct lpfc_vport *vport = cmdiocbp->vport;
7373 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7374 struct lpfc_lsrjt_event lsrjt_event;
7375 struct lpfc_fabric_event_header fabric_event;
7376 struct ls_rjt stat;
7377 struct lpfc_nodelist *ndlp;
7378 uint32_t *pcmd;
7379
7380 ndlp = cmdiocbp->context1;
7381 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
7382 return;
7383
7384 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
7385 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
7386 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
7387 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
7388 sizeof(struct lpfc_name));
7389 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
7390 sizeof(struct lpfc_name));
7391 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
7392 cmdiocbp->context2)->virt);
49198b37 7393 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
ea2151b4
JS
7394 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
7395 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
7396 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
7397 fc_host_post_vendor_event(shost,
7398 fc_get_event_number(),
7399 sizeof(lsrjt_event),
7400 (char *)&lsrjt_event,
ddcc50f0 7401 LPFC_NL_VENDOR_ID);
ea2151b4
JS
7402 return;
7403 }
7404 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
7405 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
7406 fabric_event.event_type = FC_REG_FABRIC_EVENT;
7407 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
7408 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
7409 else
7410 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
7411 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
7412 sizeof(struct lpfc_name));
7413 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
7414 sizeof(struct lpfc_name));
7415 fc_host_post_vendor_event(shost,
7416 fc_get_event_number(),
7417 sizeof(fabric_event),
7418 (char *)&fabric_event,
ddcc50f0 7419 LPFC_NL_VENDOR_ID);
ea2151b4
JS
7420 return;
7421 }
7422
7423}
7424
7425/**
3621a710 7426 * lpfc_send_els_event - Posts unsolicited els event
ea2151b4
JS
7427 * @vport: Pointer to vport object.
7428 * @ndlp: Pointer FC node object.
7429 * @cmd: ELS command code.
7430 *
7431 * This function posts an event when there is an incoming
7432 * unsolicited ELS command.
7433 **/
7434static void
7435lpfc_send_els_event(struct lpfc_vport *vport,
7436 struct lpfc_nodelist *ndlp,
ddcc50f0 7437 uint32_t *payload)
ea2151b4 7438{
ddcc50f0
JS
7439 struct lpfc_els_event_header *els_data = NULL;
7440 struct lpfc_logo_event *logo_data = NULL;
ea2151b4
JS
7441 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7442
ddcc50f0
JS
7443 if (*payload == ELS_CMD_LOGO) {
7444 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
7445 if (!logo_data) {
7446 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7447 "0148 Failed to allocate memory "
7448 "for LOGO event\n");
7449 return;
7450 }
7451 els_data = &logo_data->header;
7452 } else {
7453 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
7454 GFP_KERNEL);
7455 if (!els_data) {
7456 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7457 "0149 Failed to allocate memory "
7458 "for ELS event\n");
7459 return;
7460 }
7461 }
7462 els_data->event_type = FC_REG_ELS_EVENT;
7463 switch (*payload) {
ea2151b4 7464 case ELS_CMD_PLOGI:
ddcc50f0 7465 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
ea2151b4
JS
7466 break;
7467 case ELS_CMD_PRLO:
ddcc50f0 7468 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
ea2151b4
JS
7469 break;
7470 case ELS_CMD_ADISC:
ddcc50f0
JS
7471 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
7472 break;
7473 case ELS_CMD_LOGO:
7474 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
7475 /* Copy the WWPN in the LOGO payload */
7476 memcpy(logo_data->logo_wwpn, &payload[2],
7477 sizeof(struct lpfc_name));
ea2151b4
JS
7478 break;
7479 default:
e916141c 7480 kfree(els_data);
ea2151b4
JS
7481 return;
7482 }
ddcc50f0
JS
7483 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
7484 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
7485 if (*payload == ELS_CMD_LOGO) {
7486 fc_host_post_vendor_event(shost,
7487 fc_get_event_number(),
7488 sizeof(struct lpfc_logo_event),
7489 (char *)logo_data,
7490 LPFC_NL_VENDOR_ID);
7491 kfree(logo_data);
7492 } else {
7493 fc_host_post_vendor_event(shost,
7494 fc_get_event_number(),
7495 sizeof(struct lpfc_els_event_header),
7496 (char *)els_data,
7497 LPFC_NL_VENDOR_ID);
7498 kfree(els_data);
7499 }
ea2151b4
JS
7500
7501 return;
7502}
7503
7504
e59058c4 7505/**
3621a710 7506 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
e59058c4
JS
7507 * @phba: pointer to lpfc hba data structure.
7508 * @pring: pointer to a SLI ring.
7509 * @vport: pointer to a host virtual N_Port data structure.
7510 * @elsiocb: pointer to lpfc els command iocb data structure.
7511 *
7512 * This routine is used for processing the IOCB associated with an unsolicited
7513 * event. It first determines whether there is an existing ndlp that matches
7514 * the DID from the unsolicited IOCB. If not, it will create a new one with
7515 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
7516 * IOCB is then used to invoke the proper routine and to set up the proper
7517 * state of the discovery state machine.
7518 **/
ed957684
JS
7519static void
7520lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
92d7f7b0 7521 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
dea3101e 7522{
87af33fe 7523 struct Scsi_Host *shost;
dea3101e 7524 struct lpfc_nodelist *ndlp;
dea3101e 7525 struct ls_rjt stat;
92d7f7b0 7526 uint32_t *payload;
303f2f9c
JS
7527 uint32_t cmd, did, newnode;
7528 uint8_t rjt_exp, rjt_err = 0;
ed957684 7529 IOCB_t *icmd = &elsiocb->iocb;
dea3101e 7530
e47c9093 7531 if (!vport || !(elsiocb->context2))
dea3101e 7532 goto dropit;
2e0fef85 7533
dea3101e 7534 newnode = 0;
92d7f7b0
JS
7535 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
7536 cmd = *payload;
ed957684 7537 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
495a714c 7538 lpfc_post_buffer(phba, pring, 1);
dea3101e 7539
858c9f6c
JS
7540 did = icmd->un.rcvels.remoteID;
7541 if (icmd->ulpStatus) {
7542 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7543 "RCV Unsol ELS: status:x%x/x%x did:x%x",
7544 icmd->ulpStatus, icmd->un.ulpWord[4], did);
dea3101e 7545 goto dropit;
858c9f6c 7546 }
dea3101e
JB
7547
7548 /* Check to see if link went down during discovery */
ed957684 7549 if (lpfc_els_chk_latt(vport))
dea3101e 7550 goto dropit;
dea3101e 7551
c868595d 7552 /* Ignore traffic received during vport shutdown. */
92d7f7b0
JS
7553 if (vport->load_flag & FC_UNLOADING)
7554 goto dropit;
7555
92494144
JS
7556 /* If NPort discovery is delayed drop incoming ELS */
7557 if ((vport->fc_flag & FC_DISC_DELAYED) &&
7558 (cmd != ELS_CMD_PLOGI))
7559 goto dropit;
7560
2e0fef85 7561 ndlp = lpfc_findnode_did(vport, did);
c9f8735b 7562 if (!ndlp) {
dea3101e 7563 /* Cannot find existing Fabric ndlp, so allocate a new one */
c9f8735b 7564 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
ed957684 7565 if (!ndlp)
dea3101e 7566 goto dropit;
dea3101e 7567
2e0fef85 7568 lpfc_nlp_init(vport, ndlp, did);
98c9ea5c 7569 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
dea3101e 7570 newnode = 1;
e47c9093 7571 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
dea3101e 7572 ndlp->nlp_type |= NLP_FABRIC;
58da1ffb
JS
7573 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
7574 ndlp = lpfc_enable_node(vport, ndlp,
7575 NLP_STE_UNUSED_NODE);
7576 if (!ndlp)
7577 goto dropit;
7578 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
7579 newnode = 1;
7580 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
7581 ndlp->nlp_type |= NLP_FABRIC;
7582 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
7583 /* This is similar to the new node path */
7584 ndlp = lpfc_nlp_get(ndlp);
7585 if (!ndlp)
7586 goto dropit;
7587 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
7588 newnode = 1;
87af33fe 7589 }
dea3101e
JB
7590
7591 phba->fc_stat.elsRcvFrame++;
e47c9093 7592
12838e74
JS
7593 /*
7594 * Do not process any unsolicited ELS commands
7595 * if the ndlp is in DEV_LOSS
7596 */
466e840b
JS
7597 shost = lpfc_shost_from_vport(vport);
7598 spin_lock_irq(shost->host_lock);
7599 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
7600 spin_unlock_irq(shost->host_lock);
12838e74 7601 goto dropit;
466e840b
JS
7602 }
7603 spin_unlock_irq(shost->host_lock);
12838e74 7604
329f9bc7 7605 elsiocb->context1 = lpfc_nlp_get(ndlp);
2e0fef85 7606 elsiocb->vport = vport;
dea3101e
JB
7607
7608 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
7609 cmd &= ELS_CMD_MASK;
7610 }
7611 /* ELS command <elsCmd> received from NPORT <did> */
e8b62011
JS
7612 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7613 "0112 ELS command x%x received from NPORT x%x "
e74c03c8
JS
7614 "Data: x%x x%x x%x x%x\n",
7615 cmd, did, vport->port_state, vport->fc_flag,
7616 vport->fc_myDID, vport->fc_prevDID);
eec3d312
JS
7617
7618 /* reject till our FLOGI completes */
7619 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
d6de08cc 7620 (cmd != ELS_CMD_FLOGI)) {
401304cc 7621 rjt_err = LSRJT_LOGICAL_BSY;
eec3d312
JS
7622 rjt_exp = LSEXP_NOTHING_MORE;
7623 goto lsrjt;
7624 }
7625
dea3101e
JB
7626 switch (cmd) {
7627 case ELS_CMD_PLOGI:
858c9f6c
JS
7628 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7629 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
7630 did, vport->port_state, ndlp->nlp_flag);
7631
dea3101e 7632 phba->fc_stat.elsRcvPLOGI++;
858c9f6c 7633 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
e74c03c8
JS
7634 if (phba->sli_rev == LPFC_SLI_REV4 &&
7635 (phba->pport->fc_flag & FC_PT2PT)) {
7636 vport->fc_prevDID = vport->fc_myDID;
7637 /* Our DID needs to be updated before registering
7638 * the vfi. This is done in lpfc_rcv_plogi but
7639 * that is called after the reg_vfi.
7640 */
7641 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
7642 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7643 "3312 Remote port assigned DID x%x "
7644 "%x\n", vport->fc_myDID,
7645 vport->fc_prevDID);
7646 }
858c9f6c 7647
ddcc50f0 7648 lpfc_send_els_event(vport, ndlp, payload);
92494144
JS
7649
7650 /* If Nport discovery is delayed, reject PLOGIs */
7651 if (vport->fc_flag & FC_DISC_DELAYED) {
7652 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7653 rjt_exp = LSEXP_NOTHING_MORE;
92494144
JS
7654 break;
7655 }
d6de08cc 7656
858c9f6c 7657 if (vport->port_state < LPFC_DISC_AUTH) {
1b32f6aa
JS
7658 if (!(phba->pport->fc_flag & FC_PT2PT) ||
7659 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
7660 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7661 rjt_exp = LSEXP_NOTHING_MORE;
1b32f6aa
JS
7662 break;
7663 }
dea3101e 7664 }
87af33fe 7665
87af33fe
JS
7666 spin_lock_irq(shost->host_lock);
7667 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
7668 spin_unlock_irq(shost->host_lock);
7669
2e0fef85
JS
7670 lpfc_disc_state_machine(vport, ndlp, elsiocb,
7671 NLP_EVT_RCV_PLOGI);
858c9f6c 7672
dea3101e
JB
7673 break;
7674 case ELS_CMD_FLOGI:
858c9f6c
JS
7675 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7676 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
7677 did, vport->port_state, ndlp->nlp_flag);
7678
dea3101e 7679 phba->fc_stat.elsRcvFLOGI++;
51ef4c26 7680 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
87af33fe 7681 if (newnode)
98c9ea5c 7682 lpfc_nlp_put(ndlp);
dea3101e
JB
7683 break;
7684 case ELS_CMD_LOGO:
858c9f6c
JS
7685 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7686 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
7687 did, vport->port_state, ndlp->nlp_flag);
7688
dea3101e 7689 phba->fc_stat.elsRcvLOGO++;
ddcc50f0 7690 lpfc_send_els_event(vport, ndlp, payload);
2e0fef85 7691 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 7692 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7693 rjt_exp = LSEXP_NOTHING_MORE;
dea3101e
JB
7694 break;
7695 }
2e0fef85 7696 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
dea3101e
JB
7697 break;
7698 case ELS_CMD_PRLO:
858c9f6c
JS
7699 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7700 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
7701 did, vport->port_state, ndlp->nlp_flag);
7702
dea3101e 7703 phba->fc_stat.elsRcvPRLO++;
ddcc50f0 7704 lpfc_send_els_event(vport, ndlp, payload);
2e0fef85 7705 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 7706 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7707 rjt_exp = LSEXP_NOTHING_MORE;
dea3101e
JB
7708 break;
7709 }
2e0fef85 7710 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
dea3101e 7711 break;
8b017a30
JS
7712 case ELS_CMD_LCB:
7713 phba->fc_stat.elsRcvLCB++;
7714 lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
7715 break;
86478875
JS
7716 case ELS_CMD_RDP:
7717 phba->fc_stat.elsRcvRDP++;
7718 lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
7719 break;
dea3101e
JB
7720 case ELS_CMD_RSCN:
7721 phba->fc_stat.elsRcvRSCN++;
51ef4c26 7722 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
87af33fe 7723 if (newnode)
98c9ea5c 7724 lpfc_nlp_put(ndlp);
dea3101e
JB
7725 break;
7726 case ELS_CMD_ADISC:
858c9f6c
JS
7727 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7728 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
7729 did, vport->port_state, ndlp->nlp_flag);
7730
ddcc50f0 7731 lpfc_send_els_event(vport, ndlp, payload);
dea3101e 7732 phba->fc_stat.elsRcvADISC++;
2e0fef85 7733 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 7734 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7735 rjt_exp = LSEXP_NOTHING_MORE;
dea3101e
JB
7736 break;
7737 }
2e0fef85
JS
7738 lpfc_disc_state_machine(vport, ndlp, elsiocb,
7739 NLP_EVT_RCV_ADISC);
dea3101e
JB
7740 break;
7741 case ELS_CMD_PDISC:
858c9f6c
JS
7742 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7743 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
7744 did, vport->port_state, ndlp->nlp_flag);
7745
dea3101e 7746 phba->fc_stat.elsRcvPDISC++;
2e0fef85 7747 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 7748 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7749 rjt_exp = LSEXP_NOTHING_MORE;
dea3101e
JB
7750 break;
7751 }
2e0fef85
JS
7752 lpfc_disc_state_machine(vport, ndlp, elsiocb,
7753 NLP_EVT_RCV_PDISC);
dea3101e
JB
7754 break;
7755 case ELS_CMD_FARPR:
858c9f6c
JS
7756 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7757 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
7758 did, vport->port_state, ndlp->nlp_flag);
7759
dea3101e 7760 phba->fc_stat.elsRcvFARPR++;
2e0fef85 7761 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
dea3101e
JB
7762 break;
7763 case ELS_CMD_FARP:
858c9f6c
JS
7764 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7765 "RCV FARP: did:x%x/ste:x%x flg:x%x",
7766 did, vport->port_state, ndlp->nlp_flag);
7767
dea3101e 7768 phba->fc_stat.elsRcvFARP++;
2e0fef85 7769 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
dea3101e
JB
7770 break;
7771 case ELS_CMD_FAN:
858c9f6c
JS
7772 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7773 "RCV FAN: did:x%x/ste:x%x flg:x%x",
7774 did, vport->port_state, ndlp->nlp_flag);
7775
dea3101e 7776 phba->fc_stat.elsRcvFAN++;
2e0fef85 7777 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
dea3101e 7778 break;
dea3101e 7779 case ELS_CMD_PRLI:
858c9f6c
JS
7780 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7781 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
7782 did, vport->port_state, ndlp->nlp_flag);
7783
dea3101e 7784 phba->fc_stat.elsRcvPRLI++;
2e0fef85 7785 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 7786 rjt_err = LSRJT_UNABLE_TPC;
303f2f9c 7787 rjt_exp = LSEXP_NOTHING_MORE;
dea3101e
JB
7788 break;
7789 }
2e0fef85 7790 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
dea3101e 7791 break;
7bb3b137 7792 case ELS_CMD_LIRR:
858c9f6c
JS
7793 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7794 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
7795 did, vport->port_state, ndlp->nlp_flag);
7796
7bb3b137 7797 phba->fc_stat.elsRcvLIRR++;
2e0fef85 7798 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
87af33fe 7799 if (newnode)
98c9ea5c 7800 lpfc_nlp_put(ndlp);
7bb3b137 7801 break;
12265f68
JS
7802 case ELS_CMD_RLS:
7803 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7804 "RCV RLS: did:x%x/ste:x%x flg:x%x",
7805 did, vport->port_state, ndlp->nlp_flag);
7806
7807 phba->fc_stat.elsRcvRLS++;
7808 lpfc_els_rcv_rls(vport, elsiocb, ndlp);
7809 if (newnode)
7810 lpfc_nlp_put(ndlp);
7811 break;
7bb3b137 7812 case ELS_CMD_RPS:
858c9f6c
JS
7813 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7814 "RCV RPS: did:x%x/ste:x%x flg:x%x",
7815 did, vport->port_state, ndlp->nlp_flag);
7816
7bb3b137 7817 phba->fc_stat.elsRcvRPS++;
2e0fef85 7818 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
87af33fe 7819 if (newnode)
98c9ea5c 7820 lpfc_nlp_put(ndlp);
7bb3b137
JW
7821 break;
7822 case ELS_CMD_RPL:
858c9f6c
JS
7823 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7824 "RCV RPL: did:x%x/ste:x%x flg:x%x",
7825 did, vport->port_state, ndlp->nlp_flag);
7826
7bb3b137 7827 phba->fc_stat.elsRcvRPL++;
2e0fef85 7828 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
87af33fe 7829 if (newnode)
98c9ea5c 7830 lpfc_nlp_put(ndlp);
7bb3b137 7831 break;
dea3101e 7832 case ELS_CMD_RNID:
858c9f6c
JS
7833 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7834 "RCV RNID: did:x%x/ste:x%x flg:x%x",
7835 did, vport->port_state, ndlp->nlp_flag);
7836
dea3101e 7837 phba->fc_stat.elsRcvRNID++;
2e0fef85 7838 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
87af33fe 7839 if (newnode)
98c9ea5c 7840 lpfc_nlp_put(ndlp);
dea3101e 7841 break;
12265f68
JS
7842 case ELS_CMD_RTV:
7843 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7844 "RCV RTV: did:x%x/ste:x%x flg:x%x",
7845 did, vport->port_state, ndlp->nlp_flag);
7846 phba->fc_stat.elsRcvRTV++;
7847 lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
7848 if (newnode)
7849 lpfc_nlp_put(ndlp);
7850 break;
5ffc266e
JS
7851 case ELS_CMD_RRQ:
7852 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7853 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
7854 did, vport->port_state, ndlp->nlp_flag);
7855
7856 phba->fc_stat.elsRcvRRQ++;
7857 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
7858 if (newnode)
7859 lpfc_nlp_put(ndlp);
7860 break;
12265f68
JS
7861 case ELS_CMD_ECHO:
7862 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7863 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
7864 did, vport->port_state, ndlp->nlp_flag);
7865
7866 phba->fc_stat.elsRcvECHO++;
7867 lpfc_els_rcv_echo(vport, elsiocb, ndlp);
7868 if (newnode)
7869 lpfc_nlp_put(ndlp);
7870 break;
303f2f9c
JS
7871 case ELS_CMD_REC:
7872 /* receive this due to exchange closed */
7873 rjt_err = LSRJT_UNABLE_TPC;
7874 rjt_exp = LSEXP_INVALID_OX_RX;
7875 break;
dea3101e 7876 default:
858c9f6c
JS
7877 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7878 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
7879 cmd, did, vport->port_state);
7880
dea3101e 7881 /* Unsupported ELS command, reject */
63e801ce 7882 rjt_err = LSRJT_CMD_UNSUPPORTED;
303f2f9c 7883 rjt_exp = LSEXP_NOTHING_MORE;
dea3101e
JB
7884
7885 /* Unknown ELS command <elsCmd> received from NPORT <did> */
e8b62011
JS
7886 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7887 "0115 Unknown ELS command x%x "
7888 "received from NPORT x%x\n", cmd, did);
87af33fe 7889 if (newnode)
98c9ea5c 7890 lpfc_nlp_put(ndlp);
dea3101e
JB
7891 break;
7892 }
7893
eec3d312 7894lsrjt:
dea3101e
JB
7895 /* check if need to LS_RJT received ELS cmd */
7896 if (rjt_err) {
92d7f7b0 7897 memset(&stat, 0, sizeof(stat));
858c9f6c 7898 stat.un.b.lsRjtRsnCode = rjt_err;
303f2f9c 7899 stat.un.b.lsRjtRsnCodeExp = rjt_exp;
858c9f6c
JS
7900 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
7901 NULL);
dea3101e
JB
7902 }
7903
d7c255b2
JS
7904 lpfc_nlp_put(elsiocb->context1);
7905 elsiocb->context1 = NULL;
ed957684
JS
7906 return;
7907
7908dropit:
98c9ea5c 7909 if (vport && !(vport->load_flag & FC_UNLOADING))
6fb120a7
JS
7910 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7911 "0111 Dropping received ELS cmd "
ed957684 7912 "Data: x%x x%x x%x\n",
6fb120a7 7913 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
ed957684
JS
7914 phba->fc_stat.elsRcvDrop++;
7915}
7916
e59058c4 7917/**
3621a710 7918 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
e59058c4
JS
7919 * @phba: pointer to lpfc hba data structure.
7920 * @pring: pointer to a SLI ring.
7921 * @elsiocb: pointer to lpfc els iocb data structure.
7922 *
7923 * This routine is used to process an unsolicited event received from a SLI
7924 * (Service Level Interface) ring. The actual processing of the data buffer
7925 * associated with the unsolicited event is done by invoking the routine
7926 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the
7927 * SLI ring on which the unsolicited event was received.
7928 **/
ed957684
JS
7929void
7930lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7931 struct lpfc_iocbq *elsiocb)
7932{
7933 struct lpfc_vport *vport = phba->pport;
ed957684 7934 IOCB_t *icmd = &elsiocb->iocb;
ed957684 7935 dma_addr_t paddr;
92d7f7b0
JS
7936 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
7937 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
7938
d7c255b2 7939 elsiocb->context1 = NULL;
92d7f7b0
JS
7940 elsiocb->context2 = NULL;
7941 elsiocb->context3 = NULL;
ed957684 7942
92d7f7b0
JS
7943 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
7944 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
7945 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
e3d2b802
JS
7946 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
7947 IOERR_RCV_BUFFER_WAITING) {
ed957684
JS
7948 phba->fc_stat.NoRcvBuf++;
7949 /* Not enough posted buffers; Try posting more buffers */
92d7f7b0 7950 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
495a714c 7951 lpfc_post_buffer(phba, pring, 0);
ed957684
JS
7952 return;
7953 }
7954
92d7f7b0
JS
7955 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
7956 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
7957 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
7958 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
7959 vport = phba->pport;
6fb120a7
JS
7960 else
7961 vport = lpfc_find_vport_by_vpid(phba,
6d368e53 7962 icmd->unsli3.rcvsli3.vpi);
92d7f7b0 7963 }
6d368e53 7964
7f5f3d0d
JS
7965 /* If there are no BDEs associated
7966 * with this IOCB, there is nothing to do.
7967 */
ed957684
JS
7968 if (icmd->ulpBdeCount == 0)
7969 return;
7970
7f5f3d0d
JS
7971 /* type of ELS cmd is first 32bit word
7972 * in packet
7973 */
ed957684 7974 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
92d7f7b0 7975 elsiocb->context2 = bdeBuf1;
ed957684
JS
7976 } else {
7977 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
7978 icmd->un.cont64[0].addrLow);
92d7f7b0
JS
7979 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
7980 paddr);
ed957684
JS
7981 }
7982
92d7f7b0
JS
7983 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
7984 /*
7985 * The different unsolicited event handlers would tell us
7986 * if they are done with "mp" by setting context2 to NULL.
7987 */
dea3101e 7988 if (elsiocb->context2) {
92d7f7b0
JS
7989 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
7990 elsiocb->context2 = NULL;
dea3101e 7991 }
ed957684
JS
7992
7993 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
92d7f7b0 7994 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
ed957684 7995 icmd->ulpBdeCount == 2) {
92d7f7b0
JS
7996 elsiocb->context2 = bdeBuf2;
7997 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
ed957684
JS
7998 /* free mp if we are done with it */
7999 if (elsiocb->context2) {
92d7f7b0
JS
8000 lpfc_in_buf_free(phba, elsiocb->context2);
8001 elsiocb->context2 = NULL;
8002 }
8003 }
8004}
8005
bd4b3e5c 8006static void
4258e98e
JS
8007lpfc_start_fdmi(struct lpfc_vport *vport)
8008{
8009 struct lpfc_hba *phba = vport->phba;
8010 struct lpfc_nodelist *ndlp;
8011
8012 /* If this is the first time, allocate an ndlp and initialize
8013 * it. Otherwise, make sure the node is enabled and then do the
8014 * login.
8015 */
8016 ndlp = lpfc_findnode_did(vport, FDMI_DID);
8017 if (!ndlp) {
8018 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
8019 if (ndlp) {
8020 lpfc_nlp_init(vport, ndlp, FDMI_DID);
8021 ndlp->nlp_type |= NLP_FABRIC;
8022 } else {
8023 return;
8024 }
8025 }
8026 if (!NLP_CHK_NODE_ACT(ndlp))
8027 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
8028
8029 if (ndlp) {
8030 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8031 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
8032 }
8033}
8034
e59058c4 8035/**
3621a710 8036 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
e59058c4
JS
8037 * @phba: pointer to lpfc hba data structure.
8038 * @vport: pointer to a virtual N_Port data structure.
8039 *
8040 * This routine issues a Port Login (PLOGI) to the Name Server with
8041 * State Change Request (SCR) for a @vport. This routine will create an
8042 * ndlp for the Name Server associated with the @vport if such a node does
8043 * not already exist. The PLOGI to the Name Server is issued by invoking the
8044 * lpfc_issue_els_plogi() routine. If the Fabric-Device Management Interface
8045 * (FDMI) is configured for the @vport, an FDMI node will be created and
8046 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
8047 **/
92d7f7b0
JS
8048void
8049lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
8050{
4258e98e 8051 struct lpfc_nodelist *ndlp;
92494144
JS
8052 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8053
8054 /*
8055 * If lpfc_delay_discovery parameter is set and the clean address
8056 * bit is cleared and fc fabric parameters chenged, delay FC NPort
8057 * discovery.
8058 */
8059 spin_lock_irq(shost->host_lock);
8060 if (vport->fc_flag & FC_DISC_DELAYED) {
8061 spin_unlock_irq(shost->host_lock);
18775708
JS
8062 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
8063 "3334 Delay fc port discovery for %d seconds\n",
8064 phba->fc_ratov);
92494144 8065 mod_timer(&vport->delayed_disc_tmo,
256ec0d0 8066 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
92494144
JS
8067 return;
8068 }
8069 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
8070
8071 ndlp = lpfc_findnode_did(vport, NameServer_DID);
8072 if (!ndlp) {
8073 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
8074 if (!ndlp) {
76a95d75 8075 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
92d7f7b0
JS
8076 lpfc_disc_start(vport);
8077 return;
8078 }
8079 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
8080 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8081 "0251 NameServer login: no memory\n");
92d7f7b0
JS
8082 return;
8083 }
8084 lpfc_nlp_init(vport, ndlp, NameServer_DID);
e47c9093
JS
8085 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
8086 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
8087 if (!ndlp) {
76a95d75 8088 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
e47c9093
JS
8089 lpfc_disc_start(vport);
8090 return;
8091 }
8092 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8093 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8094 "0348 NameServer login: node freed\n");
8095 return;
8096 }
92d7f7b0 8097 }
58da1ffb 8098 ndlp->nlp_type |= NLP_FABRIC;
92d7f7b0
JS
8099
8100 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8101
8102 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
8103 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
8104 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8105 "0252 Cannot issue NameServer login\n");
92d7f7b0
JS
8106 return;
8107 }
8108
8663cbbe
JS
8109 if ((phba->cfg_enable_SmartSAN ||
8110 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
8111 (vport->load_flag & FC_ALLOW_FDMI))
4258e98e 8112 lpfc_start_fdmi(vport);
92d7f7b0
JS
8113}
8114
e59058c4 8115/**
3621a710 8116 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
e59058c4
JS
8117 * @phba: pointer to lpfc hba data structure.
8118 * @pmb: pointer to the driver internal queue element for mailbox command.
8119 *
8120 * This routine is the completion callback function for the register new vport
8121 * mailbox command. If the new vport mailbox command completes successfully,
8122 * the fabric registration login shall be performed on the physical port (the
8123 * new vport created is actually a physical port, with VPI 0) or the port
8124 * login to the Name Server for State Change Request (SCR) will be performed
8125 * on a virtual port (a real virtual port, with VPI greater than 0).
8126 **/
92d7f7b0
JS
8127static void
8128lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
8129{
8130 struct lpfc_vport *vport = pmb->vport;
8131 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8132 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
04c68496 8133 MAILBOX_t *mb = &pmb->u.mb;
695a814e 8134 int rc;
92d7f7b0 8135
09372820 8136 spin_lock_irq(shost->host_lock);
92d7f7b0 8137 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
09372820 8138 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
8139
8140 if (mb->mbxStatus) {
e8b62011 8141 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
38b92ef8
JS
8142 "0915 Register VPI failed : Status: x%x"
8143 " upd bit: x%x \n", mb->mbxStatus,
8144 mb->un.varRegVpi.upd);
8145 if (phba->sli_rev == LPFC_SLI_REV4 &&
8146 mb->un.varRegVpi.upd)
8147 goto mbox_err_exit ;
92d7f7b0
JS
8148
8149 switch (mb->mbxStatus) {
8150 case 0x11: /* unsupported feature */
8151 case 0x9603: /* max_vpi exceeded */
7f5f3d0d 8152 case 0x9602: /* Link event since CLEAR_LA */
92d7f7b0
JS
8153 /* giving up on vport registration */
8154 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8155 spin_lock_irq(shost->host_lock);
8156 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
8157 spin_unlock_irq(shost->host_lock);
8158 lpfc_can_disctmo(vport);
8159 break;
695a814e
JS
8160 /* If reg_vpi fail with invalid VPI status, re-init VPI */
8161 case 0x20:
8162 spin_lock_irq(shost->host_lock);
8163 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
8164 spin_unlock_irq(shost->host_lock);
8165 lpfc_init_vpi(phba, pmb, vport->vpi);
8166 pmb->vport = vport;
8167 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
8168 rc = lpfc_sli_issue_mbox(phba, pmb,
8169 MBX_NOWAIT);
8170 if (rc == MBX_NOT_FINISHED) {
8171 lpfc_printf_vlog(vport,
8172 KERN_ERR, LOG_MBOX,
8173 "2732 Failed to issue INIT_VPI"
8174 " mailbox command\n");
8175 } else {
8176 lpfc_nlp_put(ndlp);
8177 return;
8178 }
8179
92d7f7b0
JS
8180 default:
8181 /* Try to recover from this error */
5af5eee7
JS
8182 if (phba->sli_rev == LPFC_SLI_REV4)
8183 lpfc_sli4_unreg_all_rpis(vport);
92d7f7b0 8184 lpfc_mbx_unreg_vpi(vport);
09372820 8185 spin_lock_irq(shost->host_lock);
92d7f7b0 8186 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
09372820 8187 spin_unlock_irq(shost->host_lock);
4b40c59e
JS
8188 if (vport->port_type == LPFC_PHYSICAL_PORT
8189 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
76a95d75 8190 lpfc_issue_init_vfi(vport);
7f5f3d0d
JS
8191 else
8192 lpfc_initial_fdisc(vport);
92d7f7b0
JS
8193 break;
8194 }
92d7f7b0 8195 } else {
695a814e 8196 spin_lock_irq(shost->host_lock);
1987807d 8197 vport->vpi_state |= LPFC_VPI_REGISTERED;
695a814e
JS
8198 spin_unlock_irq(shost->host_lock);
8199 if (vport == phba->pport) {
6fb120a7
JS
8200 if (phba->sli_rev < LPFC_SLI_REV4)
8201 lpfc_issue_fabric_reglogin(vport);
695a814e 8202 else {
fc2b989b
JS
8203 /*
8204 * If the physical port is instantiated using
8205 * FDISC, do not start vport discovery.
8206 */
8207 if (vport->port_state != LPFC_FDISC)
8208 lpfc_start_fdiscs(phba);
695a814e
JS
8209 lpfc_do_scr_ns_plogi(phba, vport);
8210 }
8211 } else
92d7f7b0
JS
8212 lpfc_do_scr_ns_plogi(phba, vport);
8213 }
38b92ef8 8214mbox_err_exit:
fa4066b6
JS
8215 /* Now, we decrement the ndlp reference count held for this
8216 * callback function
8217 */
8218 lpfc_nlp_put(ndlp);
8219
92d7f7b0
JS
8220 mempool_free(pmb, phba->mbox_mem_pool);
8221 return;
8222}
8223
e59058c4 8224/**
3621a710 8225 * lpfc_register_new_vport - Register a new vport with a HBA
e59058c4
JS
8226 * @phba: pointer to lpfc hba data structure.
8227 * @vport: pointer to a host virtual N_Port data structure.
8228 * @ndlp: pointer to a node-list data structure.
8229 *
8230 * This routine registers the @vport as a new virtual port with a HBA.
8231 * It is done through a registering vpi mailbox command.
8232 **/
695a814e 8233void
92d7f7b0
JS
8234lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
8235 struct lpfc_nodelist *ndlp)
8236{
09372820 8237 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
92d7f7b0
JS
8238 LPFC_MBOXQ_t *mbox;
8239
8240 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8241 if (mbox) {
6fb120a7 8242 lpfc_reg_vpi(vport, mbox);
92d7f7b0
JS
8243 mbox->vport = vport;
8244 mbox->context2 = lpfc_nlp_get(ndlp);
8245 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
0b727fea 8246 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
92d7f7b0 8247 == MBX_NOT_FINISHED) {
fa4066b6
JS
8248 /* mailbox command not success, decrement ndlp
8249 * reference count for this command
8250 */
8251 lpfc_nlp_put(ndlp);
92d7f7b0 8252 mempool_free(mbox, phba->mbox_mem_pool);
92d7f7b0 8253
e8b62011
JS
8254 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
8255 "0253 Register VPI: Can't send mbox\n");
fa4066b6 8256 goto mbox_err_exit;
92d7f7b0
JS
8257 }
8258 } else {
e8b62011
JS
8259 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
8260 "0254 Register VPI: no memory\n");
fa4066b6 8261 goto mbox_err_exit;
92d7f7b0 8262 }
fa4066b6
JS
8263 return;
8264
8265mbox_err_exit:
8266 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8267 spin_lock_irq(shost->host_lock);
8268 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
8269 spin_unlock_irq(shost->host_lock);
8270 return;
92d7f7b0
JS
8271}
8272
695a814e 8273/**
0c9ab6f5 8274 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timers
695a814e
JS
8275 * @phba: pointer to lpfc hba data structure.
8276 *
0c9ab6f5 8277 * This routine cancels the retry delay timers for all the vports.
695a814e
JS
8278 **/
8279void
0c9ab6f5 8280lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
695a814e
JS
8281{
8282 struct lpfc_vport **vports;
8283 struct lpfc_nodelist *ndlp;
695a814e 8284 uint32_t link_state;
0c9ab6f5 8285 int i;
695a814e
JS
8286
8287 /* Treat this failure as linkdown for all vports */
8288 link_state = phba->link_state;
8289 lpfc_linkdown(phba);
8290 phba->link_state = link_state;
8291
8292 vports = lpfc_create_vport_work_array(phba);
8293
8294 if (vports) {
8295 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
8296 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
8297 if (ndlp)
8298 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
8299 lpfc_els_flush_cmd(vports[i]);
8300 }
8301 lpfc_destroy_vport_work_array(phba, vports);
8302 }
0c9ab6f5
JS
8303}
8304
8305/**
8306 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
8307 * @phba: pointer to lpfc hba data structure.
8308 *
8309 * This routine aborts all pending discovery commands and
8310 * starts a timer to retry FLOGI for the physical port
8311 * discovery.
8312 **/
8313void
8314lpfc_retry_pport_discovery(struct lpfc_hba *phba)
8315{
8316 struct lpfc_nodelist *ndlp;
8317 struct Scsi_Host *shost;
8318
8319 /* Cancel all the vports' retry delay timers */
8320 lpfc_cancel_all_vport_retry_delay_timer(phba);
695a814e
JS
8321
8322 /* If the fabric requires FLOGI, then re-instantiate the physical login */
8323 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
8324 if (!ndlp)
8325 return;
8326
695a814e 8327 shost = lpfc_shost_from_vport(phba->pport);
256ec0d0 8328 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
695a814e
JS
8329 spin_lock_irq(shost->host_lock);
8330 ndlp->nlp_flag |= NLP_DELAY_TMO;
8331 spin_unlock_irq(shost->host_lock);
8332 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
8333 phba->pport->port_state = LPFC_FLOGI;
8334 return;
8335}
8336
8337/**
8338 * lpfc_fabric_login_reqd - Check if FLOGI required.
8339 * @phba: pointer to lpfc hba data structure.
8340 * @cmdiocb: pointer to FDISC command iocb.
8341 * @rspiocb: pointer to FDISC response iocb.
8342 *
8343 * This routine checks if a FLOGI is required for FDISC
8344 * to succeed.
8345 **/
8346static int
8347lpfc_fabric_login_reqd(struct lpfc_hba *phba,
8348 struct lpfc_iocbq *cmdiocb,
8349 struct lpfc_iocbq *rspiocb)
8350{
8351
8352 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
8353 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
8354 return 0;
8355 else
8356 return 1;
8357}
8358
e59058c4 8359/**
3621a710 8360 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
e59058c4
JS
8361 * @phba: pointer to lpfc hba data structure.
8362 * @cmdiocb: pointer to lpfc command iocb data structure.
8363 * @rspiocb: pointer to lpfc response iocb data structure.
8364 *
8365 * This routine is the completion callback function to a Fabric Discover
8366 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
8367 * single threaded, each FDISC completion callback function will reset
8368 * the discovery timer for all vports so that the timers do not time out
8369 * unnecessarily. The function checks the FDISC IOCB status. If an error
8370 * is detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the
8371 * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID
8372 * assigned to the vport has been changed with the completion of the FDISC
8373 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
8374 * are unregistered from the HBA, and then the lpfc_register_new_vport()
8375 * routine is invoked to register new vport with the HBA. Otherwise, the
8376 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
8377 * Server for State Change Request (SCR).
8378 **/
92d7f7b0
JS
8379static void
8380lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8381 struct lpfc_iocbq *rspiocb)
8382{
8383 struct lpfc_vport *vport = cmdiocb->vport;
8384 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8385 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
8386 struct lpfc_nodelist *np;
8387 struct lpfc_nodelist *next_np;
8388 IOCB_t *irsp = &rspiocb->iocb;
8389 struct lpfc_iocbq *piocb;
92494144
JS
8390 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
8391 struct serv_parm *sp;
8392 uint8_t fabric_param_changed;
92d7f7b0 8393
e8b62011
JS
8394 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8395 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
8396 irsp->ulpStatus, irsp->un.ulpWord[4],
8397 vport->fc_prevDID);
92d7f7b0
JS
8398 /* Since all FDISCs are being single threaded, we
8399 * must reset the discovery timer for ALL vports
8400 * waiting to send FDISC when one completes.
8401 */
8402 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
8403 lpfc_set_disctmo(piocb->vport);
8404 }
8405
858c9f6c
JS
8406 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8407 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
8408 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
8409
92d7f7b0 8410 if (irsp->ulpStatus) {
695a814e
JS
8411
8412 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
8413 lpfc_retry_pport_discovery(phba);
8414 goto out;
8415 }
8416
92d7f7b0
JS
8417 /* Check for retry */
8418 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
8419 goto out;
92d7f7b0 8420 /* FDISC failed */
e8b62011 8421 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6b5151fd 8422 "0126 FDISC failed. (x%x/x%x)\n",
e8b62011 8423 irsp->ulpStatus, irsp->un.ulpWord[4]);
d7c255b2
JS
8424 goto fdisc_failed;
8425 }
d7c255b2 8426 spin_lock_irq(shost->host_lock);
695a814e 8427 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
4b40c59e 8428 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
d7c255b2 8429 vport->fc_flag |= FC_FABRIC;
76a95d75 8430 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
d7c255b2
JS
8431 vport->fc_flag |= FC_PUBLIC_LOOP;
8432 spin_unlock_irq(shost->host_lock);
92d7f7b0 8433
d7c255b2
JS
8434 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
8435 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
92494144 8436 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
a2fc4aef
JS
8437 if (!prsp)
8438 goto out;
92494144
JS
8439 sp = prsp->virt + sizeof(uint32_t);
8440 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
8441 memcpy(&vport->fabric_portname, &sp->portName,
8442 sizeof(struct lpfc_name));
8443 memcpy(&vport->fabric_nodename, &sp->nodeName,
8444 sizeof(struct lpfc_name));
8445 if (fabric_param_changed &&
d7c255b2
JS
8446 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
8447 /* If our NportID changed, we need to ensure all
8448 * remaining NPORTs get unreg_login'ed so we can
8449 * issue unreg_vpi.
8450 */
8451 list_for_each_entry_safe(np, next_np,
8452 &vport->fc_nodes, nlp_listp) {
8453 if (!NLP_CHK_NODE_ACT(ndlp) ||
8454 (np->nlp_state != NLP_STE_NPR_NODE) ||
8455 !(np->nlp_flag & NLP_NPR_ADISC))
8456 continue;
09372820 8457 spin_lock_irq(shost->host_lock);
d7c255b2 8458 np->nlp_flag &= ~NLP_NPR_ADISC;
09372820 8459 spin_unlock_irq(shost->host_lock);
d7c255b2 8460 lpfc_unreg_rpi(vport, np);
92d7f7b0 8461 }
78730cfe 8462 lpfc_cleanup_pending_mbox(vport);
5af5eee7
JS
8463
8464 if (phba->sli_rev == LPFC_SLI_REV4)
8465 lpfc_sli4_unreg_all_rpis(vport);
8466
d7c255b2
JS
8467 lpfc_mbx_unreg_vpi(vport);
8468 spin_lock_irq(shost->host_lock);
8469 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
0f65ff68
JS
8470 if (phba->sli_rev == LPFC_SLI_REV4)
8471 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
4b40c59e
JS
8472 else
8473 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
d7c255b2 8474 spin_unlock_irq(shost->host_lock);
38b92ef8
JS
8475 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
8476 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
8477 /*
8478 * Driver needs to re-reg VPI in order for f/w
8479 * to update the MAC address.
8480 */
8481 lpfc_register_new_vport(phba, vport, ndlp);
5ac6b303 8482 goto out;
92d7f7b0
JS
8483 }
8484
ecfd03c6
JS
8485 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
8486 lpfc_issue_init_vpi(vport);
8487 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
d7c255b2
JS
8488 lpfc_register_new_vport(phba, vport, ndlp);
8489 else
8490 lpfc_do_scr_ns_plogi(phba, vport);
8491 goto out;
8492fdisc_failed:
c84163d1
JS
8493 if (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)
8494 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
d7c255b2
JS
8495 /* Cancel discovery timer */
8496 lpfc_can_disctmo(vport);
8497 lpfc_nlp_put(ndlp);
92d7f7b0
JS
8498out:
8499 lpfc_els_free_iocb(phba, cmdiocb);
8500}
8501
e59058c4 8502/**
3621a710 8503 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
e59058c4
JS
8504 * @vport: pointer to a virtual N_Port data structure.
8505 * @ndlp: pointer to a node-list data structure.
8506 * @retry: number of retries to the command IOCB.
8507 *
8508 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
8509 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
8510 * routine to issue the IOCB, which makes sure only one outstanding fabric
8511 * IOCB will be sent off the HBA at any given time.
8512 *
8513 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
8514 * will be incremented by 1 for holding the ndlp and the reference to ndlp
8515 * will be stored into the context1 field of the IOCB for the completion
8516 * callback function to the FDISC ELS command.
8517 *
8518 * Return code
8519 * 0 - Successfully issued fdisc iocb command
8520 * 1 - Failed to issue fdisc iocb command
8521 **/
a6ababd2 8522static int
92d7f7b0
JS
8523lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
8524 uint8_t retry)
8525{
8526 struct lpfc_hba *phba = vport->phba;
8527 IOCB_t *icmd;
8528 struct lpfc_iocbq *elsiocb;
8529 struct serv_parm *sp;
8530 uint8_t *pcmd;
8531 uint16_t cmdsize;
8532 int did = ndlp->nlp_DID;
8533 int rc;
92d7f7b0 8534
5ffc266e 8535 vport->port_state = LPFC_FDISC;
6b5151fd 8536 vport->fc_myDID = 0;
92d7f7b0
JS
8537 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
8538 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
8539 ELS_CMD_FDISC);
8540 if (!elsiocb) {
92d7f7b0 8541 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
8542 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8543 "0255 Issue FDISC: no IOCB\n");
92d7f7b0
JS
8544 return 1;
8545 }
8546
8547 icmd = &elsiocb->iocb;
8548 icmd->un.elsreq64.myID = 0;
8549 icmd->un.elsreq64.fl = 1;
8550
73d91e50
JS
8551 /*
8552 * SLI3 ports require a different context type value than SLI4.
8553 * Catch SLI3 ports here and override the prep.
8554 */
8555 if (phba->sli_rev == LPFC_SLI_REV3) {
f1126688
JS
8556 icmd->ulpCt_h = 1;
8557 icmd->ulpCt_l = 0;
8558 }
92d7f7b0
JS
8559
8560 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
8561 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
8562 pcmd += sizeof(uint32_t); /* CSP Word 1 */
8563 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
8564 sp = (struct serv_parm *) pcmd;
8565 /* Setup CSPs accordingly for Fabric */
8566 sp->cmn.e_d_tov = 0;
8567 sp->cmn.w2.r_a_tov = 0;
df9e1b59 8568 sp->cmn.virtual_fabric_support = 0;
92d7f7b0
JS
8569 sp->cls1.classValid = 0;
8570 sp->cls2.seqDelivery = 1;
8571 sp->cls3.seqDelivery = 1;
8572
8573 pcmd += sizeof(uint32_t); /* CSP Word 2 */
8574 pcmd += sizeof(uint32_t); /* CSP Word 3 */
8575 pcmd += sizeof(uint32_t); /* CSP Word 4 */
8576 pcmd += sizeof(uint32_t); /* Port Name */
8577 memcpy(pcmd, &vport->fc_portname, 8);
8578 pcmd += sizeof(uint32_t); /* Node Name */
8579 pcmd += sizeof(uint32_t); /* Node Name */
8580 memcpy(pcmd, &vport->fc_nodename, 8);
8581
8582 lpfc_set_disctmo(vport);
8583
8584 phba->fc_stat.elsXmitFDISC++;
8585 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
8586
858c9f6c
JS
8587 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8588 "Issue FDISC: did:x%x",
8589 did, 0, 0);
8590
92d7f7b0
JS
8591 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
8592 if (rc == IOCB_ERROR) {
8593 lpfc_els_free_iocb(phba, elsiocb);
92d7f7b0 8594 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
8595 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8596 "0256 Issue FDISC: Cannot send IOCB\n");
92d7f7b0
JS
8597 return 1;
8598 }
8599 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
92d7f7b0
JS
8600 return 0;
8601}
8602
e59058c4 8603/**
3621a710 8604 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
e59058c4
JS
8605 * @phba: pointer to lpfc hba data structure.
8606 * @cmdiocb: pointer to lpfc command iocb data structure.
8607 * @rspiocb: pointer to lpfc response iocb data structure.
8608 *
8609 * This routine is the completion callback function to the issuing of a LOGO
8610 * ELS command off a vport. It frees the command IOCB and then decrements the
8611 * reference count held on the ndlp for this completion function, indicating
8612 * that the reference to the ndlp is no longer needed. Note that the
8613 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
8614 * callback function and an additional explicit ndlp reference decrement
8615 * will trigger the actual release of the ndlp.
8616 **/
92d7f7b0
JS
8617static void
8618lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8619 struct lpfc_iocbq *rspiocb)
8620{
8621 struct lpfc_vport *vport = cmdiocb->vport;
858c9f6c 8622 IOCB_t *irsp;
e47c9093 8623 struct lpfc_nodelist *ndlp;
9589b062 8624 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
858c9f6c 8625
9589b062 8626 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
858c9f6c
JS
8627 irsp = &rspiocb->iocb;
8628 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8629 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
8630 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
92d7f7b0
JS
8631
8632 lpfc_els_free_iocb(phba, cmdiocb);
8633 vport->unreg_vpi_cmpl = VPORT_ERROR;
e47c9093
JS
8634
8635 /* Trigger the release of the ndlp after logo */
8636 lpfc_nlp_put(ndlp);
9589b062
JS
8637
8638 /* NPIV LOGO completes to NPort <nlp_DID> */
8639 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8640 "2928 NPIV LOGO completes to NPort x%x "
8641 "Data: x%x x%x x%x x%x\n",
8642 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
8643 irsp->ulpTimeout, vport->num_disc_nodes);
8644
8645 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
8646 spin_lock_irq(shost->host_lock);
73dc0dbe 8647 vport->fc_flag &= ~FC_NDISC_ACTIVE;
9589b062
JS
8648 vport->fc_flag &= ~FC_FABRIC;
8649 spin_unlock_irq(shost->host_lock);
73dc0dbe 8650 lpfc_can_disctmo(vport);
9589b062 8651 }
92d7f7b0
JS
8652}
8653
e59058c4 8654/**
3621a710 8655 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
e59058c4
JS
8656 * @vport: pointer to a virtual N_Port data structure.
8657 * @ndlp: pointer to a node-list data structure.
8658 *
8659 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
8660 *
8661 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
8662 * will be incremented by 1 for holding the ndlp and the reference to ndlp
8663 * will be stored into the context1 field of the IOCB for the completion
8664 * callback function to the LOGO ELS command.
8665 *
8666 * Return codes
8667 * 0 - Successfully issued logo off the @vport
8668 * 1 - Failed to issue logo off the @vport
8669 **/
92d7f7b0
JS
8670int
8671lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
8672{
8673 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8674 struct lpfc_hba *phba = vport->phba;
92d7f7b0
JS
8675 struct lpfc_iocbq *elsiocb;
8676 uint8_t *pcmd;
8677 uint16_t cmdsize;
8678
8679 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
8680 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
8681 ELS_CMD_LOGO);
8682 if (!elsiocb)
8683 return 1;
8684
92d7f7b0
JS
8685 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
8686 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
8687 pcmd += sizeof(uint32_t);
8688
8689 /* Fill in LOGO payload */
8690 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
8691 pcmd += sizeof(uint32_t);
8692 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
8693
858c9f6c
JS
8694 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8695 "Issue LOGO npiv did:x%x flg:x%x",
8696 ndlp->nlp_DID, ndlp->nlp_flag, 0);
8697
92d7f7b0
JS
8698 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
8699 spin_lock_irq(shost->host_lock);
8700 ndlp->nlp_flag |= NLP_LOGO_SND;
8701 spin_unlock_irq(shost->host_lock);
3772a991
JS
8702 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
8703 IOCB_ERROR) {
92d7f7b0
JS
8704 spin_lock_irq(shost->host_lock);
8705 ndlp->nlp_flag &= ~NLP_LOGO_SND;
8706 spin_unlock_irq(shost->host_lock);
8707 lpfc_els_free_iocb(phba, elsiocb);
8708 return 1;
8709 }
8710 return 0;
8711}
8712
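/*
 * Illustrative sketch (assumption, not lifted from this file): a vport
 * teardown path that wants to send the NPIV LOGO would typically look up
 * the fabric ndlp for the vport first.  The helper name
 * example_send_npiv_logo() is hypothetical.
 *
 *	static int example_send_npiv_logo(struct lpfc_vport *vport)
 *	{
 *		struct lpfc_nodelist *ndlp;
 *
 *		ndlp = lpfc_findnode_did(vport, Fabric_DID);
 *		if (!ndlp)
 *			return 1;
 *		return lpfc_issue_els_npiv_logo(vport, ndlp);
 *	}
 */
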
e59058c4 8713/**
3621a710 8714 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
e59058c4
JS
8715 * @ptr: holder for the timer function associated data.
8716 *
8717 * This routine is invoked by the fabric iocb block timer after
8718 * timeout. It posts the fabric iocb block timeout event by setting the
8719 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
8720 * the lpfc_worker_wake_up() routine to wake up the worker thread. The worker
8721 * thread then invokes lpfc_unblock_fabric_iocbs() in response to the
8722 * posted WORKER_FABRIC_BLOCK_TMO event.
8723 **/
92d7f7b0
JS
8724void
8725lpfc_fabric_block_timeout(unsigned long ptr)
8726{
8727 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
8728 unsigned long iflags;
8729 uint32_t tmo_posted;
5e9d9b82 8730
92d7f7b0
JS
8731 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
8732 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
8733 if (!tmo_posted)
8734 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
8735 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
8736
5e9d9b82
JS
8737 if (!tmo_posted)
8738 lpfc_worker_wake_up(phba);
8739 return;
92d7f7b0
JS
8740}
8741
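/*
 * Note: the handler above casts @ptr back to the lpfc_hba, so the fabric
 * block timer is expected to be armed with the phba pointer as its data
 * argument.  A hedged sketch of that setup (the real initialization lives
 * in the driver's init path, not in this file):
 *
 *	setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
 *		    (unsigned long)phba);
 *
 * lpfc_block_fabric_iocbs() later in this file then arms the timer with
 * mod_timer(&phba->fabric_block_timer, jiffies + msecs_to_jiffies(100)).
 */
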
e59058c4 8742/**
3621a710 8743 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
e59058c4
JS
8744 * @phba: pointer to lpfc hba data structure.
8745 *
8746 * This routine issues one fabric iocb from the driver internal list to
8747 * the HBA. It first checks whether it is ready to issue a fabric iocb to
8748 * the HBA (i.e., whether there is no outstanding fabric iocb). If so, it
8749 * removes one pending fabric iocb from the driver internal list and invokes
8750 * the lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
8751 **/
92d7f7b0
JS
8752static void
8753lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
8754{
8755 struct lpfc_iocbq *iocb;
8756 unsigned long iflags;
8757 int ret;
92d7f7b0
JS
8758 IOCB_t *cmd;
8759
8760repeat:
8761 iocb = NULL;
8762 spin_lock_irqsave(&phba->hbalock, iflags);
7f5f3d0d 8763 /* Post any pending iocb to the SLI layer */
92d7f7b0
JS
8764 if (atomic_read(&phba->fabric_iocb_count) == 0) {
8765 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
8766 list);
8767 if (iocb)
7f5f3d0d 8768 /* Increment fabric iocb count to hold the position */
92d7f7b0
JS
8769 atomic_inc(&phba->fabric_iocb_count);
8770 }
8771 spin_unlock_irqrestore(&phba->hbalock, iflags);
8772 if (iocb) {
8773 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
8774 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
8775 iocb->iocb_flag |= LPFC_IO_FABRIC;
8776
858c9f6c
JS
8777 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
8778 "Fabric sched1: ste:x%x",
8779 iocb->vport->port_state, 0, 0);
8780
3772a991 8781 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
92d7f7b0
JS
8782
8783 if (ret == IOCB_ERROR) {
8784 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
8785 iocb->fabric_iocb_cmpl = NULL;
8786 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
8787 cmd = &iocb->iocb;
8788 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
8789 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
8790 iocb->iocb_cmpl(phba, iocb, iocb);
8791
8792 atomic_dec(&phba->fabric_iocb_count);
8793 goto repeat;
8794 }
8795 }
8796
8797 return;
8798}
8799
e59058c4 8800/**
3621a710 8801 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
e59058c4
JS
8802 * @phba: pointer to lpfc hba data structure.
8803 *
8804 * This routine unblocks the issuing of fabric iocb commands. The function
8805 * clears the fabric iocb block bit and then invokes the routine
8806 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocbs
8807 * from the driver internal fabric iocb list.
8808 **/
92d7f7b0
JS
8809void
8810lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
8811{
8812 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
8813
8814 lpfc_resume_fabric_iocbs(phba);
8815 return;
8816}
8817
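/*
 * Illustrative sketch, not part of lpfc_els.c: the worker thread's dispatch
 * of the WORKER_FABRIC_BLOCK_TMO event posted by lpfc_fabric_block_timeout().
 * The real dispatch lives in the driver's worker code; this hypothetical
 * helper only shows the intended flow from the posted event to
 * lpfc_unblock_fabric_iocbs().
 */
static void example_handle_fabric_block_tmo(struct lpfc_hba *phba)
{
	unsigned long iflags;
	uint32_t events;

	/* Snapshot and clear the posted event under the work port lock */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	events = phba->pport->work_port_events;
	phba->pport->work_port_events &= ~WORKER_FABRIC_BLOCK_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	if (events & WORKER_FABRIC_BLOCK_TMO)
		lpfc_unblock_fabric_iocbs(phba);
}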
e59058c4 8818/**
3621a710 8819 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
e59058c4
JS
8820 * @phba: pointer to lpfc hba data structure.
8821 *
 8822 * This routine blocks the issuing of fabric iocbs for a specified amount of
 8823 * time (currently 100 ms). This is done by setting the fabric iocb block bit
 8824 * and setting up a timeout timer for 100 ms. While the block bit is set, no
 8825 * more fabric iocbs will be issued to the HBA.
8826 **/
92d7f7b0
JS
8827static void
8828lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
8829{
8830 int blocked;
8831
8832 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7f5f3d0d 8833 /* Start a timer to unblock fabric iocbs after 100ms */
92d7f7b0 8834 if (!blocked)
256ec0d0
JS
8835 mod_timer(&phba->fabric_block_timer,
8836 jiffies + msecs_to_jiffies(100));
92d7f7b0
JS
8837
8838 return;
8839}
8840
e59058c4 8841/**
3621a710 8842 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
e59058c4
JS
8843 * @phba: pointer to lpfc hba data structure.
8844 * @cmdiocb: pointer to lpfc command iocb data structure.
8845 * @rspiocb: pointer to lpfc response iocb data structure.
8846 *
 8847 * This routine is the callback function that is assigned to the fabric iocb's
8848 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
8849 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
8850 * function first restores and invokes the original iocb's callback function
8851 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
8852 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
8853 **/
92d7f7b0
JS
8854static void
8855lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8856 struct lpfc_iocbq *rspiocb)
8857{
8858 struct ls_rjt stat;
8859
e01ea5e2 8860 BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
92d7f7b0
JS
8861
8862 switch (rspiocb->iocb.ulpStatus) {
8863 case IOSTAT_NPORT_RJT:
8864 case IOSTAT_FABRIC_RJT:
8865 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
8866 lpfc_block_fabric_iocbs(phba);
ed957684 8867 }
92d7f7b0
JS
8868 break;
8869
8870 case IOSTAT_NPORT_BSY:
8871 case IOSTAT_FABRIC_BSY:
8872 lpfc_block_fabric_iocbs(phba);
8873 break;
8874
8875 case IOSTAT_LS_RJT:
8876 stat.un.lsRjtError =
8877 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
8878 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
8879 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
8880 lpfc_block_fabric_iocbs(phba);
8881 break;
8882 }
8883
8884 if (atomic_read(&phba->fabric_iocb_count) == 0)
8885 BUG();
8886
8887 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
8888 cmdiocb->fabric_iocb_cmpl = NULL;
8889 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
8890 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
8891
8892 atomic_dec(&phba->fabric_iocb_count);
8893 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
7f5f3d0d
JS
8894 /* Post any pending iocbs to HBA */
8895 lpfc_resume_fabric_iocbs(phba);
92d7f7b0
JS
8896 }
8897}
8898
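/*
 * Illustrative sketch, not part of the driver: the completion handler above
 * throttles new fabric iocbs whenever the response indicates a temporary
 * resource problem.  This hypothetical predicate restates that decision in
 * one place, assuming the IOSTAT_*, LSRJT_* and RJT_UNAVAIL_TEMP
 * definitions from lpfc_hw.h used by the handler.
 */
static bool lpfc_example_fabric_rsp_busy(struct lpfc_iocbq *rspiocb)
{
	struct ls_rjt stat;

	switch (rspiocb->iocb.ulpStatus) {
	case IOSTAT_NPORT_RJT:
	case IOSTAT_FABRIC_RJT:
		/* Rejected with a "temporarily unavailable" qualifier */
		return (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) != 0;
	case IOSTAT_NPORT_BSY:
	case IOSTAT_FABRIC_BSY:
		return true;
	case IOSTAT_LS_RJT:
		stat.un.lsRjtError = be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
		return stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC ||
		       stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY;
	}
	return false;
}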
e59058c4 8899/**
3621a710 8900 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
e59058c4
JS
8901 * @phba: pointer to lpfc hba data structure.
8902 * @iocb: pointer to lpfc command iocb data structure.
8903 *
8904 * This routine is used as the top-level API for issuing a fabric iocb command
 8905 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
8906 * function makes sure that only one fabric bound iocb will be outstanding at
8907 * any given time. As such, this function will first check to see whether there
8908 * is already an outstanding fabric iocb on the wire. If so, it will put the
8909 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
8910 * issued later. Otherwise, it will issue the iocb on the wire and update the
 8911 * fabric iocb count to indicate that there is one fabric iocb on the wire.
8912 *
 8913 * Note, this implementation can potentially send fabric IOCBs out of order.
 8914 * The problem is that the "ready" boolean does not include the condition
 8915 * that the internal fabric IOCB list is empty. As such, a fabric IOCB issued
 8916 * by this routine might jump ahead of the fabric IOCBs already waiting in the
 8917 * internal list (see the illustrative sketch following this routine).
8918 *
8919 * Return code
8920 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
8921 * IOCB_ERROR - failed to issue fabric iocb
8922 **/
a6ababd2 8923static int
92d7f7b0
JS
8924lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
8925{
8926 unsigned long iflags;
92d7f7b0
JS
8927 int ready;
8928 int ret;
8929
8930 if (atomic_read(&phba->fabric_iocb_count) > 1)
8931 BUG();
8932
8933 spin_lock_irqsave(&phba->hbalock, iflags);
8934 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
8935 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
8936
7f5f3d0d
JS
8937 if (ready)
8938 /* Increment fabric iocb count to hold the position */
8939 atomic_inc(&phba->fabric_iocb_count);
92d7f7b0
JS
8940 spin_unlock_irqrestore(&phba->hbalock, iflags);
8941 if (ready) {
8942 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
8943 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
8944 iocb->iocb_flag |= LPFC_IO_FABRIC;
8945
858c9f6c
JS
8946 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
8947 "Fabric sched2: ste:x%x",
8948 iocb->vport->port_state, 0, 0);
8949
3772a991 8950 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
92d7f7b0
JS
8951
8952 if (ret == IOCB_ERROR) {
8953 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
8954 iocb->fabric_iocb_cmpl = NULL;
8955 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
8956 atomic_dec(&phba->fabric_iocb_count);
8957 }
8958 } else {
8959 spin_lock_irqsave(&phba->hbalock, iflags);
8960 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
8961 spin_unlock_irqrestore(&phba->hbalock, iflags);
8962 ret = IOCB_SUCCESS;
8963 }
8964 return ret;
8965}
8966
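/*
 * Illustrative sketch only, not part of the driver: the note above
 * lpfc_issue_fabric_iocb() points out that its "ready" test ignores whether
 * the internal fabric iocb list is empty.  A stricter test such as this
 * hypothetical helper would close that ordering window.  The caller is
 * assumed to hold phba->hbalock, as in the routine above.
 */
static inline bool lpfc_example_fabric_ready_strict(struct lpfc_hba *phba)
{
	return atomic_read(&phba->fabric_iocb_count) == 0 &&
	       list_empty(&phba->fabric_iocb_list) &&
	       !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
}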
e59058c4 8967/**
3621a710 8968 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
e59058c4
JS
8969 * @vport: pointer to a virtual N_Port data structure.
8970 *
8971 * This routine aborts all the IOCBs associated with a @vport from the
8972 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
8973 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 8974 * list, removes each IOCB associated with the @vport off the list, sets the
 8975 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
8976 * associated with the IOCB.
8977 **/
a6ababd2 8978static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
92d7f7b0
JS
8979{
8980 LIST_HEAD(completions);
8981 struct lpfc_hba *phba = vport->phba;
8982 struct lpfc_iocbq *tmp_iocb, *piocb;
92d7f7b0
JS
8983
8984 spin_lock_irq(&phba->hbalock);
8985 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
8986 list) {
8987
8988 if (piocb->vport != vport)
8989 continue;
8990
8991 list_move_tail(&piocb->list, &completions);
8992 }
8993 spin_unlock_irq(&phba->hbalock);
8994
a257bf90
JS
8995 /* Cancel all the IOCBs from the completions list */
8996 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
8997 IOERR_SLI_ABORTED);
92d7f7b0
JS
8998}
8999
e59058c4 9000/**
3621a710 9001 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
e59058c4
JS
9002 * @ndlp: pointer to a node-list data structure.
9003 *
9004 * This routine aborts all the IOCBs associated with an @ndlp from the
9005 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
9006 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 9007 * list, removes each IOCB associated with the @ndlp off the list, sets the
 9008 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
9009 * associated with the IOCB.
9010 **/
92d7f7b0
JS
9011void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
9012{
9013 LIST_HEAD(completions);
a257bf90 9014 struct lpfc_hba *phba = ndlp->phba;
92d7f7b0
JS
9015 struct lpfc_iocbq *tmp_iocb, *piocb;
9016 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
92d7f7b0
JS
9017
9018 spin_lock_irq(&phba->hbalock);
9019 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
9020 list) {
9021 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
9022
9023 list_move_tail(&piocb->list, &completions);
ed957684 9024 }
dea3101e 9025 }
92d7f7b0
JS
9026 spin_unlock_irq(&phba->hbalock);
9027
a257bf90
JS
9028 /* Cancel all the IOCBs from the completions list */
9029 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9030 IOERR_SLI_ABORTED);
92d7f7b0
JS
9031}
9032
e59058c4 9033/**
3621a710 9034 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
e59058c4
JS
9035 * @phba: pointer to lpfc hba data structure.
9036 *
9037 * This routine aborts all the IOCBs currently on the driver internal
9038 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
9039 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
 9040 * list, removes IOCBs off the list, sets the status field to
9041 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
9042 * the IOCB.
9043 **/
92d7f7b0
JS
9044void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
9045{
9046 LIST_HEAD(completions);
92d7f7b0
JS
9047
9048 spin_lock_irq(&phba->hbalock);
9049 list_splice_init(&phba->fabric_iocb_list, &completions);
9050 spin_unlock_irq(&phba->hbalock);
9051
a257bf90
JS
9052 /* Cancel all the IOCBs from the completions list */
9053 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9054 IOERR_SLI_ABORTED);
dea3101e 9055}
6fb120a7 9056
1151e3ec
JS
9057/**
 9058 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
9059 * @vport: pointer to lpfc vport data structure.
9060 *
 9061 * This routine is invoked during vport cleanup on deletion and during
 9062 * ndlp cleanup on removal.
9063 **/
9064void
9065lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
9066{
9067 struct lpfc_hba *phba = vport->phba;
9068 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
9069 unsigned long iflag = 0;
9070
9071 spin_lock_irqsave(&phba->hbalock, iflag);
9072 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
9073 list_for_each_entry_safe(sglq_entry, sglq_next,
9074 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
9075 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
9076 sglq_entry->ndlp = NULL;
9077 }
9078 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
9079 spin_unlock_irqrestore(&phba->hbalock, iflag);
9080 return;
9081}
9082
6fb120a7
JS
9083/**
9084 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
9085 * @phba: pointer to lpfc hba data structure.
9086 * @axri: pointer to the els xri abort wcqe structure.
9087 *
9088 * This routine is invoked by the worker thread to process a SLI4 slow-path
9089 * ELS aborted xri.
9090 **/
9091void
9092lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
9093 struct sli4_wcqe_xri_aborted *axri)
9094{
9095 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
19ca7609 9096 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
7851fe2c 9097 uint16_t lxri = 0;
19ca7609 9098
6fb120a7
JS
9099 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
9100 unsigned long iflag = 0;
19ca7609 9101 struct lpfc_nodelist *ndlp;
589a52d6 9102 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6fb120a7 9103
0f65ff68
JS
9104 spin_lock_irqsave(&phba->hbalock, iflag);
9105 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
6fb120a7
JS
9106 list_for_each_entry_safe(sglq_entry, sglq_next,
9107 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
9108 if (sglq_entry->sli4_xritag == xri) {
9109 list_del(&sglq_entry->list);
19ca7609
JS
9110 ndlp = sglq_entry->ndlp;
9111 sglq_entry->ndlp = NULL;
dafe8cea 9112 spin_lock(&pring->ring_lock);
6fb120a7
JS
9113 list_add_tail(&sglq_entry->list,
9114 &phba->sli4_hba.lpfc_sgl_list);
0f65ff68 9115 sglq_entry->state = SGL_FREED;
dafe8cea 9116 spin_unlock(&pring->ring_lock);
0f65ff68 9117 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6fb120a7 9118 spin_unlock_irqrestore(&phba->hbalock, iflag);
ee0f4fe1
JS
9119 lpfc_set_rrq_active(phba, ndlp,
9120 sglq_entry->sli4_lxritag,
9121 rxid, 1);
589a52d6
JS
9122
9123 /* Check if TXQ queue needs to be serviced */
0e9bb8d7 9124 if (!(list_empty(&pring->txq)))
589a52d6 9125 lpfc_worker_wake_up(phba);
6fb120a7
JS
9126 return;
9127 }
9128 }
0f65ff68 9129 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7851fe2c
JS
9130 lxri = lpfc_sli4_xri_inrange(phba, xri);
9131 if (lxri == NO_XRI) {
9132 spin_unlock_irqrestore(&phba->hbalock, iflag);
9133 return;
9134 }
dafe8cea 9135 spin_lock(&pring->ring_lock);
7851fe2c 9136 sglq_entry = __lpfc_get_active_sglq(phba, lxri);
0f65ff68 9137 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
dafe8cea 9138 spin_unlock(&pring->ring_lock);
0f65ff68
JS
9139 spin_unlock_irqrestore(&phba->hbalock, iflag);
9140 return;
9141 }
9142 sglq_entry->state = SGL_XRI_ABORTED;
dafe8cea 9143 spin_unlock(&pring->ring_lock);
0f65ff68
JS
9144 spin_unlock_irqrestore(&phba->hbalock, iflag);
9145 return;
6fb120a7 9146}
086a345f
JS
9147
9148/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
9149 * @vport: pointer to virtual port object.
9150 * @ndlp: nodelist pointer for the impacted node.
9151 *
9152 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
9153 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
9154 * the driver is required to send a LOGO to the remote node before it
9155 * attempts to recover its login to the remote node.
9156 */
9157void
9158lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
9159 struct lpfc_nodelist *ndlp)
9160{
9161 struct Scsi_Host *shost;
9162 struct lpfc_hba *phba;
9163 unsigned long flags = 0;
9164
9165 shost = lpfc_shost_from_vport(vport);
9166 phba = vport->phba;
9167 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
9168 lpfc_printf_log(phba, KERN_INFO,
9169 LOG_SLI, "3093 No rport recovery needed. "
9170 "rport in state 0x%x\n", ndlp->nlp_state);
9171 return;
9172 }
9173 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9174 "3094 Start rport recovery on shost id 0x%x "
9175 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
9176 "flags 0x%x\n",
9177 shost->host_no, ndlp->nlp_DID,
9178 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
9179 ndlp->nlp_flag);
9180 /*
9181 * The rport is not responding. Remove the FCP-2 flag to prevent
9182 * an ADISC in the follow-up recovery code.
9183 */
9184 spin_lock_irqsave(shost->host_lock, flags);
9185 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
9186 spin_unlock_irqrestore(shost->host_lock, flags);
9187 lpfc_issue_els_logo(vport, ndlp, 0);
9188 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
9189}
9190