1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21 /* See Fibre Channel protocol T11 FC-LS for details */
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31
32 #include "lpfc_hw4.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_sli4.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_debugfs.h"
44
45 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
46 struct lpfc_iocbq *);
47 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
48 struct lpfc_iocbq *);
49 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
50 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
51 struct lpfc_nodelist *ndlp, uint8_t retry);
52 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
53 struct lpfc_iocbq *iocb);
54
55 static int lpfc_max_els_tries = 3;
56
57 /**
58 * lpfc_els_chk_latt - Check host link attention event for a vport
59 * @vport: pointer to a host virtual N_Port data structure.
60 *
61 * This routine checks whether there is an outstanding host link
62 * attention event during the discovery process with the @vport. It is done
63 * by reading the HBA's Host Attention (HA) register. If any host link
64 * attention event occurred during this @vport's discovery process, the @vport
65 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
66 * be issued if the link state is not already in host link cleared state,
67 * and a return code shall indicate whether the host link attention event
68 * had happened.
69 *
70 * Note that, if either the host link is in state LPFC_LINK_DOWN or the @vport
71 * state is LPFC_VPORT_READY, the request for checking host link attention
72 * event will be ignored and a return code shall indicate no host link
73 * attention event had happened.
74 *
75 * Return codes
76 * 0 - no host link attention event happened
77 * 1 - host link attention event happened
78 **/
79 int
80 lpfc_els_chk_latt(struct lpfc_vport *vport)
81 {
82 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
83 struct lpfc_hba *phba = vport->phba;
84 uint32_t ha_copy;
85
86 if (vport->port_state >= LPFC_VPORT_READY ||
87 phba->link_state == LPFC_LINK_DOWN ||
88 phba->sli_rev > LPFC_SLI_REV3)
89 return 0;
90
91 /* Read the HBA Host Attention Register */
92 ha_copy = readl(phba->HAregaddr);
93
94 if (!(ha_copy & HA_LATT))
95 return 0;
96
97 /* Pending Link Event during Discovery */
98 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
99 "0237 Pending Link Event during "
100 "Discovery: State x%x\n",
101 phba->pport->port_state);
102
103 /* CLEAR_LA should re-enable link attention events and
104 * we should then immediately take a LATT event. The
105 * LATT processing should call lpfc_linkdown() which
106 * will cleanup any left over in-progress discovery
107 * events.
108 */
109 spin_lock_irq(shost->host_lock);
110 vport->fc_flag |= FC_ABORT_DISCOVERY;
111 spin_unlock_irq(shost->host_lock);
112
113 if (phba->link_state != LPFC_CLEAR_LA)
114 lpfc_issue_clear_la(phba, vport);
115
116 return 1;
117 }
118
119 /**
120 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
121 * @vport: pointer to a host virtual N_Port data structure.
122 * @expectRsp: flag indicating whether response is expected.
123 * @cmdSize: size of the ELS command.
124 * @retry: number of retries to the command IOCB when it fails.
125 * @ndlp: pointer to a node-list data structure.
126 * @did: destination identifier.
127 * @elscmd: the ELS command code.
128 *
129 * This routine allocates a lpfc-IOCB data structure from the driver
130 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed
131 * into the routine, for the discovery state machine to issue an Extended
132 * Link Service (ELS) command. It is a generic lpfc-IOCB allocation and
133 * preparation routine used by all the discovery state machine routines;
134 * the ELS command-specific fields are set up later by the individual
135 * discovery state machine routines after this routine has allocated and
136 * prepared the generic IOCB data structure. It fills in the
137 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
138 * payload and response payload (if expected). The reference count on the
139 * ndlp is incremented by 1 and the reference to the ndlp is put into
140 * context1 of the IOCB data structure for this IOCB to hold the ndlp
141 * reference for the command's callback function to access later.
142 *
143 * Return code
144 * Pointer to the newly allocated/prepared els iocb data structure
145 * NULL - when els iocb data structure allocation/preparation failed
146 **/
147 struct lpfc_iocbq *
148 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
149 uint16_t cmdSize, uint8_t retry,
150 struct lpfc_nodelist *ndlp, uint32_t did,
151 uint32_t elscmd)
152 {
153 struct lpfc_hba *phba = vport->phba;
154 struct lpfc_iocbq *elsiocb;
155 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
156 struct ulp_bde64 *bpl;
157 IOCB_t *icmd;
158
159
160 if (!lpfc_is_link_up(phba))
161 return NULL;
162
163 /* Allocate buffer for command iocb */
164 elsiocb = lpfc_sli_get_iocbq(phba);
165
166 if (elsiocb == NULL)
167 return NULL;
168
169 /*
170 * If this command is for the fabric controller and the HBA is running
171 * in FIP mode, send FLOGI, FDISC and LOGO as FIP frames.
172 */
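	/*
	 * The ELS_ID_* code for the command is packed into the
	 * LPFC_FIP_ELS_ID_MASK bits of iocb_flag below, so the lower-level
	 * transmit path can tell which of these FIP-encapsulated ELS
	 * frames is being sent.
	 */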
173 if ((did == Fabric_DID) &&
174 (phba->hba_flag & HBA_FIP_SUPPORT) &&
175 ((elscmd == ELS_CMD_FLOGI) ||
176 (elscmd == ELS_CMD_FDISC) ||
177 (elscmd == ELS_CMD_LOGO)))
178 switch (elscmd) {
179 case ELS_CMD_FLOGI:
180 elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
181 & LPFC_FIP_ELS_ID_MASK);
182 break;
183 case ELS_CMD_FDISC:
184 elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
185 & LPFC_FIP_ELS_ID_MASK);
186 break;
187 case ELS_CMD_LOGO:
188 elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
189 & LPFC_FIP_ELS_ID_MASK);
190 break;
191 }
192 else
193 elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
194
195 icmd = &elsiocb->iocb;
196
197 /* fill in BDEs for command */
198 /* Allocate buffer for command payload */
199 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
200 if (pcmd)
201 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
202 if (!pcmd || !pcmd->virt)
203 goto els_iocb_free_pcmb_exit;
204
205 INIT_LIST_HEAD(&pcmd->list);
206
207 /* Allocate buffer for response payload */
208 if (expectRsp) {
209 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
210 if (prsp)
211 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
212 &prsp->phys);
213 if (!prsp || !prsp->virt)
214 goto els_iocb_free_prsp_exit;
215 INIT_LIST_HEAD(&prsp->list);
216 } else
217 prsp = NULL;
218
219 /* Allocate buffer for Buffer ptr list */
220 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
221 if (pbuflist)
222 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
223 &pbuflist->phys);
224 if (!pbuflist || !pbuflist->virt)
225 goto els_iocb_free_pbuf_exit;
226
227 INIT_LIST_HEAD(&pbuflist->list);
228
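	/*
	 * The BDL in the IOCB points at the buffer pointer list (BPL) just
	 * allocated above; its 64-bit DMA address is split into 32-bit
	 * high/low words. The BPL itself (filled in further below) holds
	 * one BDE for the command payload and, when a response is expected,
	 * a second BDE for the response buffer, which is why bdeSize is
	 * either one or two ulp_bde64 entries.
	 */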
229 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
230 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
231 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
232 icmd->un.elsreq64.remoteID = did; /* DID */
233 if (expectRsp) {
234 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
235 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
236 icmd->ulpTimeout = phba->fc_ratov * 2;
237 } else {
238 icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
239 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
240 }
241 icmd->ulpBdeCount = 1;
242 icmd->ulpLe = 1;
243 icmd->ulpClass = CLASS3;
244
245 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
246 icmd->un.elsreq64.myID = vport->fc_myDID;
247
248 /* For ELS_REQUEST64_CR, use the VPI by default */
249 icmd->ulpContext = vport->vpi + phba->vpi_base;
250 icmd->ulpCt_h = 0;
251 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
252 if (elscmd == ELS_CMD_ECHO)
253 icmd->ulpCt_l = 0; /* context = invalid RPI */
254 else
255 icmd->ulpCt_l = 1; /* context = VPI */
256 }
257
258 bpl = (struct ulp_bde64 *) pbuflist->virt;
259 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
260 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
261 bpl->tus.f.bdeSize = cmdSize;
262 bpl->tus.f.bdeFlags = 0;
263 bpl->tus.w = le32_to_cpu(bpl->tus.w);
264
265 if (expectRsp) {
266 bpl++;
267 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
268 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
269 bpl->tus.f.bdeSize = FCELSSIZE;
270 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
271 bpl->tus.w = le32_to_cpu(bpl->tus.w);
272 }
273
274 /* prevent preparing iocb with NULL ndlp reference */
275 elsiocb->context1 = lpfc_nlp_get(ndlp);
276 if (!elsiocb->context1)
277 goto els_iocb_free_pbuf_exit;
278 elsiocb->context2 = pcmd;
279 elsiocb->context3 = pbuflist;
280 elsiocb->retry = retry;
281 elsiocb->vport = vport;
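	/*
	 * Driver-side timeout: 2 * R_A_TOV plus LPFC_DRVR_TIMEOUT of slack,
	 * so the firmware's ulpTimeout (2 * R_A_TOV, set above for request
	 * IOCBs) is expected to expire first.
	 */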
282 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
283
284 if (prsp) {
285 list_add(&prsp->list, &pcmd->list);
286 }
287 if (expectRsp) {
288 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
289 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
290 "0116 Xmit ELS command x%x to remote "
291 "NPORT x%x I/O tag: x%x, port state: x%x\n",
292 elscmd, did, elsiocb->iotag,
293 vport->port_state);
294 } else {
295 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
296 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
297 "0117 Xmit ELS response x%x to remote "
298 "NPORT x%x I/O tag: x%x, size: x%x\n",
299 elscmd, ndlp->nlp_DID, elsiocb->iotag,
300 cmdSize);
301 }
302 return elsiocb;
303
304 els_iocb_free_pbuf_exit:
305 if (expectRsp)
306 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
307 kfree(pbuflist);
308
309 els_iocb_free_prsp_exit:
310 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
311 kfree(prsp);
312
313 els_iocb_free_pcmb_exit:
314 kfree(pcmd);
315 lpfc_sli_release_iocbq(phba, elsiocb);
316 return NULL;
317 }
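
/*
 * Typical caller pattern (see lpfc_issue_els_flogi() below for a concrete
 * example): allocate and prepare the iocb, build the ELS payload in the
 * command buffer hung off context2, set the completion handler, and hand
 * the iocb to the SLI layer:
 *
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 *	pcmd = ((struct lpfc_dmabuf *) elsiocb->context2)->virt;
 *	... fill in the ELS command payload at pcmd ...
 *	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
 *	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 */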
318
319 /**
320 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
321 * @vport: pointer to a host virtual N_Port data structure.
322 *
323 * This routine issues a fabric registration login for a @vport. An
324 * active ndlp node with Fabric_DID must already exist for this @vport.
325 * The routine invokes two mailbox commands to carry out fabric registration
326 * login through the HBA firmware: the first mailbox command requests the
327 * HBA to perform link configuration for the @vport; and the second mailbox
328 * command requests the HBA to perform the actual fabric registration login
329 * with the @vport.
330 *
331 * Return code
332 * 0 - successfully issued fabric registration login for @vport
333 * -ENXIO - failed to issue fabric registration login for @vport
334 **/
335 int
336 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
337 {
338 struct lpfc_hba *phba = vport->phba;
339 LPFC_MBOXQ_t *mbox;
340 struct lpfc_dmabuf *mp;
341 struct lpfc_nodelist *ndlp;
342 struct serv_parm *sp;
343 int rc;
344 int err = 0;
345
346 sp = &phba->fc_fabparam;
347 ndlp = lpfc_findnode_did(vport, Fabric_DID);
348 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
349 err = 1;
350 goto fail;
351 }
352
353 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
354 if (!mbox) {
355 err = 2;
356 goto fail;
357 }
358
359 vport->port_state = LPFC_FABRIC_CFG_LINK;
360 lpfc_config_link(phba, mbox);
361 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
362 mbox->vport = vport;
363
364 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
365 if (rc == MBX_NOT_FINISHED) {
366 err = 3;
367 goto fail_free_mbox;
368 }
369
370 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
371 if (!mbox) {
372 err = 4;
373 goto fail;
374 }
375 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
376 if (rc) {
377 err = 5;
378 goto fail_free_mbox;
379 }
380
381 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
382 mbox->vport = vport;
383 /* increment the reference count on ndlp to hold reference
384 * for the callback routine.
385 */
386 mbox->context2 = lpfc_nlp_get(ndlp);
387
388 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
389 if (rc == MBX_NOT_FINISHED) {
390 err = 6;
391 goto fail_issue_reg_login;
392 }
393
394 return 0;
395
396 fail_issue_reg_login:
397 /* decrement the reference count on ndlp just incremented
398 * for the failed mbox command.
399 */
400 lpfc_nlp_put(ndlp);
401 mp = (struct lpfc_dmabuf *) mbox->context1;
402 lpfc_mbuf_free(phba, mp->virt, mp->phys);
403 kfree(mp);
404 fail_free_mbox:
405 mempool_free(mbox, phba->mbox_mem_pool);
406
407 fail:
408 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
409 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
410 "0249 Cannot issue Register Fabric login: Err %d\n", err);
411 return -ENXIO;
412 }
413
414 /**
415 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
416 * @vport: pointer to a host virtual N_Port data structure.
417 *
418 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
419 * the @vport. This mailbox command is necessary for FCoE only.
420 *
421 * Return code
422 * 0 - successfully issued REG_VFI for @vport
423 * A failure code otherwise.
424 **/
425 static int
426 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
427 {
428 struct lpfc_hba *phba = vport->phba;
429 LPFC_MBOXQ_t *mboxq;
430 struct lpfc_nodelist *ndlp;
431 struct serv_parm *sp;
432 struct lpfc_dmabuf *dmabuf;
433 int rc = 0;
434
435 sp = &phba->fc_fabparam;
436 ndlp = lpfc_findnode_did(vport, Fabric_DID);
437 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
438 rc = -ENODEV;
439 goto fail;
440 }
441
442 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
443 if (!dmabuf) {
444 rc = -ENOMEM;
445 goto fail;
446 }
447 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
448 if (!dmabuf->virt) {
449 rc = -ENOMEM;
450 goto fail_free_dmabuf;
451 }
452 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
453 if (!mboxq) {
454 rc = -ENOMEM;
455 goto fail_free_coherent;
456 }
457 vport->port_state = LPFC_FABRIC_CFG_LINK;
458 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
459 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
460 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
461 mboxq->vport = vport;
462 mboxq->context1 = dmabuf;
463 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
464 if (rc == MBX_NOT_FINISHED) {
465 rc = -ENXIO;
466 goto fail_free_mbox;
467 }
468 return 0;
469
470 fail_free_mbox:
471 mempool_free(mboxq, phba->mbox_mem_pool);
472 fail_free_coherent:
473 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
474 fail_free_dmabuf:
475 kfree(dmabuf);
476 fail:
477 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
478 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
479 "0289 Issue Register VFI failed: Err %d\n", rc);
480 return rc;
481 }
482
483 /**
484 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
485 * @vport: pointer to a host virtual N_Port data structure.
486 * @ndlp: pointer to a node-list data structure.
487 * @sp: pointer to service parameter data structure.
488 * @irsp: pointer to the IOCB within the lpfc response IOCB.
489 *
490 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
491 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
492 * port in a fabric topology. It properly sets up the parameters to the @ndlp
493 * from the IOCB response. It also checks the newly assigned N_Port ID for the
494 * @vport against the previously assigned N_Port ID. If it is different from
495 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
496 * is invoked on all the remaining nodes with the @vport to unregister the
497 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
498 * is invoked to register login to the fabric.
499 *
500 * Return code
501 * 0 - Success (currently, always return 0)
502 **/
503 static int
504 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
505 struct serv_parm *sp, IOCB_t *irsp)
506 {
507 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
508 struct lpfc_hba *phba = vport->phba;
509 struct lpfc_nodelist *np;
510 struct lpfc_nodelist *next_np;
511
512 spin_lock_irq(shost->host_lock);
513 vport->fc_flag |= FC_FABRIC;
514 spin_unlock_irq(shost->host_lock);
515
516 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
517 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
518 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
519
520 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
521
522 if (phba->fc_topology == TOPOLOGY_LOOP) {
523 spin_lock_irq(shost->host_lock);
524 vport->fc_flag |= FC_PUBLIC_LOOP;
525 spin_unlock_irq(shost->host_lock);
526 } else {
527 /*
528 * If we are an N_Port connected to a Fabric, fix up the sparams so
529 * logins to devices on remote loops work.
530 */
531 vport->fc_sparam.cmn.altBbCredit = 1;
532 }
533
534 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
535 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
536 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
537 ndlp->nlp_class_sup = 0;
538 if (sp->cls1.classValid)
539 ndlp->nlp_class_sup |= FC_COS_CLASS1;
540 if (sp->cls2.classValid)
541 ndlp->nlp_class_sup |= FC_COS_CLASS2;
542 if (sp->cls3.classValid)
543 ndlp->nlp_class_sup |= FC_COS_CLASS3;
544 if (sp->cls4.classValid)
545 ndlp->nlp_class_sup |= FC_COS_CLASS4;
546 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
547 sp->cmn.bbRcvSizeLsb;
548 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
549
550 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
551 if (sp->cmn.response_multiple_NPort) {
552 lpfc_printf_vlog(vport, KERN_WARNING,
553 LOG_ELS | LOG_VPORT,
554 "1816 FLOGI NPIV supported, "
555 "response data 0x%x\n",
556 sp->cmn.response_multiple_NPort);
557 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
558 } else {
559 /* Because we asked f/w for NPIV it still expects us
560 to call reg_vnpid at least for the physical host */
561 lpfc_printf_vlog(vport, KERN_WARNING,
562 LOG_ELS | LOG_VPORT,
563 "1817 Fabric does not support NPIV "
564 "- configuring single port mode.\n");
565 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
566 }
567 }
568
569 if ((vport->fc_prevDID != vport->fc_myDID) &&
570 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
571
572 /* If our NportID changed, we need to ensure all
573 * remaining NPORTs get unreg_login'ed.
574 */
575 list_for_each_entry_safe(np, next_np,
576 &vport->fc_nodes, nlp_listp) {
577 if (!NLP_CHK_NODE_ACT(np))
578 continue;
579 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
580 !(np->nlp_flag & NLP_NPR_ADISC))
581 continue;
582 spin_lock_irq(shost->host_lock);
583 np->nlp_flag &= ~NLP_NPR_ADISC;
584 spin_unlock_irq(shost->host_lock);
585 lpfc_unreg_rpi(vport, np);
586 }
587 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
588 lpfc_mbx_unreg_vpi(vport);
589 spin_lock_irq(shost->host_lock);
590 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
591 spin_unlock_irq(shost->host_lock);
592 }
593 /*
594 * If the VPI is unregistered, the driver needs to do INIT_VPI
595 * before re-registering
596 */
597 if (phba->sli_rev == LPFC_SLI_REV4) {
598 spin_lock_irq(shost->host_lock);
599 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
600 spin_unlock_irq(shost->host_lock);
601 }
602 }
603
604 if (phba->sli_rev < LPFC_SLI_REV4) {
605 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
606 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
607 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
608 lpfc_register_new_vport(phba, vport, ndlp);
609 else
610 lpfc_issue_fabric_reglogin(vport);
611 } else {
612 ndlp->nlp_type |= NLP_FABRIC;
613 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
614 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
615 (vport->vpi_state & LPFC_VPI_REGISTERED)) {
616 lpfc_start_fdiscs(phba);
617 lpfc_do_scr_ns_plogi(phba, vport);
618 } else if (vport->fc_flag & FC_VFI_REGISTERED)
619 lpfc_issue_init_vpi(vport);
620 else
621 lpfc_issue_reg_vfi(vport);
622 }
623 return 0;
624 }
625 /**
626 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
627 * @vport: pointer to a host virtual N_Port data structure.
628 * @ndlp: pointer to a node-list data structure.
629 * @sp: pointer to service parameter data structure.
630 *
631 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
632 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
633 * in a point-to-point topology. First, the @vport's N_Port Name is compared
634 * with the received N_Port Name: if the @vport's N_Port Name is greater than
635 * the received N_Port Name lexicographically, this node shall assign local
636 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
637 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
638 * this node shall just wait for the remote node to issue PLOGI and assign
639 * N_Port IDs.
640 *
641 * Return code
642 * 0 - Success
643 * -ENXIO - Fail
644 **/
645 static int
646 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
647 struct serv_parm *sp)
648 {
649 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
650 struct lpfc_hba *phba = vport->phba;
651 LPFC_MBOXQ_t *mbox;
652 int rc;
653
654 spin_lock_irq(shost->host_lock);
655 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
656 spin_unlock_irq(shost->host_lock);
657
658 phba->fc_edtov = FF_DEF_EDTOV;
659 phba->fc_ratov = FF_DEF_RATOV;
660 rc = memcmp(&vport->fc_portname, &sp->portName,
661 sizeof(vport->fc_portname));
662 if (rc >= 0) {
663 /* This side will initiate the PLOGI */
664 spin_lock_irq(shost->host_lock);
665 vport->fc_flag |= FC_PT2PT_PLOGI;
666 spin_unlock_irq(shost->host_lock);
667
668 /*
669 * N_Port ID cannot be 0; set ours to LocalID, the other
670 * side will be RemoteID.
671 */
672
673 /* not equal */
674 if (rc)
675 vport->fc_myDID = PT2PT_LocalID;
676
677 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
678 if (!mbox)
679 goto fail;
680
681 lpfc_config_link(phba, mbox);
682
683 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
684 mbox->vport = vport;
685 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
686 if (rc == MBX_NOT_FINISHED) {
687 mempool_free(mbox, phba->mbox_mem_pool);
688 goto fail;
689 }
690 /* Decrement ndlp reference count indicating that ndlp can be
691 * safely released when other references to it are done.
692 */
693 lpfc_nlp_put(ndlp);
694
695 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
696 if (!ndlp) {
697 /*
698 * Cannot find existing Fabric ndlp, so allocate a
699 * new one
700 */
701 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
702 if (!ndlp)
703 goto fail;
704 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
705 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
706 ndlp = lpfc_enable_node(vport, ndlp,
707 NLP_STE_UNUSED_NODE);
708 if (!ndlp)
709 goto fail;
710 }
711
712 memcpy(&ndlp->nlp_portname, &sp->portName,
713 sizeof(struct lpfc_name));
714 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
715 sizeof(struct lpfc_name));
716 /* Set state will put ndlp onto node list if not already done */
717 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
718 spin_lock_irq(shost->host_lock);
719 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
720 spin_unlock_irq(shost->host_lock);
721 } else
722 /* This side will wait for the PLOGI, decrement ndlp reference
723 * count indicating that ndlp can be released when other
724 * references to it are done.
725 */
726 lpfc_nlp_put(ndlp);
727
728 /* If we are pt2pt with another NPort, force NPIV off! */
729 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
730
731 spin_lock_irq(shost->host_lock);
732 vport->fc_flag |= FC_PT2PT;
733 spin_unlock_irq(shost->host_lock);
734
735 /* Start discovery - this should just do CLEAR_LA */
736 lpfc_disc_start(vport);
737 return 0;
738 fail:
739 return -ENXIO;
740 }
741
742 /**
743 * lpfc_cmpl_els_flogi - Completion callback function for flogi
744 * @phba: pointer to lpfc hba data structure.
745 * @cmdiocb: pointer to lpfc command iocb data structure.
746 * @rspiocb: pointer to lpfc response iocb data structure.
747 *
748 * This routine is the top-level completion callback function for issuing
749 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
750 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
751 * a retry has been made (either immediately or delayed, with lpfc_els_retry()
752 * returning 1), the command IOCB will be released and the function returns.
753 * If the retry attempt has been given up (possibly after reaching the maximum
754 * number of retries), one additional decrement of the ndlp reference count
755 * shall be made before releasing the command IOCB and returning. This will
756 * actually release the remote node (note that lpfc_els_free_iocb() will also
757 * decrement the ndlp reference count once). If no error is reported in
758 * the IOCB status, the command Port ID field is used to determine whether
759 * this is a point-to-point topology or a fabric topology: if the Port ID
760 * field is assigned, it is a fabric topology; otherwise, it is a
761 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
762 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
763 * specific topology completion conditions.
764 **/
765 static void
766 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
767 struct lpfc_iocbq *rspiocb)
768 {
769 struct lpfc_vport *vport = cmdiocb->vport;
770 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
771 IOCB_t *irsp = &rspiocb->iocb;
772 struct lpfc_nodelist *ndlp = cmdiocb->context1;
773 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
774 struct serv_parm *sp;
775 uint16_t fcf_index;
776 int rc;
777
778 /* Check to see if link went down during discovery */
779 if (lpfc_els_chk_latt(vport)) {
780 /* One additional decrement on node reference count to
781 * trigger the release of the node
782 */
783 lpfc_nlp_put(ndlp);
784 goto out;
785 }
786
787 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
788 "FLOGI cmpl: status:x%x/x%x state:x%x",
789 irsp->ulpStatus, irsp->un.ulpWord[4],
790 vport->port_state);
791
792 if (irsp->ulpStatus) {
793 /*
794 * In case of FIP mode, perform round robin FCF failover
795 * due to new FCF discovery
796 */
797 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
798 (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
799 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
800 "2611 FLOGI failed on registered "
801 "FCF record fcf_index:%d, trying "
802 "to perform round robin failover\n",
803 phba->fcf.current_rec.fcf_indx);
804 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
805 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
806 /*
807 * Exhausted the eligible FCF record list,
808 * fail through to retry FLOGI on current
809 * FCF record.
810 */
811 lpfc_printf_log(phba, KERN_WARNING,
812 LOG_FIP | LOG_ELS,
813 "2760 FLOGI exhausted FCF "
814 "round robin failover list, "
815 "retry FLOGI on the current "
816 "registered FCF index:%d\n",
817 phba->fcf.current_rec.fcf_indx);
818 spin_lock_irq(&phba->hbalock);
819 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
820 spin_unlock_irq(&phba->hbalock);
821 } else {
822 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
823 fcf_index);
824 if (rc) {
825 lpfc_printf_log(phba, KERN_WARNING,
826 LOG_FIP | LOG_ELS,
827 "2761 FLOGI round "
828 "robin FCF failover "
829 "read FCF failed "
830 "rc:x%x, fcf_index:"
831 "%d\n", rc,
832 phba->fcf.current_rec.fcf_indx);
833 spin_lock_irq(&phba->hbalock);
834 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
835 spin_unlock_irq(&phba->hbalock);
836 } else
837 goto out;
838 }
839 }
840
841 /* Check for retry */
842 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
843 goto out;
844
845 /* FLOGI failed, so there is no fabric */
846 spin_lock_irq(shost->host_lock);
847 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
848 spin_unlock_irq(shost->host_lock);
849
850 /* If private loop, then allow max outstanding els to be
851 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
852 * alpa map would take too long otherwise.
853 */
854 if (phba->alpa_map[0] == 0) {
855 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
856 }
857
858 /* FLOGI failure */
859 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
860 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
861 irsp->ulpStatus, irsp->un.ulpWord[4],
862 irsp->ulpTimeout);
863 goto flogifail;
864 }
865 spin_lock_irq(shost->host_lock);
866 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
867 spin_unlock_irq(shost->host_lock);
868
869 /*
870 * The FLOGI succeeded. Sync the data for the CPU before
871 * accessing it.
872 */
873 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
874
875 sp = prsp->virt + sizeof(uint32_t);
876
877 /* FLOGI completes successfully */
878 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
879 "0101 FLOGI completes successfully "
880 "Data: x%x x%x x%x x%x\n",
881 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
882 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
883
884 if (vport->port_state == LPFC_FLOGI) {
885 /*
886 * If the Common Service Parameters indicate an Nport,
887 * we are point to point; if an Fport, we are Fabric.
888 */
889 if (sp->cmn.fPort)
890 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
891 else
892 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
893
894 if (!rc) {
895 /* Mark the FCF discovery process done */
896 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
897 "2769 FLOGI successful on FCF record: "
898 "current_fcf_index:x%x, terminate FCF "
899 "round robin failover process\n",
900 phba->fcf.current_rec.fcf_indx);
901 spin_lock_irq(&phba->hbalock);
902 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
903 spin_unlock_irq(&phba->hbalock);
904 goto out;
905 }
906 }
907
908 flogifail:
909 lpfc_nlp_put(ndlp);
910
911 if (!lpfc_error_lost_link(irsp)) {
912 /* FLOGI failed, so just use loop map to make discovery list */
913 lpfc_disc_list_loopmap(vport);
914
915 /* Start discovery */
916 lpfc_disc_start(vport);
917 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
918 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
919 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
920 (phba->link_state != LPFC_CLEAR_LA)) {
921 /* If FLOGI failed, enable link interrupt. */
922 lpfc_issue_clear_la(phba, vport);
923 }
924 out:
925 lpfc_els_free_iocb(phba, cmdiocb);
926 }
927
928 /**
929 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
930 * @vport: pointer to a host virtual N_Port data structure.
931 * @ndlp: pointer to a node-list data structure.
932 * @retry: number of retries to the command IOCB.
933 *
934 * This routine issues a Fabric Login (FLOGI) Request ELS command
935 * for a @vport. The initiator service parameters are put into the payload
936 * of the FLOGI Request IOCB and the top-level callback function pointer
937 * to the lpfc_cmpl_els_flogi() routine is put into the IOCB completion callback
938 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
939 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
940 *
941 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
942 * will be incremented by 1 for holding the ndlp and the reference to ndlp
943 * will be stored into the context1 field of the IOCB for the completion
944 * callback function to the FLOGI ELS command.
945 *
946 * Return code
947 * 0 - successfully issued flogi iocb for @vport
948 * 1 - failed to issue flogi iocb for @vport
949 **/
950 static int
951 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
952 uint8_t retry)
953 {
954 struct lpfc_hba *phba = vport->phba;
955 struct serv_parm *sp;
956 IOCB_t *icmd;
957 struct lpfc_iocbq *elsiocb;
958 struct lpfc_sli_ring *pring;
959 uint8_t *pcmd;
960 uint16_t cmdsize;
961 uint32_t tmo;
962 int rc;
963
964 pring = &phba->sli.ring[LPFC_ELS_RING];
965
966 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
967 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
968 ndlp->nlp_DID, ELS_CMD_FLOGI);
969
970 if (!elsiocb)
971 return 1;
972
973 icmd = &elsiocb->iocb;
974 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
975
976 /* For FLOGI request, remainder of payload is service parameters */
977 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
978 pcmd += sizeof(uint32_t);
979 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
980 sp = (struct serv_parm *) pcmd;
981
982 /* Setup CSPs accordingly for Fabric */
983 sp->cmn.e_d_tov = 0;
984 sp->cmn.w2.r_a_tov = 0;
985 sp->cls1.classValid = 0;
986 sp->cls2.seqDelivery = 1;
987 sp->cls3.seqDelivery = 1;
988 if (sp->cmn.fcphLow < FC_PH3)
989 sp->cmn.fcphLow = FC_PH3;
990 if (sp->cmn.fcphHigh < FC_PH3)
991 sp->cmn.fcphHigh = FC_PH3;
992
993 if (phba->sli_rev == LPFC_SLI_REV4) {
994 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
995 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
996 /* FLOGI needs to be 3 for WQE FCFI */
997 /* Set the fcfi to the fcfi we registered with */
998 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
999 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1000 sp->cmn.request_multiple_Nport = 1;
1001 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1002 icmd->ulpCt_h = 1;
1003 icmd->ulpCt_l = 0;
1004 }
1005
1006 if (phba->fc_topology != TOPOLOGY_LOOP) {
1007 icmd->un.elsreq64.myID = 0;
1008 icmd->un.elsreq64.fl = 1;
1009 }
1010
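	/*
	 * Temporarily use LPFC_DISC_FLOGI_TMO as the R_A_TOV while
	 * (re)arming the discovery timer via lpfc_set_disctmo(), then
	 * restore the saved value.
	 */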
1011 tmo = phba->fc_ratov;
1012 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1013 lpfc_set_disctmo(vport);
1014 phba->fc_ratov = tmo;
1015
1016 phba->fc_stat.elsXmitFLOGI++;
1017 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
1018
1019 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1020 "Issue FLOGI: opt:x%x",
1021 phba->sli3_options, 0, 0);
1022
1023 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1024 if (rc == IOCB_ERROR) {
1025 lpfc_els_free_iocb(phba, elsiocb);
1026 return 1;
1027 }
1028 return 0;
1029 }
1030
1031 /**
1032 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1033 * @phba: pointer to lpfc hba data structure.
1034 *
1035 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1036 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1037 * list and issues an abort IOCB command on each outstanding IOCB that
1038 * contains an active Fabric_DID ndlp. Note that this function only issues
1039 * the abort IOCB command on all the outstanding IOCBs, thus when this
1040 * function returns, it does not guarantee all the IOCBs are actually aborted.
1041 *
1042 * Return code
1043 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
1044 **/
1045 int
1046 lpfc_els_abort_flogi(struct lpfc_hba *phba)
1047 {
1048 struct lpfc_sli_ring *pring;
1049 struct lpfc_iocbq *iocb, *next_iocb;
1050 struct lpfc_nodelist *ndlp;
1051 IOCB_t *icmd;
1052
1053 /* Abort outstanding I/O on NPort <nlp_DID> */
1054 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1055 "0201 Abort outstanding I/O on NPort x%x\n",
1056 Fabric_DID);
1057
1058 pring = &phba->sli.ring[LPFC_ELS_RING];
1059
1060 /*
1061 * Check the txcmplq for an iocb that matches the nport the driver is
1062 * searching for.
1063 */
1064 spin_lock_irq(&phba->hbalock);
1065 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1066 icmd = &iocb->iocb;
1067 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
1068 icmd->un.elsreq64.bdl.ulpIoTag32) {
1069 ndlp = (struct lpfc_nodelist *)(iocb->context1);
1070 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1071 (ndlp->nlp_DID == Fabric_DID))
1072 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1073 }
1074 }
1075 spin_unlock_irq(&phba->hbalock);
1076
1077 return 0;
1078 }
1079
1080 /**
1081 * lpfc_initial_flogi - Issue an initial fabric login for a vport
1082 * @vport: pointer to a host virtual N_Port data structure.
1083 *
1084 * This routine issues an initial Fabric Login (FLOGI) for the @vport
1085 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1086 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
1087 * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
1088 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1089 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1090 * @vport.
1091 *
1092 * Return code
1093 * 0 - failed to issue initial flogi for @vport
1094 * 1 - successfully issued initial flogi for @vport
1095 **/
1096 int
1097 lpfc_initial_flogi(struct lpfc_vport *vport)
1098 {
1099 struct lpfc_hba *phba = vport->phba;
1100 struct lpfc_nodelist *ndlp;
1101
1102 vport->port_state = LPFC_FLOGI;
1103 lpfc_set_disctmo(vport);
1104
1105 /* First look for the Fabric ndlp */
1106 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1107 if (!ndlp) {
1108 /* Cannot find existing Fabric ndlp, so allocate a new one */
1109 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1110 if (!ndlp)
1111 return 0;
1112 lpfc_nlp_init(vport, ndlp, Fabric_DID);
1113 /* Set the node type */
1114 ndlp->nlp_type |= NLP_FABRIC;
1115 /* Put ndlp onto node list */
1116 lpfc_enqueue_node(vport, ndlp);
1117 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1118 /* re-setup ndlp without removing from node list */
1119 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1120 if (!ndlp)
1121 return 0;
1122 }
1123
1124 if (lpfc_issue_els_flogi(vport, ndlp, 0))
1125 /* This decrement of reference count to node shall kick off
1126 * the release of the node.
1127 */
1128 lpfc_nlp_put(ndlp);
1129
1130 return 1;
1131 }
1132
1133 /**
1134 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1135 * @vport: pointer to a host virtual N_Port data structure.
1136 *
1137 * This routine issues an initial Fabric Discover (FDISC) for the @vport
1138 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1139 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
1140 * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
1141 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1142 * is then invoked with the @vport and the ndlp to perform the FDISC for the
1143 * @vport.
1144 *
1145 * Return code
1146 * 0 - failed to issue initial fdisc for @vport
1147 * 1 - successfully issued initial fdisc for @vport
1148 **/
1149 int
1150 lpfc_initial_fdisc(struct lpfc_vport *vport)
1151 {
1152 struct lpfc_hba *phba = vport->phba;
1153 struct lpfc_nodelist *ndlp;
1154
1155 /* First look for the Fabric ndlp */
1156 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1157 if (!ndlp) {
1158 /* Cannot find existing Fabric ndlp, so allocate a new one */
1159 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1160 if (!ndlp)
1161 return 0;
1162 lpfc_nlp_init(vport, ndlp, Fabric_DID);
1163 /* Put ndlp onto node list */
1164 lpfc_enqueue_node(vport, ndlp);
1165 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1166 /* re-setup ndlp without removing from node list */
1167 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1168 if (!ndlp)
1169 return 0;
1170 }
1171
1172 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1173 /* decrement node reference count to trigger the release of
1174 * the node.
1175 */
1176 lpfc_nlp_put(ndlp);
1177 return 0;
1178 }
1179 return 1;
1180 }
1181
1182 /**
1183 * lpfc_more_plogi - Check and issue remaining plogis for a vport
1184 * @vport: pointer to a host virtual N_Port data structure.
1185 *
1186 * This routine checks whether there are more remaining Port Logins
1187 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1188 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1189 * to issue ELS PLOGIs up to the configured discover threads with the
1190 * @vport (@vport->cfg_discovery_threads). The function also decrements
1191 * the @vport's num_disc_nodes by 1 if it is not already 0.
1192 **/
1193 void
1194 lpfc_more_plogi(struct lpfc_vport *vport)
1195 {
1196 int sentplogi;
1197
1198 if (vport->num_disc_nodes)
1199 vport->num_disc_nodes--;
1200
1201 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
1202 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1203 "0232 Continue discovery with %d PLOGIs to go "
1204 "Data: x%x x%x x%x\n",
1205 vport->num_disc_nodes, vport->fc_plogi_cnt,
1206 vport->fc_flag, vport->port_state);
1207 /* Check to see if there are more PLOGIs to be sent */
1208 if (vport->fc_flag & FC_NLP_MORE)
1209 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
1210 sentplogi = lpfc_els_disc_plogi(vport);
1211
1212 return;
1213 }
1214
1215 /**
1216 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
1217 * @phba: pointer to lpfc hba data structure.
1218 * @prsp: pointer to response IOCB payload.
1219 * @ndlp: pointer to a node-list data structure.
1220 *
1221 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1222 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
1223 * The following cases are considered N_Port confirmed:
1224 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1225 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1226 * it does not have WWPN assigned either. If the WWPN is confirmed, the
1227 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1228 * 1) if there is a node on the vport list other than the @ndlp with the same
1229 * WWPN as the N_Port PLOGI logged into, lpfc_unreg_rpi() will be invoked
1230 * on that node to release the RPI associated with the node; 2) if there is
1231 * no node found on the vport list with the same WWPN as the N_Port PLOGI logged
1232 * into, a new node shall be allocated (or activated). In either case, the
1233 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1234 * be released and the new_ndlp shall be put onto the vport node list and
1235 * its pointer returned as the confirmed node.
1236 *
1237 * Note that before the @ndlp is "released", the keepDID from the not-matching
1238 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1239 * of the @ndlp. This is because the release of @ndlp actually puts it
1240 * into an inactive state on the vport node list and the vport node list
1241 * management algorithm does not allow two nodes with the same DID.
1242 *
1243 * Return code
1244 * pointer to the PLOGI N_Port @ndlp
1245 **/
1246 static struct lpfc_nodelist *
1247 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1248 struct lpfc_nodelist *ndlp)
1249 {
1250 struct lpfc_vport *vport = ndlp->vport;
1251 struct lpfc_nodelist *new_ndlp;
1252 struct lpfc_rport_data *rdata;
1253 struct fc_rport *rport;
1254 struct serv_parm *sp;
1255 uint8_t name[sizeof(struct lpfc_name)];
1256 uint32_t rc, keepDID = 0;
1257
1258 /* Fabric nodes can have the same WWPN so we don't bother searching
1259 * by WWPN. Just return the ndlp that was given to us.
1260 */
1261 if (ndlp->nlp_type & NLP_FABRIC)
1262 return ndlp;
1263
1264 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1265 memset(name, 0, sizeof(struct lpfc_name));
1266
1267 /* Now we find out if the NPort we are logging into matches the WWPN
1268 * we have for that ndlp. If not, we have some work to do.
1269 */
1270 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1271
1272 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
1273 return ndlp;
1274
1275 if (!new_ndlp) {
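		/*
		 * No node with this WWPN exists yet. Compare against the
		 * zero-filled "name" buffer: an all-zero nlp_portname means
		 * the @ndlp has no WWPN assigned yet, so it can simply be
		 * reused.
		 */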
1276 rc = memcmp(&ndlp->nlp_portname, name,
1277 sizeof(struct lpfc_name));
1278 if (!rc)
1279 return ndlp;
1280 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
1281 if (!new_ndlp)
1282 return ndlp;
1283 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
1284 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
1285 rc = memcmp(&ndlp->nlp_portname, name,
1286 sizeof(struct lpfc_name));
1287 if (!rc)
1288 return ndlp;
1289 new_ndlp = lpfc_enable_node(vport, new_ndlp,
1290 NLP_STE_UNUSED_NODE);
1291 if (!new_ndlp)
1292 return ndlp;
1293 keepDID = new_ndlp->nlp_DID;
1294 } else
1295 keepDID = new_ndlp->nlp_DID;
1296
1297 lpfc_unreg_rpi(vport, new_ndlp);
1298 new_ndlp->nlp_DID = ndlp->nlp_DID;
1299 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1300
1301 if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
1302 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1303 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1304
1305 /* Set state will put new_ndlp on to node list if not already done */
1306 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1307
1308 /* Move this back to NPR state */
1309 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1310 /* The new_ndlp is replacing ndlp totally, so we need
1311 * to put ndlp on UNUSED list and try to free it.
1312 */
1313
1314 /* Fix up the rport accordingly */
1315 rport = ndlp->rport;
1316 if (rport) {
1317 rdata = rport->dd_data;
1318 if (rdata->pnode == ndlp) {
1319 lpfc_nlp_put(ndlp);
1320 ndlp->rport = NULL;
1321 rdata->pnode = lpfc_nlp_get(new_ndlp);
1322 new_ndlp->rport = rport;
1323 }
1324 new_ndlp->nlp_type = ndlp->nlp_type;
1325 }
1326 /* We shall actually free the ndlp with both nlp_DID and
1327 * nlp_portname fields equal to 0, to avoid leaving an unusable
1328 * ndlp on the nodelist.
1329 */
1330 if (ndlp->nlp_DID == 0) {
1331 spin_lock_irq(&phba->ndlp_lock);
1332 NLP_SET_FREE_REQ(ndlp);
1333 spin_unlock_irq(&phba->ndlp_lock);
1334 }
1335
1336 /* Two ndlps cannot have the same did on the nodelist */
1337 ndlp->nlp_DID = keepDID;
1338 lpfc_drop_node(vport, ndlp);
1339 }
1340 else {
1341 lpfc_unreg_rpi(vport, ndlp);
1342 /* Two ndlps cannot have the same did */
1343 ndlp->nlp_DID = keepDID;
1344 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1345 }
1346 return new_ndlp;
1347 }
1348
1349 /**
1350 * lpfc_end_rscn - Check and handle more rscn for a vport
1351 * @vport: pointer to a host virtual N_Port data structure.
1352 *
1353 * This routine checks whether more Registration State Change
1354 * Notifications (RSCNs) came in while the discovery state machine was in
1355 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1356 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1357 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1358 * handling the RSCNs.
1359 **/
1360 void
1361 lpfc_end_rscn(struct lpfc_vport *vport)
1362 {
1363 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1364
1365 if (vport->fc_flag & FC_RSCN_MODE) {
1366 /*
1367 * Check to see if more RSCNs came in while we were
1368 * processing this one.
1369 */
1370 if (vport->fc_rscn_id_cnt ||
1371 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1372 lpfc_els_handle_rscn(vport);
1373 else {
1374 spin_lock_irq(shost->host_lock);
1375 vport->fc_flag &= ~FC_RSCN_MODE;
1376 spin_unlock_irq(shost->host_lock);
1377 }
1378 }
1379 }
1380
1381 /**
1382 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1383 * @phba: pointer to lpfc hba data structure.
1384 * @cmdiocb: pointer to lpfc command iocb data structure.
1385 * @rspiocb: pointer to lpfc response iocb data structure.
1386 *
1387 * This routine is the completion callback function for issuing the Port
1388 * Login (PLOGI) command. For PLOGI completion, there must be an active
1389 * ndlp on the vport node list that matches the remote node ID from the
1390 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply
1391 * ignored and the command IOCB released. The PLOGI response IOCB status is
1392 * checked for error conditions. If an error status is reported, a PLOGI
1393 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1394 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1395 * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discover State
1396 * Machine (DSM) for this PLOGI completion. Finally, it checks whether
1397 * there are additional N_Port nodes with the vport that need to perform
1398 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1399 * PLOGIs.
1400 **/
1401 static void
1402 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1403 struct lpfc_iocbq *rspiocb)
1404 {
1405 struct lpfc_vport *vport = cmdiocb->vport;
1406 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1407 IOCB_t *irsp;
1408 struct lpfc_nodelist *ndlp;
1409 struct lpfc_dmabuf *prsp;
1410 int disc, rc, did, type;
1411
1412 /* we pass cmdiocb to state machine which needs rspiocb as well */
1413 cmdiocb->context_un.rsp_iocb = rspiocb;
1414
1415 irsp = &rspiocb->iocb;
1416 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1417 "PLOGI cmpl: status:x%x/x%x did:x%x",
1418 irsp->ulpStatus, irsp->un.ulpWord[4],
1419 irsp->un.elsreq64.remoteID);
1420
1421 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1422 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1423 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1424 "0136 PLOGI completes to NPort x%x "
1425 "with no ndlp. Data: x%x x%x x%x\n",
1426 irsp->un.elsreq64.remoteID,
1427 irsp->ulpStatus, irsp->un.ulpWord[4],
1428 irsp->ulpIoTag);
1429 goto out;
1430 }
1431
1432 /* Since ndlp can be freed in the disc state machine, note if this node
1433 * is being used during discovery.
1434 */
1435 spin_lock_irq(shost->host_lock);
1436 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1437 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1438 spin_unlock_irq(shost->host_lock);
1439 rc = 0;
1440
1441 /* PLOGI completes to NPort <nlp_DID> */
1442 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1443 "0102 PLOGI completes to NPort x%x "
1444 "Data: x%x x%x x%x x%x x%x\n",
1445 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1446 irsp->ulpTimeout, disc, vport->num_disc_nodes);
1447 /* Check to see if link went down during discovery */
1448 if (lpfc_els_chk_latt(vport)) {
1449 spin_lock_irq(shost->host_lock);
1450 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1451 spin_unlock_irq(shost->host_lock);
1452 goto out;
1453 }
1454
1455 /* ndlp could be freed in DSM, save these values now */
1456 type = ndlp->nlp_type;
1457 did = ndlp->nlp_DID;
1458
1459 if (irsp->ulpStatus) {
1460 /* Check for retry */
1461 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1462 /* ELS command is being retried */
1463 if (disc) {
1464 spin_lock_irq(shost->host_lock);
1465 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1466 spin_unlock_irq(shost->host_lock);
1467 }
1468 goto out;
1469 }
1470 /* PLOGI failed */
1471 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1472 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1473 ndlp->nlp_DID, irsp->ulpStatus,
1474 irsp->un.ulpWord[4]);
1475 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1476 if (lpfc_error_lost_link(irsp))
1477 rc = NLP_STE_FREED_NODE;
1478 else
1479 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1480 NLP_EVT_CMPL_PLOGI);
1481 } else {
1482 /* Good status, call state machine */
1483 prsp = list_entry(((struct lpfc_dmabuf *)
1484 cmdiocb->context2)->list.next,
1485 struct lpfc_dmabuf, list);
1486 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
1487 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1488 NLP_EVT_CMPL_PLOGI);
1489 }
1490
1491 if (disc && vport->num_disc_nodes) {
1492 /* Check to see if there are more PLOGIs to be sent */
1493 lpfc_more_plogi(vport);
1494
1495 if (vport->num_disc_nodes == 0) {
1496 spin_lock_irq(shost->host_lock);
1497 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1498 spin_unlock_irq(shost->host_lock);
1499
1500 lpfc_can_disctmo(vport);
1501 lpfc_end_rscn(vport);
1502 }
1503 }
1504
1505 out:
1506 lpfc_els_free_iocb(phba, cmdiocb);
1507 return;
1508 }
1509
1510 /**
1511 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
1512 * @vport: pointer to a host virtual N_Port data structure.
1513 * @did: destination port identifier.
1514 * @retry: number of retries to the command IOCB.
1515 *
1516 * This routine issues a Port Login (PLOGI) command to a remote N_Port
1517 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
1518 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
1519 * This routine constructs the proper fields of the PLOGI IOCB and invokes
1520 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
1521 *
1522 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1523 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1524 * will be stored into the context1 field of the IOCB for the completion
1525 * callback function to the PLOGI ELS command.
1526 *
1527 * Return code
1528 * 0 - Successfully issued a plogi for @vport
1529 * 1 - failed to issue a plogi for @vport
1530 **/
1531 int
1532 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1533 {
1534 struct lpfc_hba *phba = vport->phba;
1535 struct serv_parm *sp;
1536 IOCB_t *icmd;
1537 struct lpfc_nodelist *ndlp;
1538 struct lpfc_iocbq *elsiocb;
1539 struct lpfc_sli *psli;
1540 uint8_t *pcmd;
1541 uint16_t cmdsize;
1542 int ret;
1543
1544 psli = &phba->sli;
1545
1546 ndlp = lpfc_findnode_did(vport, did);
1547 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1548 ndlp = NULL;
1549
1550 /* If ndlp is not NULL, we will bump the reference count on it */
1551 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1552 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
1553 ELS_CMD_PLOGI);
1554 if (!elsiocb)
1555 return 1;
1556
1557 icmd = &elsiocb->iocb;
1558 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1559
1560 /* For PLOGI request, remainder of payload is service parameters */
1561 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
1562 pcmd += sizeof(uint32_t);
1563 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1564 sp = (struct serv_parm *) pcmd;
1565
1566 if (sp->cmn.fcphLow < FC_PH_4_3)
1567 sp->cmn.fcphLow = FC_PH_4_3;
1568
1569 if (sp->cmn.fcphHigh < FC_PH3)
1570 sp->cmn.fcphHigh = FC_PH3;
1571
1572 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1573 "Issue PLOGI: did:x%x",
1574 did, 0, 0);
1575
1576 phba->fc_stat.elsXmitPLOGI++;
1577 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1578 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1579
1580 if (ret == IOCB_ERROR) {
1581 lpfc_els_free_iocb(phba, elsiocb);
1582 return 1;
1583 }
1584 return 0;
1585 }
1586
1587 /**
1588 * lpfc_cmpl_els_prli - Completion callback function for prli
1589 * @phba: pointer to lpfc hba data structure.
1590 * @cmdiocb: pointer to lpfc command iocb data structure.
1591 * @rspiocb: pointer to lpfc response iocb data structure.
1592 *
1593 * This routine is the completion callback function for a Process Login
1594 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
1595 * status. If an error status is reported, a PRLI retry shall be attempted
1596 * by invoking the lpfc_els_retry() routine. Otherwise, the
1597 * NLP_EVT_CMPL_PRLI event is sent to the Discover State Machine (DSM) for this
1598 * ndlp to mark the PRLI completion.
1599 **/
1600 static void
1601 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1602 struct lpfc_iocbq *rspiocb)
1603 {
1604 struct lpfc_vport *vport = cmdiocb->vport;
1605 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1606 IOCB_t *irsp;
1607 struct lpfc_sli *psli;
1608 struct lpfc_nodelist *ndlp;
1609
1610 psli = &phba->sli;
1611 /* we pass cmdiocb to state machine which needs rspiocb as well */
1612 cmdiocb->context_un.rsp_iocb = rspiocb;
1613
1614 irsp = &(rspiocb->iocb);
1615 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1616 spin_lock_irq(shost->host_lock);
1617 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1618 spin_unlock_irq(shost->host_lock);
1619
1620 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1621 "PRLI cmpl: status:x%x/x%x did:x%x",
1622 irsp->ulpStatus, irsp->un.ulpWord[4],
1623 ndlp->nlp_DID);
1624 /* PRLI completes to NPort <nlp_DID> */
1625 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1626 "0103 PRLI completes to NPort x%x "
1627 "Data: x%x x%x x%x x%x\n",
1628 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1629 irsp->ulpTimeout, vport->num_disc_nodes);
1630
1631 vport->fc_prli_sent--;
1632 /* Check to see if link went down during discovery */
1633 if (lpfc_els_chk_latt(vport))
1634 goto out;
1635
1636 if (irsp->ulpStatus) {
1637 /* Check for retry */
1638 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1639 /* ELS command is being retried */
1640 goto out;
1641 }
1642 /* PRLI failed */
1643 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1644 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
1645 ndlp->nlp_DID, irsp->ulpStatus,
1646 irsp->un.ulpWord[4]);
1647 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1648 if (lpfc_error_lost_link(irsp))
1649 goto out;
1650 else
1651 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1652 NLP_EVT_CMPL_PRLI);
1653 } else
1654 /* Good status, call state machine */
1655 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1656 NLP_EVT_CMPL_PRLI);
1657 out:
1658 lpfc_els_free_iocb(phba, cmdiocb);
1659 return;
1660 }
1661
1662 /**
1663 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
1664 * @vport: pointer to a host virtual N_Port data structure.
1665 * @ndlp: pointer to a node-list data structure.
1666 * @retry: number of retries to the command IOCB.
1667 *
1668 * This routine issues a Process Login (PRLI) ELS command for the
1669 * @vport. The PRLI service parameters are set up in the payload of the
1670 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
1671 * is put to the IOCB completion callback func field before invoking the
1672 * routine lpfc_sli_issue_iocb() to send out PRLI command.
1673 *
1674 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1675 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1676 * will be stored into the context1 field of the IOCB for the completion
1677 * callback function to the PRLI ELS command.
1678 *
1679 * Return code
1680 * 0 - successfully issued prli iocb command for @vport
1681 * 1 - failed to issue prli iocb command for @vport
1682 **/
1683 int
1684 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1685 uint8_t retry)
1686 {
1687 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1688 struct lpfc_hba *phba = vport->phba;
1689 PRLI *npr;
1690 IOCB_t *icmd;
1691 struct lpfc_iocbq *elsiocb;
1692 uint8_t *pcmd;
1693 uint16_t cmdsize;
1694
1695 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1696 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1697 ndlp->nlp_DID, ELS_CMD_PRLI);
1698 if (!elsiocb)
1699 return 1;
1700
1701 icmd = &elsiocb->iocb;
1702 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1703
1704 /* For PRLI request, remainder of payload is service parameters */
1705 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
1706 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
1707 pcmd += sizeof(uint32_t);
1708
1709 /* For PRLI, remainder of payload is PRLI parameter page */
1710 npr = (PRLI *) pcmd;
1711 /*
1712 * If our firmware version is 3.20 or later,
1713 * set the following bits for FC-TAPE support.
1714 */
1715 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
1716 npr->ConfmComplAllowed = 1;
1717 npr->Retry = 1;
1718 npr->TaskRetryIdReq = 1;
1719 }
1720 npr->estabImagePair = 1;
1721 npr->readXferRdyDis = 1;
1722
1723 /* For FCP support */
1724 npr->prliType = PRLI_FCP_TYPE;
1725 npr->initiatorFunc = 1;
1726
1727 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1728 "Issue PRLI: did:x%x",
1729 ndlp->nlp_DID, 0, 0);
1730
1731 phba->fc_stat.elsXmitPRLI++;
1732 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
1733 spin_lock_irq(shost->host_lock);
1734 ndlp->nlp_flag |= NLP_PRLI_SND;
1735 spin_unlock_irq(shost->host_lock);
1736 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1737 IOCB_ERROR) {
1738 spin_lock_irq(shost->host_lock);
1739 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1740 spin_unlock_irq(shost->host_lock);
1741 lpfc_els_free_iocb(phba, elsiocb);
1742 return 1;
1743 }
1744 vport->fc_prli_sent++;
1745 return 0;
1746 }
1747
1748 /**
1749 * lpfc_rscn_disc - Perform rscn discovery for a vport
1750 * @vport: pointer to a host virtual N_Port data structure.
1751 *
1752 * This routine performs Registration State Change Notification (RSCN)
1753 * discovery for a @vport. If the @vport's node port recovery count is not
1754 * zero, it will invoke lpfc_els_disc_plogi() to perform PLOGI for all
1755 * the nodes that need recovery. If no PLOGIs were needed through
1756 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
1757 * invoked to check for and handle any additional RSCNs that arrived while
1758 * the current ones were being processed.
1759 **/
1760 static void
1761 lpfc_rscn_disc(struct lpfc_vport *vport)
1762 {
1763 lpfc_can_disctmo(vport);
1764
1765 /* RSCN discovery */
1766 /* go thru NPR nodes and issue ELS PLOGIs */
1767 if (vport->fc_npr_cnt)
1768 if (lpfc_els_disc_plogi(vport))
1769 return;
1770
1771 lpfc_end_rscn(vport);
1772 }
1773
1774 /**
1775 * lpfc_adisc_done - Complete the adisc phase of discovery
1776 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
1777 *
1778 * This function is called when the final ADISC is completed during discovery.
1779 * This function handles clearing link attention or issuing reg_vpi depending
1780 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
1781 * discovery.
1782 * This function is called with no locks held.
1783 **/
1784 static void
1785 lpfc_adisc_done(struct lpfc_vport *vport)
1786 {
1787 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1788 struct lpfc_hba *phba = vport->phba;
1789
1790 /*
1791 * For NPIV, cmpl_reg_vpi will set port_state to READY,
1792 * and continue discovery.
1793 */
1794 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1795 !(vport->fc_flag & FC_RSCN_MODE) &&
1796 (phba->sli_rev < LPFC_SLI_REV4)) {
1797 lpfc_issue_reg_vpi(phba, vport);
1798 return;
1799 }
1800 /*
1801 * For SLI2, we need to set port_state to READY
1802 * and continue discovery.
1803 */
1804 if (vport->port_state < LPFC_VPORT_READY) {
1805 /* If we get here, there is nothing to ADISC */
1806 if (vport->port_type == LPFC_PHYSICAL_PORT)
1807 lpfc_issue_clear_la(phba, vport);
1808 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1809 vport->num_disc_nodes = 0;
1810 /* go thru NPR list, issue ELS PLOGIs */
1811 if (vport->fc_npr_cnt)
1812 lpfc_els_disc_plogi(vport);
1813 if (!vport->num_disc_nodes) {
1814 spin_lock_irq(shost->host_lock);
1815 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1816 spin_unlock_irq(shost->host_lock);
1817 lpfc_can_disctmo(vport);
1818 lpfc_end_rscn(vport);
1819 }
1820 }
1821 vport->port_state = LPFC_VPORT_READY;
1822 } else
1823 lpfc_rscn_disc(vport);
1824 }
1825
1826 /**
1827 * lpfc_more_adisc - Issue more adisc as needed
1828 * @vport: pointer to a host virtual N_Port data structure.
1829 *
1830 * This routine determines whether there are more ndlps on a @vport's
1831 * node list that need to have Address Discover (ADISC) issued. If so, it will
1832 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
1833 * remaining nodes which need to have ADISC sent.
1834 **/
1835 void
1836 lpfc_more_adisc(struct lpfc_vport *vport)
1837 {
1838 int sentadisc;
1839
1840 if (vport->num_disc_nodes)
1841 vport->num_disc_nodes--;
1842 /* Continue discovery with <num_disc_nodes> ADISCs to go */
1843 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1844 "0210 Continue discovery with %d ADISCs to go "
1845 "Data: x%x x%x x%x\n",
1846 vport->num_disc_nodes, vport->fc_adisc_cnt,
1847 vport->fc_flag, vport->port_state);
1848 /* Check to see if there are more ADISCs to be sent */
1849 if (vport->fc_flag & FC_NLP_MORE) {
1850 lpfc_set_disctmo(vport);
1851 /* go thru NPR nodes and issue any remaining ELS ADISCs */
1852 sentadisc = lpfc_els_disc_adisc(vport);
1853 }
1854 if (!vport->num_disc_nodes)
1855 lpfc_adisc_done(vport);
1856 return;
1857 }
1858
1859 /**
1860 * lpfc_cmpl_els_adisc - Completion callback function for adisc
1861 * @phba: pointer to lpfc hba data structure.
1862 * @cmdiocb: pointer to lpfc command iocb data structure.
1863 * @rspiocb: pointer to lpfc response iocb data structure.
1864 *
1865 * This routine is the completion function for issuing the Address Discover
1866 * (ADISC) command. It first checks to see whether link went down during
1867 * the discovery process. If so, the node will be marked for node port
1868 * recovery so that the link attention handler can issue a discovery IOCB,
1869 * and the routine exits. Otherwise, the response status is checked. If an
1870 * error was reported in the response status, the ADISC command shall be
1871 * retried by invoking the lpfc_els_retry() routine. If no error was
1872 * reported, the state machine is invoked to perform the state transition
1873 * with respect to the NLP_EVT_CMPL_ADISC event.
1874 **/
1875 static void
1876 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1877 struct lpfc_iocbq *rspiocb)
1878 {
1879 struct lpfc_vport *vport = cmdiocb->vport;
1880 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1881 IOCB_t *irsp;
1882 struct lpfc_nodelist *ndlp;
1883 int disc;
1884
1885 /* we pass cmdiocb to state machine which needs rspiocb as well */
1886 cmdiocb->context_un.rsp_iocb = rspiocb;
1887
1888 irsp = &(rspiocb->iocb);
1889 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1890
1891 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1892 "ADISC cmpl: status:x%x/x%x did:x%x",
1893 irsp->ulpStatus, irsp->un.ulpWord[4],
1894 ndlp->nlp_DID);
1895
1896 /* Since ndlp can be freed in the disc state machine, note if this node
1897 * is being used during discovery.
1898 */
1899 spin_lock_irq(shost->host_lock);
1900 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1901 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
1902 spin_unlock_irq(shost->host_lock);
1903 /* ADISC completes to NPort <nlp_DID> */
1904 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1905 "0104 ADISC completes to NPort x%x "
1906 "Data: x%x x%x x%x x%x x%x\n",
1907 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1908 irsp->ulpTimeout, disc, vport->num_disc_nodes);
1909 /* Check to see if link went down during discovery */
1910 if (lpfc_els_chk_latt(vport)) {
1911 spin_lock_irq(shost->host_lock);
1912 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1913 spin_unlock_irq(shost->host_lock);
1914 goto out;
1915 }
1916
1917 if (irsp->ulpStatus) {
1918 /* Check for retry */
1919 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1920 /* ELS command is being retried */
1921 if (disc) {
1922 spin_lock_irq(shost->host_lock);
1923 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1924 spin_unlock_irq(shost->host_lock);
1925 lpfc_set_disctmo(vport);
1926 }
1927 goto out;
1928 }
1929 /* ADISC failed */
1930 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1931 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
1932 ndlp->nlp_DID, irsp->ulpStatus,
1933 irsp->un.ulpWord[4]);
1934 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1935 if (!lpfc_error_lost_link(irsp))
1936 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1937 NLP_EVT_CMPL_ADISC);
1938 } else
1939 /* Good status, call state machine */
1940 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1941 NLP_EVT_CMPL_ADISC);
1942
1943 /* Check to see if there are more ADISCs to be sent */
1944 if (disc && vport->num_disc_nodes)
1945 lpfc_more_adisc(vport);
1946 out:
1947 lpfc_els_free_iocb(phba, cmdiocb);
1948 return;
1949 }
1950
1951 /**
1952 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
1953 * @vport: pointer to a virtual N_Port data structure.
1954 * @ndlp: pointer to a node-list data structure.
1955 * @retry: number of retries to the command IOCB.
1956 *
1957 * This routine issues an Address Discover (ADISC) for an @ndlp on a
1958 * @vport. It prepares the payload of the ADISC ELS command, updates the
1959 * state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
1960 * to issue the ADISC ELS command.
1961 *
1962 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1963 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1964 * will be stored into the context1 field of the IOCB for the completion
1965 * callback function to the ADISC ELS command.
1966 *
1967 * Return code
1968 * 0 - successfully issued adisc
1969 * 1 - failed to issue adisc
1970 **/
1971 int
1972 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1973 uint8_t retry)
1974 {
1975 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1976 struct lpfc_hba *phba = vport->phba;
1977 ADISC *ap;
1978 IOCB_t *icmd;
1979 struct lpfc_iocbq *elsiocb;
1980 uint8_t *pcmd;
1981 uint16_t cmdsize;
1982
1983 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
1984 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1985 ndlp->nlp_DID, ELS_CMD_ADISC);
1986 if (!elsiocb)
1987 return 1;
1988
1989 icmd = &elsiocb->iocb;
1990 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1991
1992 /* For ADISC request, remainder of payload is service parameters */
1993 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
1994 pcmd += sizeof(uint32_t);
1995
1996 /* Fill in ADISC payload */
1997 ap = (ADISC *) pcmd;
1998 ap->hardAL_PA = phba->fc_pref_ALPA;
1999 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2000 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2001 ap->DID = be32_to_cpu(vport->fc_myDID);
2002
2003 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2004 "Issue ADISC: did:x%x",
2005 ndlp->nlp_DID, 0, 0);
2006
2007 phba->fc_stat.elsXmitADISC++;
2008 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2009 spin_lock_irq(shost->host_lock);
2010 ndlp->nlp_flag |= NLP_ADISC_SND;
2011 spin_unlock_irq(shost->host_lock);
2012 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2013 IOCB_ERROR) {
2014 spin_lock_irq(shost->host_lock);
2015 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2016 spin_unlock_irq(shost->host_lock);
2017 lpfc_els_free_iocb(phba, elsiocb);
2018 return 1;
2019 }
2020 return 0;
2021 }
2022
2023 /**
2024 * lpfc_cmpl_els_logo - Completion callback function for logo
2025 * @phba: pointer to lpfc hba data structure.
2026 * @cmdiocb: pointer to lpfc command iocb data structure.
2027 * @rspiocb: pointer to lpfc response iocb data structure.
2028 *
2029 * This routine is the completion function for issuing the ELS Logout (LOGO)
2030 * command. If no error status was reported from the LOGO response, the
2031 * state machine of the associated ndlp shall be invoked for transition with
2032 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2033 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2034 **/
2035 static void
2036 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2037 struct lpfc_iocbq *rspiocb)
2038 {
2039 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2040 struct lpfc_vport *vport = ndlp->vport;
2041 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2042 IOCB_t *irsp;
2043 struct lpfc_sli *psli;
2044
2045 psli = &phba->sli;
2046 /* we pass cmdiocb to state machine which needs rspiocb as well */
2047 cmdiocb->context_un.rsp_iocb = rspiocb;
2048
2049 irsp = &(rspiocb->iocb);
2050 spin_lock_irq(shost->host_lock);
2051 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2052 spin_unlock_irq(shost->host_lock);
2053
2054 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2055 "LOGO cmpl: status:x%x/x%x did:x%x",
2056 irsp->ulpStatus, irsp->un.ulpWord[4],
2057 ndlp->nlp_DID);
2058 /* LOGO completes to NPort <nlp_DID> */
2059 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2060 "0105 LOGO completes to NPort x%x "
2061 "Data: x%x x%x x%x x%x\n",
2062 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2063 irsp->ulpTimeout, vport->num_disc_nodes);
2064 /* Check to see if link went down during discovery */
2065 if (lpfc_els_chk_latt(vport))
2066 goto out;
2067
2068 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2069 /* NLP_EVT_DEVICE_RM should unregister the RPI
2070 * which should abort all outstanding IOs.
2071 */
2072 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2073 NLP_EVT_DEVICE_RM);
2074 goto out;
2075 }
2076
2077 if (irsp->ulpStatus) {
2078 /* Check for retry */
2079 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
2080 /* ELS command is being retried */
2081 goto out;
2082 /* LOGO failed */
2083 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2084 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2085 ndlp->nlp_DID, irsp->ulpStatus,
2086 irsp->un.ulpWord[4]);
2087 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2088 if (lpfc_error_lost_link(irsp))
2089 goto out;
2090 else
2091 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2092 NLP_EVT_CMPL_LOGO);
2093 } else
2094 /* Good status, call state machine.
2095 * This will unregister the rpi if needed.
2096 */
2097 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2098 NLP_EVT_CMPL_LOGO);
2099 out:
2100 lpfc_els_free_iocb(phba, cmdiocb);
2101 return;
2102 }
2103
2104 /**
2105 * lpfc_issue_els_logo - Issue a logo to a node on a vport
2106 * @vport: pointer to a virtual N_Port data structure.
2107 * @ndlp: pointer to a node-list data structure.
2108 * @retry: number of retries to the command IOCB.
2109 *
2110 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2111 * to a remote node, referred to by the @ndlp on a @vport. It constructs the
2112 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2113 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2114 *
2115 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2116 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2117 * will be stored into the context1 field of the IOCB for the completion
2118 * callback function to the LOGO ELS command.
2119 *
2120 * Return code
2121 * 0 - successfully issued logo
2122 * 1 - failed to issue logo
2123 **/
2124 int
2125 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2126 uint8_t retry)
2127 {
2128 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2129 struct lpfc_hba *phba = vport->phba;
2130 IOCB_t *icmd;
2131 struct lpfc_iocbq *elsiocb;
2132 uint8_t *pcmd;
2133 uint16_t cmdsize;
2134 int rc;
2135
2136 spin_lock_irq(shost->host_lock);
2137 if (ndlp->nlp_flag & NLP_LOGO_SND) {
2138 spin_unlock_irq(shost->host_lock);
2139 return 0;
2140 }
2141 spin_unlock_irq(shost->host_lock);
2142
2143 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2144 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2145 ndlp->nlp_DID, ELS_CMD_LOGO);
2146 if (!elsiocb)
2147 return 1;
2148
2149 icmd = &elsiocb->iocb;
2150 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2151 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
2152 pcmd += sizeof(uint32_t);
2153
2154 /* Fill in LOGO payload */
2155 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
2156 pcmd += sizeof(uint32_t);
2157 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
2158
2159 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2160 "Issue LOGO: did:x%x",
2161 ndlp->nlp_DID, 0, 0);
2162
2163 phba->fc_stat.elsXmitLOGO++;
2164 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2165 spin_lock_irq(shost->host_lock);
2166 ndlp->nlp_flag |= NLP_LOGO_SND;
2167 spin_unlock_irq(shost->host_lock);
2168 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2169
2170 if (rc == IOCB_ERROR) {
2171 spin_lock_irq(shost->host_lock);
2172 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2173 spin_unlock_irq(shost->host_lock);
2174 lpfc_els_free_iocb(phba, elsiocb);
2175 return 1;
2176 }
2177 return 0;
2178 }
2179
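/*
 * Illustrative sketch (not part of the driver; the function name is
 * hypothetical and it is never called): because lpfc_issue_els_logo()
 * returns 0 without sending anything when NLP_LOGO_SND is already set,
 * a caller can simply request the logout and, on success, park the node
 * in NLP_STE_NPR_NODE, as the ELS_CMD_LOGO case of
 * lpfc_els_retry_delay_handler() does later in this file.
 */
static int
lpfc_example_start_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	if (lpfc_issue_els_logo(vport, ndlp, 0))
		return 1;	/* LOGO could not be issued */

	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return 0;
}
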
2180 /**
2181 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
2182 * @phba: pointer to lpfc hba data structure.
2183 * @cmdiocb: pointer to lpfc command iocb data structure.
2184 * @rspiocb: pointer to lpfc response iocb data structure.
2185 *
2186 * This routine is a generic completion callback function for ELS commands.
2187 * Specifically, it is the callback function which does not need to perform
2188 * any command specific operations. It is currently used by the ELS command
2189 * issuing routines for the ELS State Change Request (SCR),
2190 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2191 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
2192 * certain debug loggings, this callback function simply invokes the
2193 * lpfc_els_chk_latt() routine to check whether link went down during the
2194 * discovery process.
2195 **/
2196 static void
2197 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2198 struct lpfc_iocbq *rspiocb)
2199 {
2200 struct lpfc_vport *vport = cmdiocb->vport;
2201 IOCB_t *irsp;
2202
2203 irsp = &rspiocb->iocb;
2204
2205 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2206 "ELS cmd cmpl: status:x%x/x%x did:x%x",
2207 irsp->ulpStatus, irsp->un.ulpWord[4],
2208 irsp->un.elsreq64.remoteID);
2209 /* ELS cmd tag <ulpIoTag> completes */
2210 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2211 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2212 irsp->ulpIoTag, irsp->ulpStatus,
2213 irsp->un.ulpWord[4], irsp->ulpTimeout);
2214 /* Check to see if link went down during discovery */
2215 lpfc_els_chk_latt(vport);
2216 lpfc_els_free_iocb(phba, cmdiocb);
2217 return;
2218 }
2219
2220 /**
2221 * lpfc_issue_els_scr - Issue a scr to a node on a vport
2222 * @vport: pointer to a host virtual N_Port data structure.
2223 * @nportid: N_Port identifier to the remote node.
2224 * @retry: number of retries to the command IOCB.
2225 *
2226 * This routine issues a State Change Request (SCR) to a fabric node
2227 * on a @vport. The remote node @nportid is passed into the function. It
2228 * first searches the @vport node list to find the matching ndlp. If no such
2229 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2230 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2231 * routine is invoked to send the SCR IOCB.
2232 *
2233 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2234 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2235 * will be stored into the context1 field of the IOCB for the completion
2236 * callback function to the SCR ELS command.
2237 *
2238 * Return code
2239 * 0 - Successfully issued scr command
2240 * 1 - Failed to issue scr command
2241 **/
2242 int
2243 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2244 {
2245 struct lpfc_hba *phba = vport->phba;
2246 IOCB_t *icmd;
2247 struct lpfc_iocbq *elsiocb;
2248 struct lpfc_sli *psli;
2249 uint8_t *pcmd;
2250 uint16_t cmdsize;
2251 struct lpfc_nodelist *ndlp;
2252
2253 psli = &phba->sli;
2254 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2255
2256 ndlp = lpfc_findnode_did(vport, nportid);
2257 if (!ndlp) {
2258 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2259 if (!ndlp)
2260 return 1;
2261 lpfc_nlp_init(vport, ndlp, nportid);
2262 lpfc_enqueue_node(vport, ndlp);
2263 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2264 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2265 if (!ndlp)
2266 return 1;
2267 }
2268
2269 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2270 ndlp->nlp_DID, ELS_CMD_SCR);
2271
2272 if (!elsiocb) {
2273 /* This will trigger the release of the node just
2274 * allocated
2275 */
2276 lpfc_nlp_put(ndlp);
2277 return 1;
2278 }
2279
2280 icmd = &elsiocb->iocb;
2281 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2282
2283 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
2284 pcmd += sizeof(uint32_t);
2285
2286 /* For SCR, remainder of payload is SCR parameter page */
2287 memset(pcmd, 0, sizeof(SCR));
2288 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
2289
2290 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2291 "Issue SCR: did:x%x",
2292 ndlp->nlp_DID, 0, 0);
2293
2294 phba->fc_stat.elsXmitSCR++;
2295 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2296 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2297 IOCB_ERROR) {
2298 /* The additional lpfc_nlp_put will cause the following
2299 * lpfc_els_free_iocb routine to trigger the release of
2300 * the node.
2301 */
2302 lpfc_nlp_put(ndlp);
2303 lpfc_els_free_iocb(phba, elsiocb);
2304 return 1;
2305 }
2306 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2307 * trigger the release of node.
2308 */
2309 lpfc_nlp_put(ndlp);
2310 return 0;
2311 }
2312
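/*
 * Illustrative sketch (not part of the driver; the wrapper name is
 * hypothetical and it is never called): an SCR is normally directed at
 * the Fabric Controller well-known address 0xFFFFFD so that the fabric
 * will deliver RSCNs to this port. lpfc_issue_els_scr() creates (or
 * re-enables) the ndlp for that DID itself and drops its temporary
 * reference via lpfc_nlp_put(), so a caller only supplies the nportid.
 */
static int
lpfc_example_register_for_rscn(struct lpfc_vport *vport)
{
	/* 0xFFFFFD: Fabric Controller well-known address (see FC-LS) */
	return lpfc_issue_els_scr(vport, 0xFFFFFD, 0);
}
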
2313 /**
2314 * lpfc_issue_els_farpr - Issue a farpr to a node on a vport
2315 * @vport: pointer to a host virtual N_Port data structure.
2316 * @nportid: N_Port identifier to the remote node.
2317 * @retry: number of retries to the command IOCB.
2318 *
2319 * This routine issues a Fibre Channel Address Resolution Response
2320 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
2321 * is passed into the function. It first searches the @vport node list to find
2322 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
2323 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
2324 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
2325 *
2326 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2327 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2328 * will be stored into the context1 field of the IOCB for the completion
2329 * callback function to the FARPR ELS command.
2330 *
2331 * Return code
2332 * 0 - Successfully issued farpr command
2333 * 1 - Failed to issue farpr command
2334 **/
2335 static int
2336 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2337 {
2338 struct lpfc_hba *phba = vport->phba;
2339 IOCB_t *icmd;
2340 struct lpfc_iocbq *elsiocb;
2341 struct lpfc_sli *psli;
2342 FARP *fp;
2343 uint8_t *pcmd;
2344 uint32_t *lp;
2345 uint16_t cmdsize;
2346 struct lpfc_nodelist *ondlp;
2347 struct lpfc_nodelist *ndlp;
2348
2349 psli = &phba->sli;
2350 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2351
2352 ndlp = lpfc_findnode_did(vport, nportid);
2353 if (!ndlp) {
2354 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2355 if (!ndlp)
2356 return 1;
2357 lpfc_nlp_init(vport, ndlp, nportid);
2358 lpfc_enqueue_node(vport, ndlp);
2359 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2360 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2361 if (!ndlp)
2362 return 1;
2363 }
2364
2365 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2366 ndlp->nlp_DID, ELS_CMD_RNID);
2367 if (!elsiocb) {
2368 /* This will trigger the release of the node just
2369 * allocated
2370 */
2371 lpfc_nlp_put(ndlp);
2372 return 1;
2373 }
2374
2375 icmd = &elsiocb->iocb;
2376 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2377
2378 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
2379 pcmd += sizeof(uint32_t);
2380
2381 /* Fill in FARPR payload */
2382 fp = (FARP *) (pcmd);
2383 memset(fp, 0, sizeof(FARP));
2384 lp = (uint32_t *) pcmd;
2385 *lp++ = be32_to_cpu(nportid);
2386 *lp++ = be32_to_cpu(vport->fc_myDID);
2387 fp->Rflags = 0;
2388 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
2389
2390 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
2391 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2392 ondlp = lpfc_findnode_did(vport, nportid);
2393 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
2394 memcpy(&fp->OportName, &ondlp->nlp_portname,
2395 sizeof(struct lpfc_name));
2396 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
2397 sizeof(struct lpfc_name));
2398 }
2399
2400 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2401 "Issue FARPR: did:x%x",
2402 ndlp->nlp_DID, 0, 0);
2403
2404 phba->fc_stat.elsXmitFARPR++;
2405 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2406 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2407 IOCB_ERROR) {
2408 /* The additional lpfc_nlp_put will cause the following
2409 * lpfc_els_free_iocb routine to trigger the release of
2410 * the node.
2411 */
2412 lpfc_nlp_put(ndlp);
2413 lpfc_els_free_iocb(phba, elsiocb);
2414 return 1;
2415 }
2416 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2417 * trigger the release of the node.
2418 */
2419 lpfc_nlp_put(ndlp);
2420 return 0;
2421 }
2422
2423 /**
2424 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
2425 * @vport: pointer to a host virtual N_Port data structure.
2426 * @nlp: pointer to a node-list data structure.
2427 *
2428 * This routine cancels the timer with a delayed IOCB-command retry for
2429 * a @vport's @ndlp. It stops the timer for the delayed function retry and
2430 * removes the ELS retry event if one is present. In addition, if the
2431 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
2432 * commands are sent for the @vport's nodes that require issuing discovery
2433 * ADISC.
2434 **/
2435 void
2436 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
2437 {
2438 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2439 struct lpfc_work_evt *evtp;
2440
2441 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
2442 return;
2443 spin_lock_irq(shost->host_lock);
2444 nlp->nlp_flag &= ~NLP_DELAY_TMO;
2445 spin_unlock_irq(shost->host_lock);
2446 del_timer_sync(&nlp->nlp_delayfunc);
2447 nlp->nlp_last_elscmd = 0;
2448 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
2449 list_del_init(&nlp->els_retry_evt.evt_listp);
2450 /* Decrement nlp reference count held for the delayed retry */
2451 evtp = &nlp->els_retry_evt;
2452 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
2453 }
2454 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
2455 spin_lock_irq(shost->host_lock);
2456 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2457 spin_unlock_irq(shost->host_lock);
2458 if (vport->num_disc_nodes) {
2459 if (vport->port_state < LPFC_VPORT_READY) {
2460 /* Check if there are more ADISCs to be sent */
2461 lpfc_more_adisc(vport);
2462 } else {
2463 /* Check if there are more PLOGIs to be sent */
2464 lpfc_more_plogi(vport);
2465 if (vport->num_disc_nodes == 0) {
2466 spin_lock_irq(shost->host_lock);
2467 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2468 spin_unlock_irq(shost->host_lock);
2469 lpfc_can_disctmo(vport);
2470 lpfc_end_rscn(vport);
2471 }
2472 }
2473 }
2474 }
2475 return;
2476 }
2477
2478 /**
2479 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
2480 * @ptr: holder for the pointer to the timer function associated data (ndlp).
2481 *
2482 * This routine is invoked by the ndlp delayed-function timer to check
2483 * whether there is any pending ELS retry event(s) with the node. If not, it
2484 * simply returns. Otherwise, if there is at least one ELS delayed event, it
2485 * adds the delayed events to the HBA work list and invokes the
2486 * lpfc_worker_wake_up() routine to wake up the worker thread to process the
2487 * event. Note that lpfc_nlp_get() is called before posting the event to
2488 * the work list to hold reference count of ndlp so that it guarantees the
2489 * reference to ndlp will still be available when the worker thread gets
2490 * to the event associated with the ndlp.
2491 **/
2492 void
2493 lpfc_els_retry_delay(unsigned long ptr)
2494 {
2495 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2496 struct lpfc_vport *vport = ndlp->vport;
2497 struct lpfc_hba *phba = vport->phba;
2498 unsigned long flags;
2499 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
2500
2501 spin_lock_irqsave(&phba->hbalock, flags);
2502 if (!list_empty(&evtp->evt_listp)) {
2503 spin_unlock_irqrestore(&phba->hbalock, flags);
2504 return;
2505 }
2506
2507 /* We need to hold the node by incrementing the reference
2508 * count until the queued work is done
2509 */
2510 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
2511 if (evtp->evt_arg1) {
2512 evtp->evt = LPFC_EVT_ELS_RETRY;
2513 list_add_tail(&evtp->evt_listp, &phba->work_list);
2514 lpfc_worker_wake_up(phba);
2515 }
2516 spin_unlock_irqrestore(&phba->hbalock, flags);
2517 return;
2518 }
2519
2520 /**
2521 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
2522 * @ndlp: pointer to a node-list data structure.
2523 *
2524 * This routine is the worker-thread handler for processing the @ndlp delayed
2525 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
2526 * the last ELS command from the associated ndlp and invokes the proper ELS
2527 * function according to the delayed ELS command to retry the command.
2528 **/
2529 void
2530 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
2531 {
2532 struct lpfc_vport *vport = ndlp->vport;
2533 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2534 uint32_t cmd, did, retry;
2535
2536 spin_lock_irq(shost->host_lock);
2537 did = ndlp->nlp_DID;
2538 cmd = ndlp->nlp_last_elscmd;
2539 ndlp->nlp_last_elscmd = 0;
2540
2541 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2542 spin_unlock_irq(shost->host_lock);
2543 return;
2544 }
2545
2546 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2547 spin_unlock_irq(shost->host_lock);
2548 /*
2549 * If a discovery event readded nlp_delayfunc after timer
2550 * firing and before processing the timer, cancel the
2551 * nlp_delayfunc.
2552 */
2553 del_timer_sync(&ndlp->nlp_delayfunc);
2554 retry = ndlp->nlp_retry;
2555 ndlp->nlp_retry = 0;
2556
2557 switch (cmd) {
2558 case ELS_CMD_FLOGI:
2559 lpfc_issue_els_flogi(vport, ndlp, retry);
2560 break;
2561 case ELS_CMD_PLOGI:
2562 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
2563 ndlp->nlp_prev_state = ndlp->nlp_state;
2564 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2565 }
2566 break;
2567 case ELS_CMD_ADISC:
2568 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
2569 ndlp->nlp_prev_state = ndlp->nlp_state;
2570 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2571 }
2572 break;
2573 case ELS_CMD_PRLI:
2574 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
2575 ndlp->nlp_prev_state = ndlp->nlp_state;
2576 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2577 }
2578 break;
2579 case ELS_CMD_LOGO:
2580 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
2581 ndlp->nlp_prev_state = ndlp->nlp_state;
2582 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2583 }
2584 break;
2585 case ELS_CMD_FDISC:
2586 lpfc_issue_els_fdisc(vport, ndlp, retry);
2587 break;
2588 }
2589 return;
2590 }
2591
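/*
 * Illustrative sketch (not part of the driver; the helper name and its
 * parameters are hypothetical and it is never called): the delayed-retry
 * machinery above is armed the same way lpfc_els_retry() arms it below:
 * record the command and retry count on the ndlp, set NLP_DELAY_TMO under
 * the host lock, and (re)start nlp_delayfunc. When the timer fires,
 * lpfc_els_retry_delay() queues an LPFC_EVT_ELS_RETRY work item and the
 * worker thread ends up in lpfc_els_retry_delay_handler().
 */
static void
lpfc_example_arm_delayed_retry(struct lpfc_vport *vport,
			       struct lpfc_nodelist *ndlp,
			       uint32_t elscmd, uint32_t retry,
			       uint32_t delay_ms)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp->nlp_retry = retry;
	ndlp->nlp_last_elscmd = elscmd;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	/* delay is specified in milliseconds */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(delay_ms));
}
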
2592 /**
2593 * lpfc_els_retry - Make retry decision on an els command iocb
2594 * @phba: pointer to lpfc hba data structure.
2595 * @cmdiocb: pointer to lpfc command iocb data structure.
2596 * @rspiocb: pointer to lpfc response iocb data structure.
2597 *
2598 * This routine makes a retry decision on an ELS command IOCB, which has
2599 * failed. The following ELS IOCBs use this function for retrying the command
2600 * when a previously issued command responded with an error status: FLOGI, PLOGI,
2601 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
2602 * returned error status, it makes the decision whether a retry shall be
2603 * issued for the command, and whether a retry shall be made immediately or
2604 * delayed. In the former case, the corresponding ELS command issuing-function
2605 * is called to retry the command. In the latter case, the ELS command shall
2606 * be posted to the ndlp delayed event and the delayed-function timer set on
2607 * the ndlp for the delayed command issuing.
2608 *
2609 * Return code
2610 * 0 - No retry of els command is made
2611 * 1 - Immediate or delayed retry of els command is made
2612 **/
2613 static int
2614 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2615 struct lpfc_iocbq *rspiocb)
2616 {
2617 struct lpfc_vport *vport = cmdiocb->vport;
2618 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2619 IOCB_t *irsp = &rspiocb->iocb;
2620 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2621 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2622 uint32_t *elscmd;
2623 struct ls_rjt stat;
2624 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
2625 int logerr = 0;
2626 uint32_t cmd = 0;
2627 uint32_t did;
2628
2629
2630 /* Note: context2 may be 0 for internal driver abort
2631 * of a delayed ELS command.
2632 */
2633
2634 if (pcmd && pcmd->virt) {
2635 elscmd = (uint32_t *) (pcmd->virt);
2636 cmd = *elscmd++;
2637 }
2638
2639 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
2640 did = ndlp->nlp_DID;
2641 else {
2642 /* We should only hit this case for retrying PLOGI */
2643 did = irsp->un.elsreq64.remoteID;
2644 ndlp = lpfc_findnode_did(vport, did);
2645 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
2646 && (cmd != ELS_CMD_PLOGI))
2647 return 1;
2648 }
2649
2650 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2651 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
2652 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
2653
2654 switch (irsp->ulpStatus) {
2655 case IOSTAT_FCP_RSP_ERROR:
2656 case IOSTAT_REMOTE_STOP:
2657 break;
2658
2659 case IOSTAT_LOCAL_REJECT:
2660 switch ((irsp->un.ulpWord[4] & 0xff)) {
2661 case IOERR_LOOP_OPEN_FAILURE:
2662 if (cmd == ELS_CMD_FLOGI) {
2663 if (PCI_DEVICE_ID_HORNET ==
2664 phba->pcidev->device) {
2665 phba->fc_topology = TOPOLOGY_LOOP;
2666 phba->pport->fc_myDID = 0;
2667 phba->alpa_map[0] = 0;
2668 phba->alpa_map[1] = 0;
2669 }
2670 }
2671 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
2672 delay = 1000;
2673 retry = 1;
2674 break;
2675
2676 case IOERR_ILLEGAL_COMMAND:
2677 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2678 "0124 Retry illegal cmd x%x "
2679 "retry:x%x delay:x%x\n",
2680 cmd, cmdiocb->retry, delay);
2681 retry = 1;
2682 /* All command's retry policy */
2683 maxretry = 8;
2684 if (cmdiocb->retry > 2)
2685 delay = 1000;
2686 break;
2687
2688 case IOERR_NO_RESOURCES:
2689 logerr = 1; /* HBA out of resources */
2690 retry = 1;
2691 if (cmdiocb->retry > 100)
2692 delay = 100;
2693 maxretry = 250;
2694 break;
2695
2696 case IOERR_ILLEGAL_FRAME:
2697 delay = 100;
2698 retry = 1;
2699 break;
2700
2701 case IOERR_SEQUENCE_TIMEOUT:
2702 case IOERR_INVALID_RPI:
2703 retry = 1;
2704 break;
2705 }
2706 break;
2707
2708 case IOSTAT_NPORT_RJT:
2709 case IOSTAT_FABRIC_RJT:
2710 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
2711 retry = 1;
2712 break;
2713 }
2714 break;
2715
2716 case IOSTAT_NPORT_BSY:
2717 case IOSTAT_FABRIC_BSY:
2718 logerr = 1; /* Fabric / Remote NPort out of resources */
2719 retry = 1;
2720 break;
2721
2722 case IOSTAT_LS_RJT:
2723 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
2724 /* Added for Vendor specific support
2725 * Just keep retrying for these Rsn / Exp codes
2726 */
2727 switch (stat.un.b.lsRjtRsnCode) {
2728 case LSRJT_UNABLE_TPC:
2729 if (stat.un.b.lsRjtRsnCodeExp ==
2730 LSEXP_CMD_IN_PROGRESS) {
2731 if (cmd == ELS_CMD_PLOGI) {
2732 delay = 1000;
2733 maxretry = 48;
2734 }
2735 retry = 1;
2736 break;
2737 }
2738 if (cmd == ELS_CMD_PLOGI) {
2739 delay = 1000;
2740 maxretry = lpfc_max_els_tries + 1;
2741 retry = 1;
2742 break;
2743 }
2744 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2745 (cmd == ELS_CMD_FDISC) &&
2746 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
2747 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2748 "0125 FDISC Failed (x%x). "
2749 "Fabric out of resources\n",
2750 stat.un.lsRjtError);
2751 lpfc_vport_set_state(vport,
2752 FC_VPORT_NO_FABRIC_RSCS);
2753 }
2754 break;
2755
2756 case LSRJT_LOGICAL_BSY:
2757 if ((cmd == ELS_CMD_PLOGI) ||
2758 (cmd == ELS_CMD_PRLI)) {
2759 delay = 1000;
2760 maxretry = 48;
2761 } else if (cmd == ELS_CMD_FDISC) {
2762 /* FDISC retry policy */
2763 maxretry = 48;
2764 if (cmdiocb->retry >= 32)
2765 delay = 1000;
2766 }
2767 retry = 1;
2768 break;
2769
2770 case LSRJT_LOGICAL_ERR:
2771 /* There are some cases where switches return this
2772 * error when they are not ready and should be returning
2773 * Logical Busy. We should delay every time.
2774 */
2775 if (cmd == ELS_CMD_FDISC &&
2776 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
2777 maxretry = 3;
2778 delay = 1000;
2779 retry = 1;
2780 break;
2781 }
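			/* falls through to the LSRJT_PROTOCOL_ERR handling below */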
2782 case LSRJT_PROTOCOL_ERR:
2783 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2784 (cmd == ELS_CMD_FDISC) &&
2785 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
2786 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
2787 ) {
2788 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2789 "0122 FDISC Failed (x%x). "
2790 "Fabric Detected Bad WWN\n",
2791 stat.un.lsRjtError);
2792 lpfc_vport_set_state(vport,
2793 FC_VPORT_FABRIC_REJ_WWN);
2794 }
2795 break;
2796 }
2797 break;
2798
2799 case IOSTAT_INTERMED_RSP:
2800 case IOSTAT_BA_RJT:
2801 break;
2802
2803 default:
2804 break;
2805 }
2806
2807 if (did == FDMI_DID)
2808 retry = 1;
2809
2810 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
2811 (phba->fc_topology != TOPOLOGY_LOOP) &&
2812 !lpfc_error_lost_link(irsp)) {
2813 /* FLOGI retry policy */
2814 retry = 1;
2815 /* retry forever */
2816 maxretry = 0;
2817 if (cmdiocb->retry >= 100)
2818 delay = 5000;
2819 else if (cmdiocb->retry >= 32)
2820 delay = 1000;
2821 }
2822
2823 cmdiocb->retry++;
2824 if (maxretry && (cmdiocb->retry >= maxretry)) {
2825 phba->fc_stat.elsRetryExceeded++;
2826 retry = 0;
2827 }
2828
2829 if ((vport->load_flag & FC_UNLOADING) != 0)
2830 retry = 0;
2831
2832 if (retry) {
2833
2834 /* Retry ELS command <elsCmd> to remote NPORT <did> */
2835 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2836 "0107 Retry ELS command x%x to remote "
2837 "NPORT x%x Data: x%x x%x\n",
2838 cmd, did, cmdiocb->retry, delay);
2839
2840 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
2841 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
2842 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
2843 /* Don't reset timer for no resources */
2844
2845 /* If discovery / RSCN timer is running, reset it */
2846 if (timer_pending(&vport->fc_disctmo) ||
2847 (vport->fc_flag & FC_RSCN_MODE))
2848 lpfc_set_disctmo(vport);
2849 }
2850
2851 phba->fc_stat.elsXmitRetry++;
2852 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
2853 phba->fc_stat.elsDelayRetry++;
2854 ndlp->nlp_retry = cmdiocb->retry;
2855
2856 /* delay is specified in milliseconds */
2857 mod_timer(&ndlp->nlp_delayfunc,
2858 jiffies + msecs_to_jiffies(delay));
2859 spin_lock_irq(shost->host_lock);
2860 ndlp->nlp_flag |= NLP_DELAY_TMO;
2861 spin_unlock_irq(shost->host_lock);
2862
2863 ndlp->nlp_prev_state = ndlp->nlp_state;
2864 if (cmd == ELS_CMD_PRLI)
2865 lpfc_nlp_set_state(vport, ndlp,
2866 NLP_STE_REG_LOGIN_ISSUE);
2867 else
2868 lpfc_nlp_set_state(vport, ndlp,
2869 NLP_STE_NPR_NODE);
2870 ndlp->nlp_last_elscmd = cmd;
2871
2872 return 1;
2873 }
2874 switch (cmd) {
2875 case ELS_CMD_FLOGI:
2876 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
2877 return 1;
2878 case ELS_CMD_FDISC:
2879 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
2880 return 1;
2881 case ELS_CMD_PLOGI:
2882 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2883 ndlp->nlp_prev_state = ndlp->nlp_state;
2884 lpfc_nlp_set_state(vport, ndlp,
2885 NLP_STE_PLOGI_ISSUE);
2886 }
2887 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
2888 return 1;
2889 case ELS_CMD_ADISC:
2890 ndlp->nlp_prev_state = ndlp->nlp_state;
2891 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2892 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
2893 return 1;
2894 case ELS_CMD_PRLI:
2895 ndlp->nlp_prev_state = ndlp->nlp_state;
2896 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2897 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
2898 return 1;
2899 case ELS_CMD_LOGO:
2900 ndlp->nlp_prev_state = ndlp->nlp_state;
2901 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2902 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
2903 return 1;
2904 }
2905 }
2906 /* No retry ELS command <elsCmd> to remote NPORT <did> */
2907 if (logerr) {
2908 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2909 "0137 No retry ELS command x%x to remote "
2910 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
2911 cmd, did, irsp->ulpStatus,
2912 irsp->un.ulpWord[4]);
2913 }
2914 else {
2915 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2916 "0108 No retry ELS command x%x to remote "
2917 "NPORT x%x Retried:%d Error:x%x/%x\n",
2918 cmd, did, cmdiocb->retry, irsp->ulpStatus,
2919 irsp->un.ulpWord[4]);
2920 }
2921 return 0;
2922 }
2923
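/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical and it is never called): the fabric-login back-off
 * implemented above can be read as a tiny policy function. FLOGI/FDISC
 * on a fabric topology retries forever (maxretry == 0), waiting 1 second
 * once 32 attempts have been made and 5 seconds once 100 have been made.
 */
static uint32_t
lpfc_example_fabric_retry_delay(uint32_t retry_count)
{
	if (retry_count >= 100)
		return 5000;	/* milliseconds */
	if (retry_count >= 32)
		return 1000;
	return 0;		/* retry immediately */
}
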
2924 /**
2925 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
2926 * @phba: pointer to lpfc hba data structure.
2927 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
2928 *
2929 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
2930 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
2931 * checks to see whether there is a lpfc DMA buffer associated with the
2932 * response of the command IOCB. If so, it will be released before releasing
2933 * the lpfc DMA buffer associated with the IOCB itself.
2934 *
2935 * Return code
2936 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
2937 **/
2938 static int
2939 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
2940 {
2941 struct lpfc_dmabuf *buf_ptr;
2942
2943 /* Free the response before processing the command. */
2944 if (!list_empty(&buf_ptr1->list)) {
2945 list_remove_head(&buf_ptr1->list, buf_ptr,
2946 struct lpfc_dmabuf,
2947 list);
2948 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2949 kfree(buf_ptr);
2950 }
2951 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2952 kfree(buf_ptr1);
2953 return 0;
2954 }
2955
2956 /**
2957 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
2958 * @phba: pointer to lpfc hba data structure.
2959 * @buf_ptr: pointer to the lpfc dma buffer data structure.
2960 *
2961 * This routine releases the lpfc Direct Memory Access (DMA) buffer
2962 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
2963 * pool.
2964 *
2965 * Return code
2966 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
2967 **/
2968 static int
2969 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
2970 {
2971 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2972 kfree(buf_ptr);
2973 return 0;
2974 }
2975
2976 /**
2977 * lpfc_els_free_iocb - Free a command iocb and its associated resources
2978 * @phba: pointer to lpfc hba data structure.
2979 * @elsiocb: pointer to lpfc els command iocb data structure.
2980 *
2981 * This routine frees a command IOCB and its associated resources. The
2982 * command IOCB data structure contains the reference to various associated
2983 * resources, these fields must be set to NULL if the associated reference
2984 * not present:
2985 * context1 - reference to ndlp
2986 * context2 - reference to cmd
2987 * context2->next - reference to rsp
2988 * context3 - reference to bpl
2989 *
2990 * It first properly decrements the reference count held on ndlp for the
2991 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
2992 * set, it invokes the lpfc_els_free_data() routine to release the Direct
2993 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
2994 * adds the DMA buffer to the @phba data structure for the delayed release.
2995 * If reference to the Buffer Pointer List (BPL) is present, the
2996 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
2997 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
2998 * invoked to release the IOCB data structure back to @phba IOCBQ list.
2999 *
3000 * Return code
3001 * 0 - Success (currently, always return 0)
3002 **/
3003 int
3004 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
3005 {
3006 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
3007 struct lpfc_nodelist *ndlp;
3008
3009 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
3010 if (ndlp) {
3011 if (ndlp->nlp_flag & NLP_DEFER_RM) {
3012 lpfc_nlp_put(ndlp);
3013
3014 /* If the ndlp is not being used by another discovery
3015 * thread, free it.
3016 */
3017 if (!lpfc_nlp_not_used(ndlp)) {
3018 /* If ndlp is being used by another discovery
3019 * thread, just clear NLP_DEFER_RM
3020 */
3021 ndlp->nlp_flag &= ~NLP_DEFER_RM;
3022 }
3023 }
3024 else
3025 lpfc_nlp_put(ndlp);
3026 elsiocb->context1 = NULL;
3027 }
3028 /* context2 = cmd, context2->next = rsp, context3 = bpl */
3029 if (elsiocb->context2) {
3030 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
3031 /* Firmware could still be in progress of DMAing
3032 * payload, so don't free data buffer till after
3033 * a hbeat.
3034 */
3035 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
3036 buf_ptr = elsiocb->context2;
3037 elsiocb->context2 = NULL;
3038 if (buf_ptr) {
3039 buf_ptr1 = NULL;
3040 spin_lock_irq(&phba->hbalock);
3041 if (!list_empty(&buf_ptr->list)) {
3042 list_remove_head(&buf_ptr->list,
3043 buf_ptr1, struct lpfc_dmabuf,
3044 list);
3045 INIT_LIST_HEAD(&buf_ptr1->list);
3046 list_add_tail(&buf_ptr1->list,
3047 &phba->elsbuf);
3048 phba->elsbuf_cnt++;
3049 }
3050 INIT_LIST_HEAD(&buf_ptr->list);
3051 list_add_tail(&buf_ptr->list, &phba->elsbuf);
3052 phba->elsbuf_cnt++;
3053 spin_unlock_irq(&phba->hbalock);
3054 }
3055 } else {
3056 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3057 lpfc_els_free_data(phba, buf_ptr1);
3058 }
3059 }
3060
3061 if (elsiocb->context3) {
3062 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
3063 lpfc_els_free_bpl(phba, buf_ptr);
3064 }
3065 lpfc_sli_release_iocbq(phba, elsiocb);
3066 return 0;
3067 }
3068
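/*
 * Illustrative sketch (not part of the driver; both function names are
 * hypothetical and never called): lpfc_els_free_iocb() is the single
 * release point for an ELS command iocb, and the issuing routines above
 * all follow the same ownership rule. When lpfc_sli_issue_iocb() returns
 * IOCB_ERROR, the issuer frees the iocb itself; once the command has been
 * accepted, the completion callback makes the final lpfc_els_free_iocb()
 * call. The skeleton below shows that shape.
 */
static void
lpfc_example_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_iocbq *rspiocb)
{
	/* ... command-specific completion handling would go here ... */
	lpfc_els_free_iocb(phba, cmdiocb);
}

static int
lpfc_example_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   uint16_t cmdsize, uint32_t elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;

	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				     ndlp->nlp_DID, elscmd);
	if (!elsiocb)
		return 1;

	elsiocb->iocb_cmpl = lpfc_example_cmpl;
	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
	    IOCB_ERROR) {
		/* not accepted: the issuer releases the iocb */
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	/* accepted: lpfc_example_cmpl() will release it on completion */
	return 0;
}
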
3069 /**
3070 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
3071 * @phba: pointer to lpfc hba data structure.
3072 * @cmdiocb: pointer to lpfc command iocb data structure.
3073 * @rspiocb: pointer to lpfc response iocb data structure.
3074 *
3075 * This routine is the completion callback function to the Logout (LOGO)
3076 * Accept (ACC) Response ELS command. This routine is invoked to indicate
3077 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
3078 * release the ndlp if it has the last reference remaining (reference count
3079 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
3080 * field to NULL to inform the following lpfc_els_free_iocb() routine no
3081 * ndlp reference count needs to be decremented. Otherwise, the ndlp
3082 * reference use-count shall be decremented by the lpfc_els_free_iocb()
3083 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
3084 * IOCB data structure.
3085 **/
3086 static void
3087 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3088 struct lpfc_iocbq *rspiocb)
3089 {
3090 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3091 struct lpfc_vport *vport = cmdiocb->vport;
3092 IOCB_t *irsp;
3093
3094 irsp = &rspiocb->iocb;
3095 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3096 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
3097 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
3098 /* ACC to LOGO completes to NPort <nlp_DID> */
3099 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3100 "0109 ACC to LOGO completes to NPort x%x "
3101 "Data: x%x x%x x%x\n",
3102 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3103 ndlp->nlp_rpi);
3104
3105 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
3106 /* NPort Recovery mode or node is just allocated */
3107 if (!lpfc_nlp_not_used(ndlp)) {
3108 /* If the ndlp is being used by another discovery
3109 * thread, just unregister the RPI.
3110 */
3111 lpfc_unreg_rpi(vport, ndlp);
3112 } else {
3113 /* Indicate the node has already released, should
3114 * not reference to it from within lpfc_els_free_iocb.
3115 */
3116 cmdiocb->context1 = NULL;
3117 }
3118 }
3119 lpfc_els_free_iocb(phba, cmdiocb);
3120 return;
3121 }
3122
3123 /**
3124 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
3125 * @phba: pointer to lpfc hba data structure.
3126 * @pmb: pointer to the driver internal queue element for mailbox command.
3127 *
3128 * This routine is the completion callback function for unregister default
3129 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
3130 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
3131 * decrements the ndlp reference count held for this completion callback
3132 * function. After that, it invokes the lpfc_nlp_not_used() to check
3133 * whether there is only one reference left on the ndlp. If so, it will
3134 * perform one more decrement and trigger the release of the ndlp.
3135 **/
3136 void
3137 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3138 {
3139 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3140 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3141
3142 /*
3143 * This routine is used to register and unregister in previous SLI
3144 * modes.
3145 */
3146 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
3147 (phba->sli_rev == LPFC_SLI_REV4))
3148 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3149
3150 pmb->context1 = NULL;
3151 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3152 kfree(mp);
3153 mempool_free(pmb, phba->mbox_mem_pool);
3154 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3155 lpfc_nlp_put(ndlp);
3156 /* This is the end of the default RPI cleanup logic for this
3157 * ndlp. If no other discovery threads are using this ndlp.
3158 * we should free all resources associated with it.
3159 */
3160 lpfc_nlp_not_used(ndlp);
3161 }
3162
3163 return;
3164 }
3165
3166 /**
3167 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
3168 * @phba: pointer to lpfc hba data structure.
3169 * @cmdiocb: pointer to lpfc command iocb data structure.
3170 * @rspiocb: pointer to lpfc response iocb data structure.
3171 *
3172 * This routine is the completion callback function for ELS Response IOCB
3173 * command. In the normal case, this callback function just properly sets the
3174 * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
3175 * field in the command IOCB is not NULL, the referred mailbox command will
3176 * be sent out, and then the lpfc_els_free_iocb() routine is invoked to release
3177 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
3178 * link down event occurred during the discovery, the lpfc_nlp_not_used()
3179 * routine shall be invoked to try to release the ndlp if no other threads
3180 * are currently referring to it.
3181 **/
3182 static void
3183 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3184 struct lpfc_iocbq *rspiocb)
3185 {
3186 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3187 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3188 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
3189 IOCB_t *irsp;
3190 uint8_t *pcmd;
3191 LPFC_MBOXQ_t *mbox = NULL;
3192 struct lpfc_dmabuf *mp = NULL;
3193 uint32_t ls_rjt = 0;
3194
3195 irsp = &rspiocb->iocb;
3196
3197 if (cmdiocb->context_un.mbox)
3198 mbox = cmdiocb->context_un.mbox;
3199
3200 /* First determine if this is a LS_RJT cmpl. Note, this callback
3201 * function can have cmdiocb->context1 (ndlp) field set to NULL.
3202 */
3203 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
3204 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3205 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
3206 /* A LS_RJT associated with Default RPI cleanup has its own
3207 * separate code path.
3208 */
3209 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3210 ls_rjt = 1;
3211 }
3212
3213 /* Check to see if link went down during discovery */
3214 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
3215 if (mbox) {
3216 mp = (struct lpfc_dmabuf *) mbox->context1;
3217 if (mp) {
3218 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3219 kfree(mp);
3220 }
3221 mempool_free(mbox, phba->mbox_mem_pool);
3222 }
3223 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3224 (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3225 if (lpfc_nlp_not_used(ndlp)) {
3226 ndlp = NULL;
3227 /* Indicate the node has already been released;
3228 * do not reference it from within
3229 * the routine lpfc_els_free_iocb.
3230 */
3231 cmdiocb->context1 = NULL;
3232 }
3233 goto out;
3234 }
3235
3236 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3237 "ELS rsp cmpl: status:x%x/x%x did:x%x",
3238 irsp->ulpStatus, irsp->un.ulpWord[4],
3239 cmdiocb->iocb.un.elsreq64.remoteID);
3240 /* ELS response tag <ulpIoTag> completes */
3241 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3242 "0110 ELS response tag x%x completes "
3243 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3244 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3245 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3246 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3247 ndlp->nlp_rpi);
3248 if (mbox) {
3249 if ((rspiocb->iocb.ulpStatus == 0)
3250 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
3251 lpfc_unreg_rpi(vport, ndlp);
3252 /* Increment reference count to ndlp to hold the
3253 * reference to ndlp for the callback function.
3254 */
3255 mbox->context2 = lpfc_nlp_get(ndlp);
3256 mbox->vport = vport;
3257 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
3258 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3259 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3260 }
3261 else {
3262 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
3263 ndlp->nlp_prev_state = ndlp->nlp_state;
3264 lpfc_nlp_set_state(vport, ndlp,
3265 NLP_STE_REG_LOGIN_ISSUE);
3266 }
3267 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
3268 != MBX_NOT_FINISHED)
3269 goto out;
3270 else
3271 /* Decrement the ndlp reference count we
3272 * set for this failed mailbox command.
3273 */
3274 lpfc_nlp_put(ndlp);
3275
3276 /* ELS rsp: Cannot issue reg_login for <NPortid> */
3277 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3278 "0138 ELS rsp: Cannot issue reg_login for x%x "
3279 "Data: x%x x%x x%x\n",
3280 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3281 ndlp->nlp_rpi);
3282
3283 if (lpfc_nlp_not_used(ndlp)) {
3284 ndlp = NULL;
3285 /* Indicate node has already been released,
3286 * do not reference it from within
3287 * the routine lpfc_els_free_iocb.
3288 */
3289 cmdiocb->context1 = NULL;
3290 }
3291 } else {
3292 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
3293 if (!lpfc_error_lost_link(irsp) &&
3294 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
3295 if (lpfc_nlp_not_used(ndlp)) {
3296 ndlp = NULL;
3297 /* Indicate node has already been
3298 * released; do not reference it
3299 * from within the routine
3300 * lpfc_els_free_iocb.
3301 */
3302 cmdiocb->context1 = NULL;
3303 }
3304 }
3305 }
3306 mp = (struct lpfc_dmabuf *) mbox->context1;
3307 if (mp) {
3308 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3309 kfree(mp);
3310 }
3311 mempool_free(mbox, phba->mbox_mem_pool);
3312 }
3313 out:
3314 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3315 spin_lock_irq(shost->host_lock);
3316 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
3317 spin_unlock_irq(shost->host_lock);
3318
3319 /* If the node is not being used by another discovery thread,
3320 * and we are sending a reject, we are done with it.
3321 * Release driver reference count here and free associated
3322 * resources.
3323 */
3324 if (ls_rjt)
3325 if (lpfc_nlp_not_used(ndlp))
3326 /* Indicate node has already been released,
3327 * do not reference it from within
3328 * the routine lpfc_els_free_iocb.
3329 */
3330 cmdiocb->context1 = NULL;
3331 }
3332
3333 lpfc_els_free_iocb(phba, cmdiocb);
3334 return;
3335 }
3336
3337 /**
3338 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
3339 * @vport: pointer to a host virtual N_Port data structure.
3340 * @flag: the els command code to be accepted.
3341 * @oldiocb: pointer to the original lpfc command iocb data structure.
3342 * @ndlp: pointer to a node-list data structure.
3343 * @mbox: pointer to the driver internal queue element for mailbox command.
3344 *
3345 * This routine prepares and issues an Accept (ACC) response IOCB
3346 * command. It uses the @flag to properly set up the IOCB field for the
3347 * specific ACC response command to be issued and invokes the
3348 * lpfc_sli_issue_iocb() routine to send out the ACC response IOCB. If a
3349 * @mbox pointer is passed in, it will be put into the context_un.mbox
3350 * field of the IOCB for the completion callback function to issue the
3351 * mailbox command to the HBA later when the callback is invoked.
3352 *
3353 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3354 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3355 * will be stored into the context1 field of the IOCB for the completion
3356 * callback function to the corresponding response ELS IOCB command.
3357 *
3358 * Return code
3359 * 0 - Successfully issued acc response
3360 * 1 - Failed to issue acc response
3361 **/
3362 int
3363 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3364 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3365 LPFC_MBOXQ_t *mbox)
3366 {
3367 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3368 struct lpfc_hba *phba = vport->phba;
3369 IOCB_t *icmd;
3370 IOCB_t *oldcmd;
3371 struct lpfc_iocbq *elsiocb;
3372 struct lpfc_sli *psli;
3373 uint8_t *pcmd;
3374 uint16_t cmdsize;
3375 int rc;
3376 ELS_PKT *els_pkt_ptr;
3377
3378 psli = &phba->sli;
3379 oldcmd = &oldiocb->iocb;
3380
3381 switch (flag) {
3382 case ELS_CMD_ACC:
3383 cmdsize = sizeof(uint32_t);
3384 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3385 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
3386 if (!elsiocb) {
3387 spin_lock_irq(shost->host_lock);
3388 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3389 spin_unlock_irq(shost->host_lock);
3390 return 1;
3391 }
3392
3393 icmd = &elsiocb->iocb;
3394 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3395 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3396 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3397 pcmd += sizeof(uint32_t);
3398
3399 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3400 "Issue ACC: did:x%x flg:x%x",
3401 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3402 break;
3403 case ELS_CMD_PLOGI:
3404 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
3405 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3406 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
3407 if (!elsiocb)
3408 return 1;
3409
3410 icmd = &elsiocb->iocb;
3411 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3412 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3413
3414 if (mbox)
3415 elsiocb->context_un.mbox = mbox;
3416
3417 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3418 pcmd += sizeof(uint32_t);
3419 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
3420
3421 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3422 "Issue ACC PLOGI: did:x%x flg:x%x",
3423 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3424 break;
3425 case ELS_CMD_PRLO:
3426 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
3427 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3428 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
3429 if (!elsiocb)
3430 return 1;
3431
3432 icmd = &elsiocb->iocb;
3433 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3434 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3435
3436 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
3437 sizeof(uint32_t) + sizeof(PRLO));
3438 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
3439 els_pkt_ptr = (ELS_PKT *) pcmd;
3440 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
3441
3442 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3443 "Issue ACC PRLO: did:x%x flg:x%x",
3444 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3445 break;
3446 default:
3447 return 1;
3448 }
3449 /* Xmit ELS ACC response tag <ulpIoTag> */
3450 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3451 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
3452 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
3453 elsiocb->iotag, elsiocb->iocb.ulpContext,
3454 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3455 ndlp->nlp_rpi);
3456 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
3457 spin_lock_irq(shost->host_lock);
3458 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3459 spin_unlock_irq(shost->host_lock);
3460 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
3461 } else {
3462 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3463 }
3464
3465 phba->fc_stat.elsXmitACC++;
3466 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3467 if (rc == IOCB_ERROR) {
3468 lpfc_els_free_iocb(phba, elsiocb);
3469 return 1;
3470 }
3471 return 0;
3472 }
3473
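/*
 * Illustrative sketch only (not part of the driver): how an unsolicited-ELS
 * handler might use lpfc_els_rsp_acc() to accept a received PLOGI while
 * handing off a prepared REG_LOGIN mailbox to the completion path. The
 * helper name and the assumption that @mbox was already built by the
 * caller are hypothetical.
 */
static inline int example_accept_plogi(struct lpfc_vport *vport,
				       struct lpfc_iocbq *rx_iocb,
				       struct lpfc_nodelist *ndlp,
				       LPFC_MBOXQ_t *mbox)
{
	/* Non-zero return means the ACC IOCB could not be issued. */
	return lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, rx_iocb, ndlp, mbox);
}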
3474 /**
3475 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
3476 * @vport: pointer to a virtual N_Port data structure.
3477 * @rejectError: reject status (reason and explanation codes) to carry in the LS_RJT payload.
3478 * @oldiocb: pointer to the original lpfc command iocb data structure.
3479 * @ndlp: pointer to a node-list data structure.
3480 * @mbox: pointer to the driver internal queue element for mailbox command.
3481 *
3482 * This routine prepares and issues a Reject (RJT) response IOCB
3483 * command. If a @mbox pointer is passed in, it will be put into the
3484 * context_un.mbox field of the IOCB for the completion callback function
3485 * to issue to the HBA later.
3486 *
3487 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3488 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3489 * will be stored into the context1 field of the IOCB for the completion
3490 * callback function to the reject response ELS IOCB command.
3491 *
3492 * Return code
3493 * 0 - Successfully issued reject response
3494 * 1 - Failed to issue reject response
3495 **/
3496 int
3497 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3498 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3499 LPFC_MBOXQ_t *mbox)
3500 {
3501 struct lpfc_hba *phba = vport->phba;
3502 IOCB_t *icmd;
3503 IOCB_t *oldcmd;
3504 struct lpfc_iocbq *elsiocb;
3505 struct lpfc_sli *psli;
3506 uint8_t *pcmd;
3507 uint16_t cmdsize;
3508 int rc;
3509
3510 psli = &phba->sli;
3511 cmdsize = 2 * sizeof(uint32_t);
3512 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3513 ndlp->nlp_DID, ELS_CMD_LS_RJT);
3514 if (!elsiocb)
3515 return 1;
3516
3517 icmd = &elsiocb->iocb;
3518 oldcmd = &oldiocb->iocb;
3519 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3520 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3521
3522 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
3523 pcmd += sizeof(uint32_t);
3524 *((uint32_t *) (pcmd)) = rejectError;
3525
3526 if (mbox)
3527 elsiocb->context_un.mbox = mbox;
3528
3529 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
3530 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3531 "0129 Xmit ELS RJT x%x response tag x%x "
3532 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
3533 "rpi x%x\n",
3534 rejectError, elsiocb->iotag,
3535 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
3536 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
3537 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3538 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
3539 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
3540
3541 phba->fc_stat.elsXmitLSRJT++;
3542 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3543 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3544
3545 if (rc == IOCB_ERROR) {
3546 lpfc_els_free_iocb(phba, elsiocb);
3547 return 1;
3548 }
3549 return 0;
3550 }
3551
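/*
 * Illustrative sketch only (not part of the driver): the usual way callers
 * in this file build an LS_RJT and hand it to lpfc_els_rsp_reject(). The
 * helper name and the particular reason/explanation codes are arbitrary
 * examples, not a fixed API.
 */
static inline int example_reject_unsupported(struct lpfc_vport *vport,
					     struct lpfc_iocbq *rx_iocb,
					     struct lpfc_nodelist *ndlp)
{
	struct ls_rjt stat;

	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
	stat.un.b.vendorUnique = 0;
	return lpfc_els_rsp_reject(vport, stat.un.lsRjtError, rx_iocb, ndlp,
				   NULL);
}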
3552 /**
3553 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
3554 * @vport: pointer to a virtual N_Port data structure.
3555 * @oldiocb: pointer to the original lpfc command iocb data structure.
3556 * @ndlp: pointer to a node-list data structure.
3557 *
3558 * This routine prepares and issues an Accept (ACC) response to Address
3559 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
3560 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3561 *
3562 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3563 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3564 * will be stored into the context1 field of the IOCB for the completion
3565 * callback function to the ADISC Accept response ELS IOCB command.
3566 *
3567 * Return code
3568 * 0 - Successfully issued acc adisc response
3569 * 1 - Failed to issue adisc acc response
3570 **/
3571 int
3572 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3573 struct lpfc_nodelist *ndlp)
3574 {
3575 struct lpfc_hba *phba = vport->phba;
3576 ADISC *ap;
3577 IOCB_t *icmd, *oldcmd;
3578 struct lpfc_iocbq *elsiocb;
3579 uint8_t *pcmd;
3580 uint16_t cmdsize;
3581 int rc;
3582
3583 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
3584 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3585 ndlp->nlp_DID, ELS_CMD_ACC);
3586 if (!elsiocb)
3587 return 1;
3588
3589 icmd = &elsiocb->iocb;
3590 oldcmd = &oldiocb->iocb;
3591 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3592
3593 /* Xmit ADISC ACC response tag <ulpIoTag> */
3594 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3595 "0130 Xmit ADISC ACC response iotag x%x xri: "
3596 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
3597 elsiocb->iotag, elsiocb->iocb.ulpContext,
3598 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3599 ndlp->nlp_rpi);
3600 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3601
3602 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3603 pcmd += sizeof(uint32_t);
3604
3605 ap = (ADISC *) (pcmd);
3606 ap->hardAL_PA = phba->fc_pref_ALPA;
3607 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
3608 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3609 ap->DID = be32_to_cpu(vport->fc_myDID);
3610
3611 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3612 "Issue ACC ADISC: did:x%x flg:x%x",
3613 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3614
3615 phba->fc_stat.elsXmitACC++;
3616 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3617 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3618 if (rc == IOCB_ERROR) {
3619 lpfc_els_free_iocb(phba, elsiocb);
3620 return 1;
3621 }
3622 return 0;
3623 }
3624
3625 /**
3626 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
3627 * @vport: pointer to a virtual N_Port data structure.
3628 * @oldiocb: pointer to the original lpfc command iocb data structure.
3629 * @ndlp: pointer to a node-list data structure.
3630 *
3631 * This routine prepares and issues an Accept (ACC) response to Process
3632 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
3633 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3634 *
3635 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3636 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3637 * will be stored into the context1 field of the IOCB for the completion
3638 * callback function to the PRLI Accept response ELS IOCB command.
3639 *
3640 * Return code
3641 * 0 - Successfully issued acc prli response
3642 * 1 - Failed to issue acc prli response
3643 **/
3644 int
3645 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3646 struct lpfc_nodelist *ndlp)
3647 {
3648 struct lpfc_hba *phba = vport->phba;
3649 PRLI *npr;
3650 lpfc_vpd_t *vpd;
3651 IOCB_t *icmd;
3652 IOCB_t *oldcmd;
3653 struct lpfc_iocbq *elsiocb;
3654 struct lpfc_sli *psli;
3655 uint8_t *pcmd;
3656 uint16_t cmdsize;
3657 int rc;
3658
3659 psli = &phba->sli;
3660
3661 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
3662 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3663 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
3664 if (!elsiocb)
3665 return 1;
3666
3667 icmd = &elsiocb->iocb;
3668 oldcmd = &oldiocb->iocb;
3669 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3670 /* Xmit PRLI ACC response tag <ulpIoTag> */
3671 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3672 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
3673 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
3674 elsiocb->iotag, elsiocb->iocb.ulpContext,
3675 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3676 ndlp->nlp_rpi);
3677 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3678
3679 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
3680 pcmd += sizeof(uint32_t);
3681
3682 /* For PRLI, remainder of payload is PRLI parameter page */
3683 memset(pcmd, 0, sizeof(PRLI));
3684
3685 npr = (PRLI *) pcmd;
3686 vpd = &phba->vpd;
3687 /*
3688 * If the remote port is a target and our firmware version is 3.20 or
3689 * later, set the following bits for FC-TAPE support.
3690 */
3691 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
3692 (vpd->rev.feaLevelHigh >= 0x02)) {
3693 npr->ConfmComplAllowed = 1;
3694 npr->Retry = 1;
3695 npr->TaskRetryIdReq = 1;
3696 }
3697
3698 npr->acceptRspCode = PRLI_REQ_EXECUTED;
3699 npr->estabImagePair = 1;
3700 npr->readXferRdyDis = 1;
3701 npr->ConfmComplAllowed = 1;
3702
3703 npr->prliType = PRLI_FCP_TYPE;
3704 npr->initiatorFunc = 1;
3705
3706 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3707 "Issue ACC PRLI: did:x%x flg:x%x",
3708 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3709
3710 phba->fc_stat.elsXmitACC++;
3711 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3712
3713 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3714 if (rc == IOCB_ERROR) {
3715 lpfc_els_free_iocb(phba, elsiocb);
3716 return 1;
3717 }
3718 return 0;
3719 }
3720
3721 /**
3722 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
3723 * @vport: pointer to a virtual N_Port data structure.
3724 * @format: rnid command format.
3725 * @oldiocb: pointer to the original lpfc command iocb data structure.
3726 * @ndlp: pointer to a node-list data structure.
3727 *
3728 * This routine issues a Request Node Identification Data (RNID) Accept
3729 * (ACC) response. It constructs the RNID ACC response command according to
3730 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
3731 * issue the response. Note that this command does not need to hold the ndlp
3732 * reference count for the callback. So, the ndlp reference count taken by
3733 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
3734 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
3735 * there is no ndlp reference available.
3736 *
3737 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3738 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3739 * will be stored into the context1 field of the IOCB for the completion
3740 * callback function. However, for the RNID Accept Response ELS command,
3741 * this is undone later by this routine after the IOCB is allocated.
3742 *
3743 * Return code
3744 * 0 - Successfully issued acc rnid response
3745 * 1 - Failed to issue acc rnid response
3746 **/
3747 static int
3748 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3749 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
3750 {
3751 struct lpfc_hba *phba = vport->phba;
3752 RNID *rn;
3753 IOCB_t *icmd, *oldcmd;
3754 struct lpfc_iocbq *elsiocb;
3755 struct lpfc_sli *psli;
3756 uint8_t *pcmd;
3757 uint16_t cmdsize;
3758 int rc;
3759
3760 psli = &phba->sli;
3761 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
3762 + (2 * sizeof(struct lpfc_name));
3763 if (format)
3764 cmdsize += sizeof(RNID_TOP_DISC);
3765
3766 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3767 ndlp->nlp_DID, ELS_CMD_ACC);
3768 if (!elsiocb)
3769 return 1;
3770
3771 icmd = &elsiocb->iocb;
3772 oldcmd = &oldiocb->iocb;
3773 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3774 /* Xmit RNID ACC response tag <ulpIoTag> */
3775 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3776 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
3777 elsiocb->iotag, elsiocb->iocb.ulpContext);
3778 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3779 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3780 pcmd += sizeof(uint32_t);
3781
3782 memset(pcmd, 0, sizeof(RNID));
3783 rn = (RNID *) (pcmd);
3784 rn->Format = format;
3785 rn->CommonLen = (2 * sizeof(struct lpfc_name));
3786 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
3787 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3788 switch (format) {
3789 case 0:
3790 rn->SpecificLen = 0;
3791 break;
3792 case RNID_TOPOLOGY_DISC:
3793 rn->SpecificLen = sizeof(RNID_TOP_DISC);
3794 memcpy(&rn->un.topologyDisc.portName,
3795 &vport->fc_portname, sizeof(struct lpfc_name));
3796 rn->un.topologyDisc.unitType = RNID_HBA;
3797 rn->un.topologyDisc.physPort = 0;
3798 rn->un.topologyDisc.attachedNodes = 0;
3799 break;
3800 default:
3801 rn->CommonLen = 0;
3802 rn->SpecificLen = 0;
3803 break;
3804 }
3805
3806 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3807 "Issue ACC RNID: did:x%x flg:x%x",
3808 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3809
3810 phba->fc_stat.elsXmitACC++;
3811 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3812 lpfc_nlp_put(ndlp);
3813 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
3814 * it could be freed */
3815
3816 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3817 if (rc == IOCB_ERROR) {
3818 lpfc_els_free_iocb(phba, elsiocb);
3819 return 1;
3820 }
3821 return 0;
3822 }
3823
3824 /**
3825 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
3826 * @vport: pointer to a host virtual N_Port data structure.
3827 *
3828 * This routine issues Address Discover (ADISC) ELS commands to those
3829 * N_Ports which are in node port recovery state and ADISC has not been issued
3830 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
3831 * lpfc_issue_els_adisc() routine, the per-@vport discovery count
3832 * (num_disc_nodes) is incremented. If num_disc_nodes reaches the
3833 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit
3834 * is set in the @vport fc_flag and the issuing of the remaining ADISC
3835 * IOCBs is deferred for a later pass. On the other hand, if the walk of
3836 * all the ndlps on the @vport completes without any ADISC IOCB issued,
3837 * the FC_NLP_MORE bit is cleared in the @vport fc_flag, indicating that
3838 * no more ADISCs need to be sent.
3839 *
3840 * Return code
3841 * The number of N_Ports with adisc issued.
3842 **/
3843 int
3844 lpfc_els_disc_adisc(struct lpfc_vport *vport)
3845 {
3846 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3847 struct lpfc_nodelist *ndlp, *next_ndlp;
3848 int sentadisc = 0;
3849
3850 /* go thru NPR nodes and issue any remaining ELS ADISCs */
3851 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3852 if (!NLP_CHK_NODE_ACT(ndlp))
3853 continue;
3854 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
3855 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
3856 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
3857 spin_lock_irq(shost->host_lock);
3858 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3859 spin_unlock_irq(shost->host_lock);
3860 ndlp->nlp_prev_state = ndlp->nlp_state;
3861 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3862 lpfc_issue_els_adisc(vport, ndlp, 0);
3863 sentadisc++;
3864 vport->num_disc_nodes++;
3865 if (vport->num_disc_nodes >=
3866 vport->cfg_discovery_threads) {
3867 spin_lock_irq(shost->host_lock);
3868 vport->fc_flag |= FC_NLP_MORE;
3869 spin_unlock_irq(shost->host_lock);
3870 break;
3871 }
3872 }
3873 }
3874 if (sentadisc == 0) {
3875 spin_lock_irq(shost->host_lock);
3876 vport->fc_flag &= ~FC_NLP_MORE;
3877 spin_unlock_irq(shost->host_lock);
3878 }
3879 return sentadisc;
3880 }
3881
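/*
 * Illustrative sketch only (not part of the driver): a discovery completion
 * path might resume the throttled ADISC walk like this once outstanding
 * ADISCs drain (compare the driver's own lpfc_more_adisc() earlier in this
 * file). The helper name is hypothetical.
 */
static inline void example_resume_adisc_walk(struct lpfc_vport *vport)
{
	/* FC_NLP_MORE stays set while ndlps are still waiting for ADISC */
	if (vport->fc_flag & FC_NLP_MORE)
		lpfc_els_disc_adisc(vport);
}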
3882 /**
3883 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
3884 * @vport: pointer to a host virtual N_Port data structure.
3885 *
3886 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
3887 * which are in node port recovery state on a @vport. Each time an ELS
3888 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
3889 * the per-@vport discovery count (num_disc_nodes) is incremented. If
3890 * num_disc_nodes reaches the pre-configured threshold
3891 * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport
3892 * fc_flag and the issuing of the remaining PLOGI IOCBs is deferred for
3893 * a later pass. On the other hand, if the walk of all the ndlps on the
3894 * @vport completes without any PLOGI IOCB issued, the FC_NLP_MORE bit
3895 * is cleared in the @vport fc_flag, indicating that no more PLOGIs need
3896 * to be sent.
3897 *
3898 * Return code
3899 * The number of N_Ports with plogi issued.
3900 **/
3901 int
3902 lpfc_els_disc_plogi(struct lpfc_vport *vport)
3903 {
3904 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3905 struct lpfc_nodelist *ndlp, *next_ndlp;
3906 int sentplogi = 0;
3907
3908 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
3909 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3910 if (!NLP_CHK_NODE_ACT(ndlp))
3911 continue;
3912 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
3913 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
3914 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
3915 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
3916 ndlp->nlp_prev_state = ndlp->nlp_state;
3917 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3918 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
3919 sentplogi++;
3920 vport->num_disc_nodes++;
3921 if (vport->num_disc_nodes >=
3922 vport->cfg_discovery_threads) {
3923 spin_lock_irq(shost->host_lock);
3924 vport->fc_flag |= FC_NLP_MORE;
3925 spin_unlock_irq(shost->host_lock);
3926 break;
3927 }
3928 }
3929 }
3930 if (sentplogi) {
3931 lpfc_set_disctmo(vport);
3932 }
3933 else {
3934 spin_lock_irq(shost->host_lock);
3935 vport->fc_flag &= ~FC_NLP_MORE;
3936 spin_unlock_irq(shost->host_lock);
3937 }
3938 return sentplogi;
3939 }
3940
3941 /**
3942 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
3943 * @vport: pointer to a host virtual N_Port data structure.
3944 *
3945 * This routine cleans up any Registration State Change Notification
3946 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
3947 * @vport together with the host_lock is used to prevent multiple threads
3948 * from accessing the RSCN array on the same @vport at the same time.
3949 **/
3950 void
3951 lpfc_els_flush_rscn(struct lpfc_vport *vport)
3952 {
3953 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3954 struct lpfc_hba *phba = vport->phba;
3955 int i;
3956
3957 spin_lock_irq(shost->host_lock);
3958 if (vport->fc_rscn_flush) {
3959 /* Another thread is walking fc_rscn_id_list on this vport */
3960 spin_unlock_irq(shost->host_lock);
3961 return;
3962 }
3963 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
3964 vport->fc_rscn_flush = 1;
3965 spin_unlock_irq(shost->host_lock);
3966
3967 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
3968 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
3969 vport->fc_rscn_id_list[i] = NULL;
3970 }
3971 spin_lock_irq(shost->host_lock);
3972 vport->fc_rscn_id_cnt = 0;
3973 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
3974 spin_unlock_irq(shost->host_lock);
3975 lpfc_can_disctmo(vport);
3976 /* Indicate we are done walking this fc_rscn_id_list */
3977 vport->fc_rscn_flush = 0;
3978 }
3979
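/*
 * Illustrative sketch only (not part of the driver): the single-flag
 * "flush token" used above to serialize walkers of fc_rscn_id_list.
 * The token is claimed under host_lock, the lock is dropped for the walk,
 * and the token is cleared when the walk finishes. Helper name is
 * hypothetical.
 */
static inline int example_try_claim_rscn_walk(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	if (vport->fc_rscn_flush) {
		/* Another thread is already walking fc_rscn_id_list */
		spin_unlock_irq(shost->host_lock);
		return 0;
	}
	vport->fc_rscn_flush = 1;
	spin_unlock_irq(shost->host_lock);
	return 1;	/* caller must clear fc_rscn_flush when done */
}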
3980 /**
3981 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
3982 * @vport: pointer to a host virtual N_Port data structure.
3983 * @did: remote destination port identifier.
3984 *
3985 * This routine checks whether there is any pending Registration State
3986 * Change Notification (RSCN) for a @did on @vport.
3987 *
3988 * Return code
3989 * Non-zero - The @did matched a pending rscn
3990 * 0 - not able to match @did with a pending rscn
3991 **/
3992 int
3993 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
3994 {
3995 D_ID ns_did;
3996 D_ID rscn_did;
3997 uint32_t *lp;
3998 uint32_t payload_len, i;
3999 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4000
4001 ns_did.un.word = did;
4002
4003 /* Never match fabric nodes for RSCNs */
4004 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
4005 return 0;
4006
4007 /* If we are doing a FULL RSCN rediscovery, match everything */
4008 if (vport->fc_flag & FC_RSCN_DISCOVERY)
4009 return did;
4010
4011 spin_lock_irq(shost->host_lock);
4012 if (vport->fc_rscn_flush) {
4013 /* Another thread is walking fc_rscn_id_list on this vport */
4014 spin_unlock_irq(shost->host_lock);
4015 return 0;
4016 }
4017 /* Indicate we are walking fc_rscn_id_list on this vport */
4018 vport->fc_rscn_flush = 1;
4019 spin_unlock_irq(shost->host_lock);
4020 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
4021 lp = vport->fc_rscn_id_list[i]->virt;
4022 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4023 payload_len -= sizeof(uint32_t); /* take off word 0 */
4024 while (payload_len) {
4025 rscn_did.un.word = be32_to_cpu(*lp++);
4026 payload_len -= sizeof(uint32_t);
4027 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
4028 case RSCN_ADDRESS_FORMAT_PORT:
4029 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4030 && (ns_did.un.b.area == rscn_did.un.b.area)
4031 && (ns_did.un.b.id == rscn_did.un.b.id))
4032 goto return_did_out;
4033 break;
4034 case RSCN_ADDRESS_FORMAT_AREA:
4035 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4036 && (ns_did.un.b.area == rscn_did.un.b.area))
4037 goto return_did_out;
4038 break;
4039 case RSCN_ADDRESS_FORMAT_DOMAIN:
4040 if (ns_did.un.b.domain == rscn_did.un.b.domain)
4041 goto return_did_out;
4042 break;
4043 case RSCN_ADDRESS_FORMAT_FABRIC:
4044 goto return_did_out;
4045 }
4046 }
4047 }
4048 /* Indicate we are done with walking fc_rscn_id_list on this vport */
4049 vport->fc_rscn_flush = 0;
4050 return 0;
4051 return_did_out:
4052 /* Indicate we are done with walking fc_rscn_id_list on this vport */
4053 vport->fc_rscn_flush = 0;
4054 return did;
4055 }
4056
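/*
 * Illustrative sketch only (not part of the driver): a caller can use
 * lpfc_rscn_payload_check() as a simple predicate, e.g. to decide whether
 * a node's DID is covered by any pending RSCN entry (port, area, domain or
 * fabric granularity is resolved inside the routine). Helper name is
 * hypothetical.
 */
static inline int example_did_covered_by_rscn(struct lpfc_vport *vport,
					      struct lpfc_nodelist *ndlp)
{
	return lpfc_rscn_payload_check(vport, ndlp->nlp_DID) != 0;
}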
4057 /**
4058 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
4059 * @vport: pointer to a host virtual N_Port data structure.
4060 *
4061 * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the
4062 * state machine for each of the @vport's nodes that match a pending RSCN
4063 * (Registration State Change Notification).
4064 *
4065 * Return code
4066 * 0 - Successful (currently always returns 0)
4067 **/
4068 static int
4069 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
4070 {
4071 struct lpfc_nodelist *ndlp = NULL;
4072
4073 /* Move all affected nodes by pending RSCNs to NPR state. */
4074 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4075 if (!NLP_CHK_NODE_ACT(ndlp) ||
4076 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
4077 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
4078 continue;
4079 lpfc_disc_state_machine(vport, ndlp, NULL,
4080 NLP_EVT_DEVICE_RECOVERY);
4081 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4082 }
4083 return 0;
4084 }
4085
4086 /**
4087 * lpfc_send_rscn_event - Send an RSCN event to management application
4088 * @vport: pointer to a host virtual N_Port data structure.
4089 * @cmdiocb: pointer to lpfc command iocb data structure.
4090 *
4091 * lpfc_send_rscn_event sends an RSCN netlink event to management
4092 * applications.
4093 */
4094 static void
4095 lpfc_send_rscn_event(struct lpfc_vport *vport,
4096 struct lpfc_iocbq *cmdiocb)
4097 {
4098 struct lpfc_dmabuf *pcmd;
4099 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4100 uint32_t *payload_ptr;
4101 uint32_t payload_len;
4102 struct lpfc_rscn_event_header *rscn_event_data;
4103
4104 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4105 payload_ptr = (uint32_t *) pcmd->virt;
4106 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
4107
4108 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
4109 payload_len, GFP_KERNEL);
4110 if (!rscn_event_data) {
4111 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4112 "0147 Failed to allocate memory for RSCN event\n");
4113 return;
4114 }
4115 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
4116 rscn_event_data->payload_length = payload_len;
4117 memcpy(rscn_event_data->rscn_payload, payload_ptr,
4118 payload_len);
4119
4120 fc_host_post_vendor_event(shost,
4121 fc_get_event_number(),
4122 sizeof(struct lpfc_els_event_header) + payload_len,
4123 (char *)rscn_event_data,
4124 LPFC_NL_VENDOR_ID);
4125
4126 kfree(rscn_event_data);
4127 }
4128
4129 /**
4130 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
4131 * @vport: pointer to a host virtual N_Port data structure.
4132 * @cmdiocb: pointer to lpfc command iocb data structure.
4133 * @ndlp: pointer to a node-list data structure.
4134 *
4135 * This routine processes an unsolicited RSCN (Registration State Change
4136 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
4137 * to invoke fc_host_post_event() routine to the FC transport layer. If the
4138 * discover state machine is about to begin discovery, it just accepts the
4139 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
4140 * contains N_Port IDs for other vports on this HBA, it just accepts the
4141 * RSCN and ignore processing it. If the state machine is in the recovery
4142 * state, the fc_rscn_id_list of this @vport is walked and the
4143 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for
4144 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn()
4145 * routine is invoked to handle the RSCN event.
4146 *
4147 * Return code
4148 * 0 - Just sent the acc response
4149 * 1 - Sent the acc response and waited for name server completion
4150 **/
4151 static int
4152 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4153 struct lpfc_nodelist *ndlp)
4154 {
4155 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4156 struct lpfc_hba *phba = vport->phba;
4157 struct lpfc_dmabuf *pcmd;
4158 uint32_t *lp, *datap;
4159 IOCB_t *icmd;
4160 uint32_t payload_len, length, nportid, *cmd;
4161 int rscn_cnt;
4162 int rscn_id = 0, hba_id = 0;
4163 int i;
4164
4165 icmd = &cmdiocb->iocb;
4166 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4167 lp = (uint32_t *) pcmd->virt;
4168
4169 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4170 payload_len -= sizeof(uint32_t); /* take off word 0 */
4171 /* RSCN received */
4172 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4173 "0214 RSCN received Data: x%x x%x x%x x%x\n",
4174 vport->fc_flag, payload_len, *lp,
4175 vport->fc_rscn_id_cnt);
4176
4177 /* Send an RSCN event to the management application */
4178 lpfc_send_rscn_event(vport, cmdiocb);
4179
4180 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
4181 fc_host_post_event(shost, fc_get_event_number(),
4182 FCH_EVT_RSCN, lp[i]);
4183
4184 /* If we are about to begin discovery, just ACC the RSCN.
4185 * Discovery processing will satisfy it.
4186 */
4187 if (vport->port_state <= LPFC_NS_QRY) {
4188 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4189 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
4190 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4191
4192 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4193 return 0;
4194 }
4195
4196 /* If this RSCN just contains NPortIDs for other vports on this HBA,
4197 * just ACC and ignore it.
4198 */
4199 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4200 !(vport->cfg_peer_port_login)) {
4201 i = payload_len;
4202 datap = lp;
4203 while (i > 0) {
4204 nportid = *datap++;
4205 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
4206 i -= sizeof(uint32_t);
4207 rscn_id++;
4208 if (lpfc_find_vport_by_did(phba, nportid))
4209 hba_id++;
4210 }
4211 if (rscn_id == hba_id) {
4212 /* ALL NPortIDs in RSCN are on HBA */
4213 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4214 "0219 Ignore RSCN "
4215 "Data: x%x x%x x%x x%x\n",
4216 vport->fc_flag, payload_len,
4217 *lp, vport->fc_rscn_id_cnt);
4218 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4219 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
4220 ndlp->nlp_DID, vport->port_state,
4221 ndlp->nlp_flag);
4222
4223 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
4224 ndlp, NULL);
4225 return 0;
4226 }
4227 }
4228
4229 spin_lock_irq(shost->host_lock);
4230 if (vport->fc_rscn_flush) {
4231 /* Another thread is walking fc_rscn_id_list on this vport */
4232 vport->fc_flag |= FC_RSCN_DISCOVERY;
4233 spin_unlock_irq(shost->host_lock);
4234 /* Send back ACC */
4235 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4236 return 0;
4237 }
4238 /* Indicate we are walking fc_rscn_id_list on this vport */
4239 vport->fc_rscn_flush = 1;
4240 spin_unlock_irq(shost->host_lock);
4241 /* Get the array count after successfully have the token */
4242 rscn_cnt = vport->fc_rscn_id_cnt;
4243 /* If we are already processing an RSCN, save the received
4244 * RSCN payload buffer, cmdiocb->context2 to process later.
4245 */
4246 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
4247 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4248 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
4249 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4250
4251 spin_lock_irq(shost->host_lock);
4252 vport->fc_flag |= FC_RSCN_DEFERRED;
4253 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
4254 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
4255 vport->fc_flag |= FC_RSCN_MODE;
4256 spin_unlock_irq(shost->host_lock);
4257 if (rscn_cnt) {
4258 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
4259 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
4260 }
4261 if ((rscn_cnt) &&
4262 (payload_len + length <= LPFC_BPL_SIZE)) {
4263 *cmd &= ELS_CMD_MASK;
4264 *cmd |= cpu_to_be32(payload_len + length);
4265 memcpy(((uint8_t *)cmd) + length, lp,
4266 payload_len);
4267 } else {
4268 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
4269 vport->fc_rscn_id_cnt++;
4270 /* If we zero cmdiocb->context2, the calling
4271 * routine will not try to free it.
4272 */
4273 cmdiocb->context2 = NULL;
4274 }
4275 /* Deferred RSCN */
4276 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4277 "0235 Deferred RSCN "
4278 "Data: x%x x%x x%x\n",
4279 vport->fc_rscn_id_cnt, vport->fc_flag,
4280 vport->port_state);
4281 } else {
4282 vport->fc_flag |= FC_RSCN_DISCOVERY;
4283 spin_unlock_irq(shost->host_lock);
4284 /* ReDiscovery RSCN */
4285 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4286 "0234 ReDiscovery RSCN "
4287 "Data: x%x x%x x%x\n",
4288 vport->fc_rscn_id_cnt, vport->fc_flag,
4289 vport->port_state);
4290 }
4291 /* Indicate we are done walking fc_rscn_id_list on this vport */
4292 vport->fc_rscn_flush = 0;
4293 /* Send back ACC */
4294 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4295 /* send RECOVERY event for ALL nodes that match RSCN payload */
4296 lpfc_rscn_recovery_check(vport);
4297 spin_lock_irq(shost->host_lock);
4298 vport->fc_flag &= ~FC_RSCN_DEFERRED;
4299 spin_unlock_irq(shost->host_lock);
4300 return 0;
4301 }
4302 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4303 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
4304 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4305
4306 spin_lock_irq(shost->host_lock);
4307 vport->fc_flag |= FC_RSCN_MODE;
4308 spin_unlock_irq(shost->host_lock);
4309 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
4310 /* Indicate we are done walking fc_rscn_id_list on this vport */
4311 vport->fc_rscn_flush = 0;
4312 /*
4313 * If we zero cmdiocb->context2, the calling routine will
4314 * not try to free it.
4315 */
4316 cmdiocb->context2 = NULL;
4317 lpfc_set_disctmo(vport);
4318 /* Send back ACC */
4319 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4320 /* send RECOVERY event for ALL nodes that match RSCN payload */
4321 lpfc_rscn_recovery_check(vport);
4322 return lpfc_els_handle_rscn(vport);
4323 }
4324
4325 /**
4326 * lpfc_els_handle_rscn - Handle rscn for a vport
4327 * @vport: pointer to a host virtual N_Port data structure.
4328 *
4329 * This routine handles the Registration State Change Notification
4330 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
4331 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
4332 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
4333 * NameServer shall be issued. If CT command to the NameServer fails to be
4334 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
4335 * RSCN activities with the @vport.
4336 *
4337 * Return code
4338 * 0 - Cleaned up rscn on the @vport
4339 * 1 - Wait for plogi to name server before proceeding
4340 **/
4341 int
4342 lpfc_els_handle_rscn(struct lpfc_vport *vport)
4343 {
4344 struct lpfc_nodelist *ndlp;
4345 struct lpfc_hba *phba = vport->phba;
4346
4347 /* Ignore RSCN if the port is being torn down. */
4348 if (vport->load_flag & FC_UNLOADING) {
4349 lpfc_els_flush_rscn(vport);
4350 return 0;
4351 }
4352
4353 /* Start timer for RSCN processing */
4354 lpfc_set_disctmo(vport);
4355
4356 /* RSCN processed */
4357 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4358 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
4359 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
4360 vport->port_state);
4361
4362 /* To process RSCN, first compare RSCN data with NameServer */
4363 vport->fc_ns_retry = 0;
4364 vport->num_disc_nodes = 0;
4365
4366 ndlp = lpfc_findnode_did(vport, NameServer_DID);
4367 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
4368 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
4369 /* Good ndlp, issue CT Request to NameServer */
4370 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
4371 /* Wait for NameServer query cmpl before we can
4372 continue */
4373 return 1;
4374 } else {
4375 /* If login to NameServer does not exist, issue one */
4376 /* Good status, issue PLOGI to NameServer */
4377 ndlp = lpfc_findnode_did(vport, NameServer_DID);
4378 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
4379 /* Wait for NameServer login cmpl before we can
4380 continue */
4381 return 1;
4382
4383 if (ndlp) {
4384 ndlp = lpfc_enable_node(vport, ndlp,
4385 NLP_STE_PLOGI_ISSUE);
4386 if (!ndlp) {
4387 lpfc_els_flush_rscn(vport);
4388 return 0;
4389 }
4390 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
4391 } else {
4392 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4393 if (!ndlp) {
4394 lpfc_els_flush_rscn(vport);
4395 return 0;
4396 }
4397 lpfc_nlp_init(vport, ndlp, NameServer_DID);
4398 ndlp->nlp_prev_state = ndlp->nlp_state;
4399 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4400 }
4401 ndlp->nlp_type |= NLP_FABRIC;
4402 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
4403 /* Wait for NameServer login cmpl before we can
4404 * continue
4405 */
4406 return 1;
4407 }
4408
4409 lpfc_els_flush_rscn(vport);
4410 return 0;
4411 }
4412
4413 /**
4414 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
4415 * @vport: pointer to a host virtual N_Port data structure.
4416 * @cmdiocb: pointer to lpfc command iocb data structure.
4417 * @ndlp: pointer to a node-list data structure.
4418 *
4419 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
4420 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
4421 * point topology. As an unsolicited FLOGI should not be received in loop
4422 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
4423 * lpfc_check_sparm() routine is invoked to check the parameters in the
4424 * unsolicited FLOGI. If parameters validation failed, the routine
4425 * lpfc_els_rsp_reject() shall be called with reject reason code set to
4426 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
4427 * FLOGI shall be compared with the Port WWN of the @vport to determine who
4428 * will initiate PLOGI. The party with the higher lexicographical value has
4429 * higher priority (as the winning port) and will initiate PLOGI and
4430 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
4431 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
4432 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
4433 *
4434 * Return code
4435 * 0 - Successfully processed the unsolicited flogi
4436 * 1 - Failed to process the unsolicited flogi
4437 **/
4438 static int
4439 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4440 struct lpfc_nodelist *ndlp)
4441 {
4442 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4443 struct lpfc_hba *phba = vport->phba;
4444 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4445 uint32_t *lp = (uint32_t *) pcmd->virt;
4446 IOCB_t *icmd = &cmdiocb->iocb;
4447 struct serv_parm *sp;
4448 LPFC_MBOXQ_t *mbox;
4449 struct ls_rjt stat;
4450 uint32_t cmd, did;
4451 int rc;
4452
4453 cmd = *lp++;
4454 sp = (struct serv_parm *) lp;
4455
4456 /* FLOGI received */
4457
4458 lpfc_set_disctmo(vport);
4459
4460 if (phba->fc_topology == TOPOLOGY_LOOP) {
4461 /* We should never receive a FLOGI in loop mode, ignore it */
4462 did = icmd->un.elsreq64.remoteID;
4463
4464 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
4465 Loop Mode */
4466 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4467 "0113 An FLOGI ELS command x%x was "
4468 "received from DID x%x in Loop Mode\n",
4469 cmd, did);
4470 return 1;
4471 }
4472
4473 did = Fabric_DID;
4474
4475 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
4476 /* For a FLOGI we accept; if our portname is greater
4477 * than the remote portname, we initiate Nport login.
4478 */
4479
4480 rc = memcmp(&vport->fc_portname, &sp->portName,
4481 sizeof(struct lpfc_name));
4482
4483 if (!rc) {
4484 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4485 if (!mbox)
4486 return 1;
4487
4488 lpfc_linkdown(phba);
4489 lpfc_init_link(phba, mbox,
4490 phba->cfg_topology,
4491 phba->cfg_link_speed);
4492 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
4493 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4494 mbox->vport = vport;
4495 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4496 lpfc_set_loopback_flag(phba);
4497 if (rc == MBX_NOT_FINISHED) {
4498 mempool_free(mbox, phba->mbox_mem_pool);
4499 }
4500 return 1;
4501 } else if (rc > 0) { /* greater than */
4502 spin_lock_irq(shost->host_lock);
4503 vport->fc_flag |= FC_PT2PT_PLOGI;
4504 spin_unlock_irq(shost->host_lock);
4505 }
4506 spin_lock_irq(shost->host_lock);
4507 vport->fc_flag |= FC_PT2PT;
4508 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4509 spin_unlock_irq(shost->host_lock);
4510 } else {
4511 /* Reject this request because invalid parameters */
4512 stat.un.b.lsRjtRsvd0 = 0;
4513 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4514 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
4515 stat.un.b.vendorUnique = 0;
4516 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4517 NULL);
4518 return 1;
4519 }
4520
4521 /* Send back ACC */
4522 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
4523
4524 return 0;
4525 }
4526
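/*
 * Illustrative sketch only (not part of the driver): the point-to-point
 * "winner" decision above reduces to a byte-wise compare of the two WWPNs;
 * the port with the numerically larger name initiates PLOGI and assigns
 * both N_Port IDs. Helper name is hypothetical; @sp is the service
 * parameter payload from the received FLOGI.
 */
static inline int example_pt2pt_we_initiate_plogi(struct lpfc_vport *vport,
						  struct serv_parm *sp)
{
	return memcmp(&vport->fc_portname, &sp->portName,
		      sizeof(struct lpfc_name)) > 0;
}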
4527 /**
4528 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
4529 * @vport: pointer to a host virtual N_Port data structure.
4530 * @cmdiocb: pointer to lpfc command iocb data structure.
4531 * @ndlp: pointer to a node-list data structure.
4532 *
4533 * This routine processes Request Node Identification Data (RNID) IOCB
4534 * received as an ELS unsolicited event. Only when the RNID specifies format
4535 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) does
4536 * this routine invoke the lpfc_els_rsp_rnid_acc() routine to
4537 * Accept (ACC) the RNID ELS command. All the other RNID formats are
4538 * rejected by invoking the lpfc_els_rsp_reject() routine.
4539 *
4540 * Return code
4541 * 0 - Successfully processed rnid iocb (currently always return 0)
4542 **/
4543 static int
4544 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4545 struct lpfc_nodelist *ndlp)
4546 {
4547 struct lpfc_dmabuf *pcmd;
4548 uint32_t *lp;
4549 IOCB_t *icmd;
4550 RNID *rn;
4551 struct ls_rjt stat;
4552 uint32_t cmd, did;
4553
4554 icmd = &cmdiocb->iocb;
4555 did = icmd->un.elsreq64.remoteID;
4556 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4557 lp = (uint32_t *) pcmd->virt;
4558
4559 cmd = *lp++;
4560 rn = (RNID *) lp;
4561
4562 /* RNID received */
4563
4564 switch (rn->Format) {
4565 case 0:
4566 case RNID_TOPOLOGY_DISC:
4567 /* Send back ACC */
4568 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
4569 break;
4570 default:
4571 /* Reject this request because format not supported */
4572 stat.un.b.lsRjtRsvd0 = 0;
4573 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4574 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4575 stat.un.b.vendorUnique = 0;
4576 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4577 NULL);
4578 }
4579 return 0;
4580 }
4581
4582 /**
4583 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
4584 * @vport: pointer to a host virtual N_Port data structure.
4585 * @cmdiocb: pointer to lpfc command iocb data structure.
4586 * @ndlp: pointer to a node-list data structure.
4587 *
4588 * This routine processes a Link Incident Report Registration (LIRR) IOCB
4589 * received as an ELS unsolicited event. Currently, this function just invokes
4590 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
4591 *
4592 * Return code
4593 * 0 - Successfully processed lirr iocb (currently always return 0)
4594 **/
4595 static int
4596 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4597 struct lpfc_nodelist *ndlp)
4598 {
4599 struct ls_rjt stat;
4600
4601 /* For now, unconditionally reject this command */
4602 stat.un.b.lsRjtRsvd0 = 0;
4603 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4604 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4605 stat.un.b.vendorUnique = 0;
4606 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
4607 return 0;
4608 }
4609
4610 /**
4611 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
4612 * @vport: pointer to a host virtual N_Port data structure.
4613 * @cmdiocb: pointer to lpfc command iocb data structure.
4614 * @ndlp: pointer to a node-list data structure.
4615 *
4616 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
4617 * received as an ELS unsolicited event. A request to RRQ shall only
4618 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
4619 * Nx_Port N_Port_ID of the target Exchange is the same as the
4620 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
4621 * not accepted, an LS_RJT with reason code "Unable to perform
4622 * command request" and reason code explanation "Invalid Originator
4623 * S_ID" shall be returned. For now, we just unconditionally accept
4624 * RRQ from the target.
4625 **/
4626 static void
4627 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4628 struct lpfc_nodelist *ndlp)
4629 {
4630 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4631 }
4632
4633 /**
4634 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
4635 * @phba: pointer to lpfc hba data structure.
4636 * @pmb: pointer to the driver internal queue element for mailbox command.
4637 *
4638 * This routine is the completion callback function for the MBX_READ_LNK_STAT
4639 * mailbox command. This callback function is to actually send the Accept
4640 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
4641 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
4642 * mailbox command, constructs the RPS response with the link statistics
4643 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
4644 * response to the RPS.
4645 *
4646 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4647 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4648 * will be stored into the context1 field of the IOCB for the completion
4649 * callback function to the RPS Accept Response ELS IOCB command.
4650 *
4651 **/
4652 static void
4653 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4654 {
4655 MAILBOX_t *mb;
4656 IOCB_t *icmd;
4657 RPS_RSP *rps_rsp;
4658 uint8_t *pcmd;
4659 struct lpfc_iocbq *elsiocb;
4660 struct lpfc_nodelist *ndlp;
4661 uint16_t xri, status;
4662 uint32_t cmdsize;
4663
4664 mb = &pmb->u.mb;
4665
4666 ndlp = (struct lpfc_nodelist *) pmb->context2;
4667 xri = (uint16_t) ((unsigned long)(pmb->context1));
4668 pmb->context1 = NULL;
4669 pmb->context2 = NULL;
4670
4671 if (mb->mbxStatus) {
4672 mempool_free(pmb, phba->mbox_mem_pool);
4673 return;
4674 }
4675
4676 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
4677 mempool_free(pmb, phba->mbox_mem_pool);
4678 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
4679 lpfc_max_els_tries, ndlp,
4680 ndlp->nlp_DID, ELS_CMD_ACC);
4681
4682 /* Decrement the ndlp reference count from previous mbox command */
4683 lpfc_nlp_put(ndlp);
4684
4685 if (!elsiocb)
4686 return;
4687
4688 icmd = &elsiocb->iocb;
4689 icmd->ulpContext = xri;
4690
4691 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4692 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4693 pcmd += sizeof(uint32_t); /* Skip past command */
4694 rps_rsp = (RPS_RSP *)pcmd;
4695
4696 if (phba->fc_topology != TOPOLOGY_LOOP)
4697 status = 0x10;
4698 else
4699 status = 0x8;
4700 if (phba->pport->fc_flag & FC_FABRIC)
4701 status |= 0x4;
4702
4703 rps_rsp->rsvd1 = 0;
4704 rps_rsp->portStatus = cpu_to_be16(status);
4705 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
4706 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
4707 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
4708 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
4709 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
4710 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
4711 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
4712 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
4713 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
4714 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
4715 elsiocb->iotag, elsiocb->iocb.ulpContext,
4716 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4717 ndlp->nlp_rpi);
4718 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4719 phba->fc_stat.elsXmitACC++;
4720 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
4721 lpfc_els_free_iocb(phba, elsiocb);
4722 return;
4723 }
4724
4725 /**
4726 * lpfc_els_rcv_rps - Process an unsolicited rps iocb
4727 * @vport: pointer to a host virtual N_Port data structure.
4728 * @cmdiocb: pointer to lpfc command iocb data structure.
4729 * @ndlp: pointer to a node-list data structure.
4730 *
4731 * This routine processes Read Port Status (RPS) IOCB received as an
4732 * ELS unsolicited event. It first checks the remote port state. If the
4733 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
4734 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
4735 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
4736 * for reading the HBA link statistics. It is for the callback function,
4737 * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
4738 * to actually sending out RPS Accept (ACC) response.
4739 *
4740 * Return codes
4741 * 0 - Successfully processed rps iocb (currently always return 0)
4742 **/
4743 static int
4744 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4745 struct lpfc_nodelist *ndlp)
4746 {
4747 struct lpfc_hba *phba = vport->phba;
4748 uint32_t *lp;
4749 uint8_t flag;
4750 LPFC_MBOXQ_t *mbox;
4751 struct lpfc_dmabuf *pcmd;
4752 RPS *rps;
4753 struct ls_rjt stat;
4754
4755 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
4756 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
4757 /* reject the unsolicited RPS request and done with it */
4758 goto reject_out;
4759
4760 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4761 lp = (uint32_t *) pcmd->virt;
4762 flag = (be32_to_cpu(*lp++) & 0xf);
4763 rps = (RPS *) lp;
4764
4765 if ((flag == 0) ||
4766 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
4767 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
4768 sizeof(struct lpfc_name)) == 0))) {
4769
4770 printk("Fix me....\n");
4771 dump_stack();
4772 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
4773 if (mbox) {
4774 lpfc_read_lnk_stat(phba, mbox);
4775 mbox->context1 =
4776 (void *)((unsigned long) cmdiocb->iocb.ulpContext);
4777 mbox->context2 = lpfc_nlp_get(ndlp);
4778 mbox->vport = vport;
4779 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
4780 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
4781 != MBX_NOT_FINISHED)
4782 /* Mbox completion will send ELS Response */
4783 return 0;
4784 /* Decrement reference count used for the failed mbox
4785 * command.
4786 */
4787 lpfc_nlp_put(ndlp);
4788 mempool_free(mbox, phba->mbox_mem_pool);
4789 }
4790 }
4791
4792 reject_out:
4793 /* issue rejection response */
4794 stat.un.b.lsRjtRsvd0 = 0;
4795 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4796 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4797 stat.un.b.vendorUnique = 0;
4798 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
4799 return 0;
4800 }
4801
4802 /**
4803 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
4804 * @vport: pointer to a host virtual N_Port data structure.
4805 * @cmdsize: size of the ELS command.
4806 * @oldiocb: pointer to the original lpfc command iocb data structure.
4807 * @ndlp: pointer to a node-list data structure.
4808 *
4809 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
4810 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
4811 *
4812 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4813 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4814 * will be stored into the context1 field of the IOCB for the completion
4815 * callback function to the RPL Accept Response ELS command.
4816 *
4817 * Return code
4818 * 0 - Successfully issued ACC RPL ELS command
4819 * 1 - Failed to issue ACC RPL ELS command
4820 **/
4821 static int
4822 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4823 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4824 {
4825 struct lpfc_hba *phba = vport->phba;
4826 IOCB_t *icmd, *oldcmd;
4827 RPL_RSP rpl_rsp;
4828 struct lpfc_iocbq *elsiocb;
4829 uint8_t *pcmd;
4830
4831 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4832 ndlp->nlp_DID, ELS_CMD_ACC);
4833
4834 if (!elsiocb)
4835 return 1;
4836
4837 icmd = &elsiocb->iocb;
4838 oldcmd = &oldiocb->iocb;
4839 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
4840
4841 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4842 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4843 pcmd += sizeof(uint16_t);
4844 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
4845 pcmd += sizeof(uint16_t);
4846
4847 /* Setup the RPL ACC payload */
4848 rpl_rsp.listLen = be32_to_cpu(1);
4849 rpl_rsp.index = 0;
4850 rpl_rsp.port_num_blk.portNum = 0;
4851 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
4852 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
4853 sizeof(struct lpfc_name));
4854 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
4855 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
4856 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4857 "0120 Xmit ELS RPL ACC response tag x%x "
4858 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
4859 "rpi x%x\n",
4860 elsiocb->iotag, elsiocb->iocb.ulpContext,
4861 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4862 ndlp->nlp_rpi);
4863 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4864 phba->fc_stat.elsXmitACC++;
4865 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
4866 IOCB_ERROR) {
4867 lpfc_els_free_iocb(phba, elsiocb);
4868 return 1;
4869 }
4870 return 0;
4871 }
4872
4873 /**
4874 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
4875 * @vport: pointer to a host virtual N_Port data structure.
4876 * @cmdiocb: pointer to lpfc command iocb data structure.
4877 * @ndlp: pointer to a node-list data structure.
4878 *
4879 * This routine processes Read Port List (RPL) IOCB received as an ELS
4880 * unsolicited event. It first checks the remote port state. If the remote
4881 * port is in neither NLP_STE_UNMAPPED_NODE nor NLP_STE_MAPPED_NODE state, it
4882 * invokes the lpfc_els_rsp_reject() routine to send reject response.
4883 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
4884 * to accept the RPL.
4885 *
4886 * Return code
4887 * 0 - Successfully processed rpl iocb (currently always return 0)
4888 **/
4889 static int
4890 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4891 struct lpfc_nodelist *ndlp)
4892 {
4893 struct lpfc_dmabuf *pcmd;
4894 uint32_t *lp;
4895 uint32_t maxsize;
4896 uint16_t cmdsize;
4897 RPL *rpl;
4898 struct ls_rjt stat;
4899
4900 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
4901 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
4902 /* issue rejection response */
4903 stat.un.b.lsRjtRsvd0 = 0;
4904 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4905 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4906 stat.un.b.vendorUnique = 0;
4907 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4908 NULL);
4909 /* rejected the unsolicited RPL request and done with it */
4910 return 0;
4911 }
4912
4913 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4914 lp = (uint32_t *) pcmd->virt;
4915 rpl = (RPL *) (lp + 1);
4916
4917 maxsize = be32_to_cpu(rpl->maxsize);
4918
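	/* Size the accept payload from the requester's maxsize: send the
	 * full single-entry RPL_RSP when maxsize is zero or large enough,
	 * otherwise limit the payload to maxsize words.
	 */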
4919 /* We support only one port */
4920 if ((rpl->index == 0) &&
4921 ((maxsize == 0) ||
4922 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
4923 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
4924 } else {
4925 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
4926 }
4927 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
4928
4929 return 0;
4930 }
4931
4932 /**
4933 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
4934 * @vport: pointer to a virtual N_Port data structure.
4935 * @cmdiocb: pointer to lpfc command iocb data structure.
4936 * @ndlp: pointer to a node-list data structure.
4937 *
4938 * This routine processes Fibre Channel Address Resolution Protocol
4939 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
4940 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
4941 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
4942 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
4943 * remote PortName is compared against the FC PortName stored in the @vport
4944 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
4945 * compared against the FC NodeName stored in the @vport data structure.
4946 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
4947 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
4948 * invoked to send out FARP Response to the remote node. Before sending the
4949 * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
4950 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
4951 * routine is invoked to log into the remote port first.
4952 *
4953 * Return code
4954 * 0 - Either the FARP Match Mode is not supported or the request was successfully processed
4955 **/
4956 static int
4957 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4958 struct lpfc_nodelist *ndlp)
4959 {
4960 struct lpfc_dmabuf *pcmd;
4961 uint32_t *lp;
4962 IOCB_t *icmd;
4963 FARP *fp;
4964 uint32_t cmd, cnt, did;
4965
4966 icmd = &cmdiocb->iocb;
4967 did = icmd->un.elsreq64.remoteID;
4968 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4969 lp = (uint32_t *) pcmd->virt;
4970
4971 cmd = *lp++;
4972 fp = (FARP *) lp;
4973 /* FARP-REQ received from DID <did> */
4974 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4975 "0601 FARP-REQ received from DID x%x\n", did);
4976 /* We will only support match on WWPN or WWNN */
4977 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
4978 return 0;
4979 }
4980
4981 cnt = 0;
4982 /* If this FARP command is searching for my portname */
4983 if (fp->Mflags & FARP_MATCH_PORT) {
4984 if (memcmp(&fp->RportName, &vport->fc_portname,
4985 sizeof(struct lpfc_name)) == 0)
4986 cnt = 1;
4987 }
4988
4989 /* If this FARP command is searching for my nodename */
4990 if (fp->Mflags & FARP_MATCH_NODE) {
4991 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
4992 sizeof(struct lpfc_name)) == 0)
4993 cnt = 1;
4994 }
4995
4996 if (cnt) {
4997 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
4998 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
4999 /* Log back into the node before sending the FARP. */
5000 if (fp->Rflags & FARP_REQUEST_PLOGI) {
5001 ndlp->nlp_prev_state = ndlp->nlp_state;
5002 lpfc_nlp_set_state(vport, ndlp,
5003 NLP_STE_PLOGI_ISSUE);
5004 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5005 }
5006
5007 /* Send a FARP response to that node */
5008 if (fp->Rflags & FARP_REQUEST_FARPR)
5009 lpfc_issue_els_farpr(vport, did, 0);
5010 }
5011 }
5012 return 0;
5013 }
5014
5015 /**
5016 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
5017 * @vport: pointer to a host virtual N_Port data structure.
5018 * @cmdiocb: pointer to lpfc command iocb data structure.
5019 * @ndlp: pointer to a node-list data structure.
5020 *
5021 * This routine processes Fibre Channel Address Resolution Protocol
5022 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
5023 * invokes the lpfc_els_rsp_acc() routine to send an accept (ACC) to the
5024 * remote node for the FARP response request.
5025 *
5026 * Return code
5027 * 0 - Successfully processed FARPR IOCB (currently always return 0)
5028 **/
5029 static int
5030 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5031 struct lpfc_nodelist *ndlp)
5032 {
5033 struct lpfc_dmabuf *pcmd;
5034 uint32_t *lp;
5035 IOCB_t *icmd;
5036 uint32_t cmd, did;
5037
5038 icmd = &cmdiocb->iocb;
5039 did = icmd->un.elsreq64.remoteID;
5040 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5041 lp = (uint32_t *) pcmd->virt;
5042
5043 cmd = *lp++;
5044 /* FARP-RSP received from DID <did> */
5045 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5046 "0600 FARP-RSP received from DID x%x\n", did);
5047 /* ACCEPT the Farp resp request */
5048 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
5049
5050 return 0;
5051 }
5052
5053 /**
5054 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
5055 * @vport: pointer to a host virtual N_Port data structure.
5056 * @cmdiocb: pointer to lpfc command iocb data structure.
5057 * @fan_ndlp: pointer to a node-list data structure.
5058 *
5059 * This routine processes a Fabric Address Notification (FAN) IOCB
5060 * command received as an ELS unsolicited event. The FAN ELS command will
5061 * only be processed on a physical port (i.e., the @vport represents the
5062 * physical port). The fabric NodeName and PortName from the FAN IOCB are
5063 * compared against those in the phba data structure. If any of those is
5064 * different, the lpfc_initial_flogi() routine is invoked to initialize
5065 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise,
5066 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
5067 * is invoked to register login to the fabric.
5068 *
5069 * Return code
5070 * 0 - Successfully processed fan iocb (currently always return 0).
5071 **/
5072 static int
5073 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5074 struct lpfc_nodelist *fan_ndlp)
5075 {
5076 struct lpfc_hba *phba = vport->phba;
5077 uint32_t *lp;
5078 FAN *fp;
5079
5080 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
5081 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
5082 fp = (FAN *) ++lp;
5083 /* FAN received; Fan does not have a reply sequence */
5084 if ((vport == phba->pport) &&
5085 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
5086 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
5087 sizeof(struct lpfc_name))) ||
5088 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
5089 sizeof(struct lpfc_name)))) {
5090 /* This port has switched fabrics. FLOGI is required */
5091 lpfc_initial_flogi(vport);
5092 } else {
5093 /* FAN verified - skip FLOGI */
5094 vport->fc_myDID = vport->fc_prevDID;
5095 if (phba->sli_rev < LPFC_SLI_REV4)
5096 lpfc_issue_fabric_reglogin(vport);
5097 else
5098 lpfc_issue_reg_vfi(vport);
5099 }
5100 }
5101 return 0;
5102 }
5103
5104 /**
5105 * lpfc_els_timeout - Handler function for the ELS timer
5106 * @ptr: holder for the timer function associated data.
5107 *
5108 * This routine is invoked by the ELS timer after timeout. It posts the ELS
5109 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
5110 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
5111 * up the worker thread. The worker thread then invokes the routine
5112 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
5113 **/
5114 void
5115 lpfc_els_timeout(unsigned long ptr)
5116 {
5117 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
5118 struct lpfc_hba *phba = vport->phba;
5119 uint32_t tmo_posted;
5120 unsigned long iflag;
5121
5122 spin_lock_irqsave(&vport->work_port_lock, iflag);
5123 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
5124 if (!tmo_posted)
5125 vport->work_port_events |= WORKER_ELS_TMO;
5126 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
5127
5128 if (!tmo_posted)
5129 lpfc_worker_wake_up(phba);
5130 return;
5131 }
5132
5133 /**
5134 * lpfc_els_timeout_handler - Process an els timeout event
5135 * @vport: pointer to a virtual N_Port data structure.
5136 *
5137 * This routine is the actual handler function that processes an ELS timeout
5138 * event. It walks the ELS ring and aborts all the expired IOCBs associated
5139 * with the @vport (except ABORT/CLOSE/FARP/FARPR/FDISC) by invoking the
5140 * lpfc_sli_issue_abort_iotag() routine.
5141 **/
5142 void
5143 lpfc_els_timeout_handler(struct lpfc_vport *vport)
5144 {
5145 struct lpfc_hba *phba = vport->phba;
5146 struct lpfc_sli_ring *pring;
5147 struct lpfc_iocbq *tmp_iocb, *piocb;
5148 IOCB_t *cmd = NULL;
5149 struct lpfc_dmabuf *pcmd;
5150 uint32_t els_command = 0;
5151 uint32_t timeout;
5152 uint32_t remote_ID = 0xffffffff;
5153
5154 spin_lock_irq(&phba->hbalock);
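	/* An ELS command is allowed twice the remote resource allocation
	 * timeout value (R_A_TOV) before it is considered expired.
	 */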
5155 timeout = (uint32_t)(phba->fc_ratov << 1);
5156
5157 pring = &phba->sli.ring[LPFC_ELS_RING];
5158
5159 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
5160 cmd = &piocb->iocb;
5161
5162 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
5163 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
5164 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
5165 continue;
5166
5167 if (piocb->vport != vport)
5168 continue;
5169
5170 pcmd = (struct lpfc_dmabuf *) piocb->context2;
5171 if (pcmd)
5172 els_command = *(uint32_t *) (pcmd->virt);
5173
5174 if (els_command == ELS_CMD_FARP ||
5175 els_command == ELS_CMD_FARPR ||
5176 els_command == ELS_CMD_FDISC)
5177 continue;
5178
5179 if (piocb->drvrTimeout > 0) {
5180 if (piocb->drvrTimeout >= timeout)
5181 piocb->drvrTimeout -= timeout;
5182 else
5183 piocb->drvrTimeout = 0;
5184 continue;
5185 }
5186
5187 remote_ID = 0xffffffff;
5188 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
5189 remote_ID = cmd->un.elsreq64.remoteID;
5190 else {
5191 struct lpfc_nodelist *ndlp;
5192 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
5193 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5194 remote_ID = ndlp->nlp_DID;
5195 }
5196 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5197 "0127 ELS timeout Data: x%x x%x x%x "
5198 "x%x\n", els_command,
5199 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
5200 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5201 }
5202 spin_unlock_irq(&phba->hbalock);
5203
5204 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
5205 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
5206 }
5207
5208 /**
5209 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
5210 * @vport: pointer to a host virtual N_Port data structure.
5211 *
5212 * This routine is used to clean up all the outstanding ELS commands on a
5213 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
5214 * routine. After that, it walks the ELS transmit queue to remove all the
5215 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
5216 * the IOCBs with a non-NULL completion callback function, the callback
5217 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5218 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
5219 * callback function, the IOCB will simply be released. Finally, it walks
5220 * the ELS transmit completion queue to issue an abort IOCB to any transmit
5221 * completion queue IOCB that is associated with the @vport and is not
5222 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
5223 * part of the discovery state machine) out to HBA by invoking the
5224 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
5225 * abort IOCB to any transmit completion queued IOCB; it does not guarantee
5226 * that the IOCBs are aborted when this function returns.
5227 **/
5228 void
5229 lpfc_els_flush_cmd(struct lpfc_vport *vport)
5230 {
5231 LIST_HEAD(completions);
5232 struct lpfc_hba *phba = vport->phba;
5233 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5234 struct lpfc_iocbq *tmp_iocb, *piocb;
5235 IOCB_t *cmd = NULL;
5236
5237 lpfc_fabric_abort_vport(vport);
5238
5239 spin_lock_irq(&phba->hbalock);
5240 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
5241 cmd = &piocb->iocb;
5242
5243 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
5244 continue;
5245 }
5246
5247 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
5248 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
5249 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
5250 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
5251 cmd->ulpCommand == CMD_ABORT_XRI_CN)
5252 continue;
5253
5254 if (piocb->vport != vport)
5255 continue;
5256
5257 list_move_tail(&piocb->list, &completions);
5258 pring->txq_cnt--;
5259 }
5260
5261 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
5262 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
5263 continue;
5264 }
5265
5266 if (piocb->vport != vport)
5267 continue;
5268
5269 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5270 }
5271 spin_unlock_irq(&phba->hbalock);
5272
5273 /* Cancel all the IOCBs from the completions list */
5274 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5275 IOERR_SLI_ABORTED);
5276
5277 return;
5278 }
5279
5280 /**
5281 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
5282 * @phba: pointer to lpfc hba data structure.
5283 *
5284 * This routine is used to clean up all the outstanding ELS commands on a
5285 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
5286 * routine. After that, it walks the ELS transmit queue to remove all the
5287 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
5288 * the IOCBs with the completion callback function associated, the callback
5289 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5290 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
5291 * callback function associated, the IOCB will simply be released. Finally,
5292 * it walks the ELS transmit completion queue to issue an abort IOCB to any
5293 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
5294 * management plane IOCBs that are not part of the discovery state machine)
5295 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
5296 **/
5297 void
5298 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
5299 {
5300 LIST_HEAD(completions);
5301 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5302 struct lpfc_iocbq *tmp_iocb, *piocb;
5303 IOCB_t *cmd = NULL;
5304
5305 lpfc_fabric_abort_hba(phba);
5306 spin_lock_irq(&phba->hbalock);
5307 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
5308 cmd = &piocb->iocb;
5309 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
5310 continue;
5311 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
5312 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
5313 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
5314 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
5315 cmd->ulpCommand == CMD_ABORT_XRI_CN)
5316 continue;
5317 list_move_tail(&piocb->list, &completions);
5318 pring->txq_cnt--;
5319 }
5320 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
5321 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
5322 continue;
5323 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5324 }
5325 spin_unlock_irq(&phba->hbalock);
5326
5327 /* Cancel all the IOCBs from the completions list */
5328 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5329 IOERR_SLI_ABORTED);
5330
5331 return;
5332 }
5333
5334 /**
5335 * lpfc_send_els_failure_event - Posts an ELS command failure event
5336 * @phba: Pointer to hba context object.
5337 * @cmdiocbp: Pointer to command iocb which reported error.
5338 * @rspiocbp: Pointer to response iocb which reported error.
5339 *
5340 * This function sends an event when there is an ELS command
5341 * failure.
5342 **/
5343 void
5344 lpfc_send_els_failure_event(struct lpfc_hba *phba,
5345 struct lpfc_iocbq *cmdiocbp,
5346 struct lpfc_iocbq *rspiocbp)
5347 {
5348 struct lpfc_vport *vport = cmdiocbp->vport;
5349 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5350 struct lpfc_lsrjt_event lsrjt_event;
5351 struct lpfc_fabric_event_header fabric_event;
5352 struct ls_rjt stat;
5353 struct lpfc_nodelist *ndlp;
5354 uint32_t *pcmd;
5355
5356 ndlp = cmdiocbp->context1;
5357 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5358 return;
5359
5360 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
5361 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
5362 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
5363 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
5364 sizeof(struct lpfc_name));
5365 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
5366 sizeof(struct lpfc_name));
5367 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
5368 cmdiocbp->context2)->virt);
5369 lsrjt_event.command = *pcmd;
5370 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
5371 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
5372 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
5373 fc_host_post_vendor_event(shost,
5374 fc_get_event_number(),
5375 sizeof(lsrjt_event),
5376 (char *)&lsrjt_event,
5377 LPFC_NL_VENDOR_ID);
5378 return;
5379 }
5380 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
5381 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
5382 fabric_event.event_type = FC_REG_FABRIC_EVENT;
5383 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
5384 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
5385 else
5386 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
5387 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
5388 sizeof(struct lpfc_name));
5389 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
5390 sizeof(struct lpfc_name));
5391 fc_host_post_vendor_event(shost,
5392 fc_get_event_number(),
5393 sizeof(fabric_event),
5394 (char *)&fabric_event,
5395 LPFC_NL_VENDOR_ID);
5396 return;
5397 }
5398
5399 }
5400
5401 /**
5402 * lpfc_send_els_event - Posts unsolicited els event
5403 * @vport: Pointer to vport object.
5404 * @ndlp: Pointer to FC node object.
5405 * @payload: pointer to the received ELS command payload.
5406 *
5407 * This function posts an event when there is an incoming
5408 * unsolicited ELS command.
5409 **/
5410 static void
5411 lpfc_send_els_event(struct lpfc_vport *vport,
5412 struct lpfc_nodelist *ndlp,
5413 uint32_t *payload)
5414 {
5415 struct lpfc_els_event_header *els_data = NULL;
5416 struct lpfc_logo_event *logo_data = NULL;
5417 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5418
5419 if (*payload == ELS_CMD_LOGO) {
5420 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
5421 if (!logo_data) {
5422 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5423 "0148 Failed to allocate memory "
5424 "for LOGO event\n");
5425 return;
5426 }
5427 els_data = &logo_data->header;
5428 } else {
5429 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
5430 GFP_KERNEL);
5431 if (!els_data) {
5432 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5433 "0149 Failed to allocate memory "
5434 "for ELS event\n");
5435 return;
5436 }
5437 }
5438 els_data->event_type = FC_REG_ELS_EVENT;
5439 switch (*payload) {
5440 case ELS_CMD_PLOGI:
5441 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
5442 break;
5443 case ELS_CMD_PRLO:
5444 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
5445 break;
5446 case ELS_CMD_ADISC:
5447 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
5448 break;
5449 case ELS_CMD_LOGO:
5450 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
5451 /* Copy the WWPN in the LOGO payload */
5452 memcpy(logo_data->logo_wwpn, &payload[2],
5453 sizeof(struct lpfc_name));
5454 break;
5455 default:
5456 kfree(els_data);
5457 return;
5458 }
5459 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
5460 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
5461 if (*payload == ELS_CMD_LOGO) {
5462 fc_host_post_vendor_event(shost,
5463 fc_get_event_number(),
5464 sizeof(struct lpfc_logo_event),
5465 (char *)logo_data,
5466 LPFC_NL_VENDOR_ID);
5467 kfree(logo_data);
5468 } else {
5469 fc_host_post_vendor_event(shost,
5470 fc_get_event_number(),
5471 sizeof(struct lpfc_els_event_header),
5472 (char *)els_data,
5473 LPFC_NL_VENDOR_ID);
5474 kfree(els_data);
5475 }
5476
5477 return;
5478 }
5479
5480
5481 /**
5482 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
5483 * @phba: pointer to lpfc hba data structure.
5484 * @pring: pointer to a SLI ring.
5485 * @vport: pointer to a host virtual N_Port data structure.
5486 * @elsiocb: pointer to lpfc els command iocb data structure.
5487 *
5488 * This routine is used for processing the IOCB associated with an unsolicited
5489 * event. It first determines whether there is an existing ndlp that matches
5490 * the DID from the unsolicited IOCB. If not, it will create a new one with
5491 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
5492 * IOCB is then used to invoke the proper routine and to set up proper state
5493 * of the discovery state machine.
5494 **/
5495 static void
5496 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5497 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
5498 {
5499 struct Scsi_Host *shost;
5500 struct lpfc_nodelist *ndlp;
5501 struct ls_rjt stat;
5502 uint32_t *payload;
5503 uint32_t cmd, did, newnode, rjt_err = 0;
5504 IOCB_t *icmd = &elsiocb->iocb;
5505
5506 if (!vport || !(elsiocb->context2))
5507 goto dropit;
5508
5509 newnode = 0;
5510 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
5511 cmd = *payload;
5512 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
5513 lpfc_post_buffer(phba, pring, 1);
5514
5515 did = icmd->un.rcvels.remoteID;
5516 if (icmd->ulpStatus) {
5517 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5518 "RCV Unsol ELS: status:x%x/x%x did:x%x",
5519 icmd->ulpStatus, icmd->un.ulpWord[4], did);
5520 goto dropit;
5521 }
5522
5523 /* Check to see if link went down during discovery */
5524 if (lpfc_els_chk_latt(vport))
5525 goto dropit;
5526
5527 /* Ignore traffic received during vport shutdown. */
5528 if (vport->load_flag & FC_UNLOADING)
5529 goto dropit;
5530
5531 ndlp = lpfc_findnode_did(vport, did);
5532 if (!ndlp) {
5533 /* Cannot find existing Fabric ndlp, so allocate a new one */
5534 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
5535 if (!ndlp)
5536 goto dropit;
5537
5538 lpfc_nlp_init(vport, ndlp, did);
5539 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5540 newnode = 1;
5541 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
5542 ndlp->nlp_type |= NLP_FABRIC;
5543 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5544 ndlp = lpfc_enable_node(vport, ndlp,
5545 NLP_STE_UNUSED_NODE);
5546 if (!ndlp)
5547 goto dropit;
5548 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5549 newnode = 1;
5550 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
5551 ndlp->nlp_type |= NLP_FABRIC;
5552 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
5553 /* This is similar to the new node path */
5554 ndlp = lpfc_nlp_get(ndlp);
5555 if (!ndlp)
5556 goto dropit;
5557 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5558 newnode = 1;
5559 }
5560
5561 phba->fc_stat.elsRcvFrame++;
5562
5563 elsiocb->context1 = lpfc_nlp_get(ndlp);
5564 elsiocb->vport = vport;
5565
5566 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
5567 cmd &= ELS_CMD_MASK;
5568 }
5569 /* ELS command <elsCmd> received from NPORT <did> */
5570 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5571 "0112 ELS command x%x received from NPORT x%x "
5572 "Data: x%x\n", cmd, did, vport->port_state);
5573 switch (cmd) {
5574 case ELS_CMD_PLOGI:
5575 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5576 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
5577 did, vport->port_state, ndlp->nlp_flag);
5578
5579 phba->fc_stat.elsRcvPLOGI++;
5580 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
5581
5582 lpfc_send_els_event(vport, ndlp, payload);
5583 if (vport->port_state < LPFC_DISC_AUTH) {
5584 if (!(phba->pport->fc_flag & FC_PT2PT) ||
5585 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
5586 rjt_err = LSRJT_UNABLE_TPC;
5587 break;
5588 }
5589 /* We get here, and drop thru, if we are PT2PT with
5590 * another NPort and the other side has initiated
5591 * the PLOGI before responding to our FLOGI.
5592 */
5593 }
5594
5595 shost = lpfc_shost_from_vport(vport);
5596 spin_lock_irq(shost->host_lock);
5597 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
5598 spin_unlock_irq(shost->host_lock);
5599
5600 lpfc_disc_state_machine(vport, ndlp, elsiocb,
5601 NLP_EVT_RCV_PLOGI);
5602
5603 break;
5604 case ELS_CMD_FLOGI:
5605 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5606 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
5607 did, vport->port_state, ndlp->nlp_flag);
5608
5609 phba->fc_stat.elsRcvFLOGI++;
5610 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
5611 if (newnode)
5612 lpfc_nlp_put(ndlp);
5613 break;
5614 case ELS_CMD_LOGO:
5615 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5616 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
5617 did, vport->port_state, ndlp->nlp_flag);
5618
5619 phba->fc_stat.elsRcvLOGO++;
5620 lpfc_send_els_event(vport, ndlp, payload);
5621 if (vport->port_state < LPFC_DISC_AUTH) {
5622 rjt_err = LSRJT_UNABLE_TPC;
5623 break;
5624 }
5625 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
5626 break;
5627 case ELS_CMD_PRLO:
5628 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5629 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
5630 did, vport->port_state, ndlp->nlp_flag);
5631
5632 phba->fc_stat.elsRcvPRLO++;
5633 lpfc_send_els_event(vport, ndlp, payload);
5634 if (vport->port_state < LPFC_DISC_AUTH) {
5635 rjt_err = LSRJT_UNABLE_TPC;
5636 break;
5637 }
5638 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
5639 break;
5640 case ELS_CMD_RSCN:
5641 phba->fc_stat.elsRcvRSCN++;
5642 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
5643 if (newnode)
5644 lpfc_nlp_put(ndlp);
5645 break;
5646 case ELS_CMD_ADISC:
5647 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5648 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
5649 did, vport->port_state, ndlp->nlp_flag);
5650
5651 lpfc_send_els_event(vport, ndlp, payload);
5652 phba->fc_stat.elsRcvADISC++;
5653 if (vport->port_state < LPFC_DISC_AUTH) {
5654 rjt_err = LSRJT_UNABLE_TPC;
5655 break;
5656 }
5657 lpfc_disc_state_machine(vport, ndlp, elsiocb,
5658 NLP_EVT_RCV_ADISC);
5659 break;
5660 case ELS_CMD_PDISC:
5661 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5662 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
5663 did, vport->port_state, ndlp->nlp_flag);
5664
5665 phba->fc_stat.elsRcvPDISC++;
5666 if (vport->port_state < LPFC_DISC_AUTH) {
5667 rjt_err = LSRJT_UNABLE_TPC;
5668 break;
5669 }
5670 lpfc_disc_state_machine(vport, ndlp, elsiocb,
5671 NLP_EVT_RCV_PDISC);
5672 break;
5673 case ELS_CMD_FARPR:
5674 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5675 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
5676 did, vport->port_state, ndlp->nlp_flag);
5677
5678 phba->fc_stat.elsRcvFARPR++;
5679 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
5680 break;
5681 case ELS_CMD_FARP:
5682 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5683 "RCV FARP: did:x%x/ste:x%x flg:x%x",
5684 did, vport->port_state, ndlp->nlp_flag);
5685
5686 phba->fc_stat.elsRcvFARP++;
5687 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
5688 break;
5689 case ELS_CMD_FAN:
5690 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5691 "RCV FAN: did:x%x/ste:x%x flg:x%x",
5692 did, vport->port_state, ndlp->nlp_flag);
5693
5694 phba->fc_stat.elsRcvFAN++;
5695 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
5696 break;
5697 case ELS_CMD_PRLI:
5698 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5699 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
5700 did, vport->port_state, ndlp->nlp_flag);
5701
5702 phba->fc_stat.elsRcvPRLI++;
5703 if (vport->port_state < LPFC_DISC_AUTH) {
5704 rjt_err = LSRJT_UNABLE_TPC;
5705 break;
5706 }
5707 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
5708 break;
5709 case ELS_CMD_LIRR:
5710 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5711 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
5712 did, vport->port_state, ndlp->nlp_flag);
5713
5714 phba->fc_stat.elsRcvLIRR++;
5715 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
5716 if (newnode)
5717 lpfc_nlp_put(ndlp);
5718 break;
5719 case ELS_CMD_RPS:
5720 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5721 "RCV RPS: did:x%x/ste:x%x flg:x%x",
5722 did, vport->port_state, ndlp->nlp_flag);
5723
5724 phba->fc_stat.elsRcvRPS++;
5725 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
5726 if (newnode)
5727 lpfc_nlp_put(ndlp);
5728 break;
5729 case ELS_CMD_RPL:
5730 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5731 "RCV RPL: did:x%x/ste:x%x flg:x%x",
5732 did, vport->port_state, ndlp->nlp_flag);
5733
5734 phba->fc_stat.elsRcvRPL++;
5735 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
5736 if (newnode)
5737 lpfc_nlp_put(ndlp);
5738 break;
5739 case ELS_CMD_RNID:
5740 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5741 "RCV RNID: did:x%x/ste:x%x flg:x%x",
5742 did, vport->port_state, ndlp->nlp_flag);
5743
5744 phba->fc_stat.elsRcvRNID++;
5745 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
5746 if (newnode)
5747 lpfc_nlp_put(ndlp);
5748 break;
5749 case ELS_CMD_RRQ:
5750 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5751 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
5752 did, vport->port_state, ndlp->nlp_flag);
5753
5754 phba->fc_stat.elsRcvRRQ++;
5755 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
5756 if (newnode)
5757 lpfc_nlp_put(ndlp);
5758 break;
5759 default:
5760 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5761 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
5762 cmd, did, vport->port_state);
5763
5764 /* Unsupported ELS command, reject */
5765 rjt_err = LSRJT_INVALID_CMD;
5766
5767 /* Unknown ELS command <elsCmd> received from NPORT <did> */
5768 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5769 "0115 Unknown ELS command x%x "
5770 "received from NPORT x%x\n", cmd, did);
5771 if (newnode)
5772 lpfc_nlp_put(ndlp);
5773 break;
5774 }
5775
5776 /* check whether the received ELS cmd needs an LS_RJT response */
5777 if (rjt_err) {
5778 memset(&stat, 0, sizeof(stat));
5779 stat.un.b.lsRjtRsnCode = rjt_err;
5780 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
5781 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
5782 NULL);
5783 }
5784
5785 lpfc_nlp_put(elsiocb->context1);
5786 elsiocb->context1 = NULL;
5787 return;
5788
5789 dropit:
5790 if (vport && !(vport->load_flag & FC_UNLOADING))
5791 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5792 "0111 Dropping received ELS cmd "
5793 "Data: x%x x%x x%x\n",
5794 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
5795 phba->fc_stat.elsRcvDrop++;
5796 }
5797
5798 /**
5799 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
5800 * @phba: pointer to lpfc hba data structure.
5801 * @vpi: host virtual N_Port identifier.
5802 *
5803 * This routine finds a vport on a HBA (referred by @phba) through a
5804 * @vpi. The function walks the HBA's vport list and returns the address
5805 * of the vport with the matching @vpi.
5806 *
5807 * Return code
5808 * NULL - No vport with the matching @vpi found
5809 * Otherwise - Address to the vport with the matching @vpi.
5810 **/
5811 struct lpfc_vport *
5812 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
5813 {
5814 struct lpfc_vport *vport;
5815 unsigned long flags;
5816
5817 spin_lock_irqsave(&phba->hbalock, flags);
5818 list_for_each_entry(vport, &phba->port_list, listentry) {
5819 if (vport->vpi == vpi) {
5820 spin_unlock_irqrestore(&phba->hbalock, flags);
5821 return vport;
5822 }
5823 }
5824 spin_unlock_irqrestore(&phba->hbalock, flags);
5825 return NULL;
5826 }
5827
5828 /**
5829 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
5830 * @phba: pointer to lpfc hba data structure.
5831 * @pring: pointer to a SLI ring.
5832 * @elsiocb: pointer to lpfc els iocb data structure.
5833 *
5834 * This routine is used to process an unsolicited event received from a SLI
5835 * (Service Level Interface) ring. The actual processing of the data buffer
5836 * associated with the unsolicited event is done by invoking the routine
5837 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
5838 * SLI ring on which the unsolicited event was received.
5839 **/
5840 void
5841 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5842 struct lpfc_iocbq *elsiocb)
5843 {
5844 struct lpfc_vport *vport = phba->pport;
5845 IOCB_t *icmd = &elsiocb->iocb;
5846 dma_addr_t paddr;
5847 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
5848 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
5849
5850 elsiocb->context1 = NULL;
5851 elsiocb->context2 = NULL;
5852 elsiocb->context3 = NULL;
5853
5854 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
5855 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
5856 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
5857 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
5858 phba->fc_stat.NoRcvBuf++;
5859 /* Not enough posted buffers; Try posting more buffers */
5860 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
5861 lpfc_post_buffer(phba, pring, 0);
5862 return;
5863 }
5864
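	/* With NPIV enabled, the receive IOCB reports an absolute VPI;
	 * subtract the adapter's vpi_base to find the owning vport.
	 */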
5865 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
5866 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
5867 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
5868 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
5869 vport = phba->pport;
5870 else
5871 vport = lpfc_find_vport_by_vpid(phba,
5872 icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
5873 }
5874 /* If there are no BDEs associated
5875 * with this IOCB, there is nothing to do.
5876 */
5877 if (icmd->ulpBdeCount == 0)
5878 return;
5879
5880 /* type of ELS cmd is first 32bit word
5881 * in packet
5882 */
5883 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5884 elsiocb->context2 = bdeBuf1;
5885 } else {
5886 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
5887 icmd->un.cont64[0].addrLow);
5888 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
5889 paddr);
5890 }
5891
5892 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
5893 /*
5894 * The different unsolicited event handlers would tell us
5895 * if they are done with "mp" by setting context2 to NULL.
5896 */
5897 if (elsiocb->context2) {
5898 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
5899 elsiocb->context2 = NULL;
5900 }
5901
5902 /* RCV_ELS64_CX provides for 2 BDEs - process 2nd if included */
5903 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
5904 icmd->ulpBdeCount == 2) {
5905 elsiocb->context2 = bdeBuf2;
5906 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
5907 /* free mp if we are done with it */
5908 if (elsiocb->context2) {
5909 lpfc_in_buf_free(phba, elsiocb->context2);
5910 elsiocb->context2 = NULL;
5911 }
5912 }
5913 }
5914
5915 /**
5916 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
5917 * @phba: pointer to lpfc hba data structure.
5918 * @vport: pointer to a virtual N_Port data structure.
5919 *
5920 * This routine issues a Port Login (PLOGI) to the Name Server with
5921 * State Change Request (SCR) for a @vport. This routine will create an
5922 * ndlp for the Name Server associated to the @vport if such node does
5923 * not already exist. The PLOGI to Name Server is issued by invoking the
5924 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
5925 * (FDMI) is configured on the @vport, an FDMI node will be created and
5926 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
5927 **/
5928 void
5929 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
5930 {
5931 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
5932
5933 ndlp = lpfc_findnode_did(vport, NameServer_DID);
5934 if (!ndlp) {
5935 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
5936 if (!ndlp) {
5937 if (phba->fc_topology == TOPOLOGY_LOOP) {
5938 lpfc_disc_start(vport);
5939 return;
5940 }
5941 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
5942 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5943 "0251 NameServer login: no memory\n");
5944 return;
5945 }
5946 lpfc_nlp_init(vport, ndlp, NameServer_DID);
5947 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5948 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
5949 if (!ndlp) {
5950 if (phba->fc_topology == TOPOLOGY_LOOP) {
5951 lpfc_disc_start(vport);
5952 return;
5953 }
5954 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
5955 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5956 "0348 NameServer login: node freed\n");
5957 return;
5958 }
5959 }
5960 ndlp->nlp_type |= NLP_FABRIC;
5961
5962 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
5963
5964 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
5965 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
5966 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5967 "0252 Cannot issue NameServer login\n");
5968 return;
5969 }
5970
5971 if (vport->cfg_fdmi_on) {
5972 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
5973 GFP_KERNEL);
5974 if (ndlp_fdmi) {
5975 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
5976 ndlp_fdmi->nlp_type |= NLP_FABRIC;
5977 lpfc_nlp_set_state(vport, ndlp_fdmi,
5978 NLP_STE_PLOGI_ISSUE);
5979 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
5980 0);
5981 }
5982 }
5983 return;
5984 }
5985
5986 /**
5987 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
5988 * @phba: pointer to lpfc hba data structure.
5989 * @pmb: pointer to the driver internal queue element for mailbox command.
5990 *
5991 * This routine is the completion callback function to register new vport
5992 * mailbox command. If the new vport mailbox command completes successfully,
5993 * the fabric registration login shall be performed on physical port (the
5994 * new vport created is actually a physical port, with VPI 0) or the port
5995 * login to Name Server for State Change Request (SCR) will be performed
5996 * on virtual port (real virtual port, with VPI greater than 0).
5997 **/
5998 static void
5999 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6000 {
6001 struct lpfc_vport *vport = pmb->vport;
6002 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6003 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
6004 MAILBOX_t *mb = &pmb->u.mb;
6005 int rc;
6006
6007 spin_lock_irq(shost->host_lock);
6008 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
6009 spin_unlock_irq(shost->host_lock);
6010
6011 if (mb->mbxStatus) {
6012 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6013 "0915 Register VPI failed: 0x%x\n",
6014 mb->mbxStatus);
6015
6016 switch (mb->mbxStatus) {
6017 case 0x11: /* unsupported feature */
6018 case 0x9603: /* max_vpi exceeded */
6019 case 0x9602: /* Link event since CLEAR_LA */
6020 /* giving up on vport registration */
6021 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6022 spin_lock_irq(shost->host_lock);
6023 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6024 spin_unlock_irq(shost->host_lock);
6025 lpfc_can_disctmo(vport);
6026 break;
6027 /* If reg_vpi fails with invalid VPI status, re-init VPI */
6028 case 0x20:
6029 spin_lock_irq(shost->host_lock);
6030 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6031 spin_unlock_irq(shost->host_lock);
6032 lpfc_init_vpi(phba, pmb, vport->vpi);
6033 pmb->vport = vport;
6034 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
6035 rc = lpfc_sli_issue_mbox(phba, pmb,
6036 MBX_NOWAIT);
6037 if (rc == MBX_NOT_FINISHED) {
6038 lpfc_printf_vlog(vport,
6039 KERN_ERR, LOG_MBOX,
6040 "2732 Failed to issue INIT_VPI"
6041 " mailbox command\n");
6042 } else {
6043 lpfc_nlp_put(ndlp);
6044 return;
6045 }
6046
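			/* Fall through to the recovery path below when the
			 * INIT_VPI mailbox command could not be issued.
			 */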
6047 default:
6048 /* Try to recover from this error */
6049 lpfc_mbx_unreg_vpi(vport);
6050 spin_lock_irq(shost->host_lock);
6051 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6052 spin_unlock_irq(shost->host_lock);
6053 if (vport->port_type == LPFC_PHYSICAL_PORT)
6054 lpfc_initial_flogi(vport);
6055 else
6056 lpfc_initial_fdisc(vport);
6057 break;
6058 }
6059 } else {
6060 spin_lock_irq(shost->host_lock);
6061 vport->vpi_state |= LPFC_VPI_REGISTERED;
6062 spin_unlock_irq(shost->host_lock);
6063 if (vport == phba->pport) {
6064 if (phba->sli_rev < LPFC_SLI_REV4)
6065 lpfc_issue_fabric_reglogin(vport);
6066 else {
6067 /*
6068 * If the physical port is instantiated using
6069 * FDISC, do not start vport discovery.
6070 */
6071 if (vport->port_state != LPFC_FDISC)
6072 lpfc_start_fdiscs(phba);
6073 lpfc_do_scr_ns_plogi(phba, vport);
6074 }
6075 } else
6076 lpfc_do_scr_ns_plogi(phba, vport);
6077 }
6078
6079 /* Now, we decrement the ndlp reference count held for this
6080 * callback function
6081 */
6082 lpfc_nlp_put(ndlp);
6083
6084 mempool_free(pmb, phba->mbox_mem_pool);
6085 return;
6086 }
6087
6088 /**
6089 * lpfc_register_new_vport - Register a new vport with a HBA
6090 * @phba: pointer to lpfc hba data structure.
6091 * @vport: pointer to a host virtual N_Port data structure.
6092 * @ndlp: pointer to a node-list data structure.
6093 *
6094 * This routine registers the @vport as a new virtual port with a HBA.
6095 * It is done through a registering vpi mailbox command.
6096 **/
6097 void
6098 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
6099 struct lpfc_nodelist *ndlp)
6100 {
6101 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6102 LPFC_MBOXQ_t *mbox;
6103
6104 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6105 if (mbox) {
6106 lpfc_reg_vpi(vport, mbox);
6107 mbox->vport = vport;
6108 mbox->context2 = lpfc_nlp_get(ndlp);
6109 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
6110 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
6111 == MBX_NOT_FINISHED) {
6112 /* mailbox command was not successful, decrement ndlp
6113 * reference count for this command
6114 */
6115 lpfc_nlp_put(ndlp);
6116 mempool_free(mbox, phba->mbox_mem_pool);
6117
6118 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6119 "0253 Register VPI: Can't send mbox\n");
6120 goto mbox_err_exit;
6121 }
6122 } else {
6123 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6124 "0254 Register VPI: no memory\n");
6125 goto mbox_err_exit;
6126 }
6127 return;
6128
6129 mbox_err_exit:
6130 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6131 spin_lock_irq(shost->host_lock);
6132 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
6133 spin_unlock_irq(shost->host_lock);
6134 return;
6135 }
6136
6137 /**
6138 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timers
6139 * @phba: pointer to lpfc hba data structure.
6140 *
6141 * This routine cancels the retry delay timers for all the vports.
6142 **/
6143 void
6144 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
6145 {
6146 struct lpfc_vport **vports;
6147 struct lpfc_nodelist *ndlp;
6148 uint32_t link_state;
6149 int i;
6150
6151 /* Treat this failure as linkdown for all vports */
6152 link_state = phba->link_state;
6153 lpfc_linkdown(phba);
6154 phba->link_state = link_state;
6155
6156 vports = lpfc_create_vport_work_array(phba);
6157
6158 if (vports) {
6159 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6160 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6161 if (ndlp)
6162 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6163 lpfc_els_flush_cmd(vports[i]);
6164 }
6165 lpfc_destroy_vport_work_array(phba, vports);
6166 }
6167 }
6168
6169 /**
6170 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
6171 * @phba: pointer to lpfc hba data structure.
6172 *
6173 * This routine aborts all pending discovery commands and
6174 * starts a timer to retry FLOGI for the physical port
6175 * discovery.
6176 **/
6177 void
6178 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
6179 {
6180 struct lpfc_nodelist *ndlp;
6181 struct Scsi_Host *shost;
6182
6183 /* Cancel the retry delay timers for all vports */
6184 lpfc_cancel_all_vport_retry_delay_timer(phba);
6185
6186 /* If the fabric requires FLOGI, re-instantiate the physical login */
6187 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6188 if (!ndlp)
6189 return;
6190
6191 shost = lpfc_shost_from_vport(phba->pport);
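	/* Arm the node's delayed-retry timer for one second and record
	 * FLOGI as the ELS command to reissue when it fires.
	 */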
6192 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
6193 spin_lock_irq(shost->host_lock);
6194 ndlp->nlp_flag |= NLP_DELAY_TMO;
6195 spin_unlock_irq(shost->host_lock);
6196 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
6197 phba->pport->port_state = LPFC_FLOGI;
6198 return;
6199 }
6200
6201 /**
6202 * lpfc_fabric_login_reqd - Check if FLOGI required.
6203 * @phba: pointer to lpfc hba data structure.
6204 * @cmdiocb: pointer to FDISC command iocb.
6205 * @rspiocb: pointer to FDISC response iocb.
6206 *
6207 * This routine checks if a FLOGI is required for FDISC
6208 * to succeed.
6209 **/
6210 static int
6211 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
6212 struct lpfc_iocbq *cmdiocb,
6213 struct lpfc_iocbq *rspiocb)
6214 {
6215
6216 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
6217 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
6218 return 0;
6219 else
6220 return 1;
6221 }
6222
6223 /**
6224 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
6225 * @phba: pointer to lpfc hba data structure.
6226 * @cmdiocb: pointer to lpfc command iocb data structure.
6227 * @rspiocb: pointer to lpfc response iocb data structure.
6228 *
6229 * This routine is the completion callback function to a Fabric Discover
6230 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
6231 * single threaded, each FDISC completion callback function will reset
6232 * the discovery timer for all vports so that the timers do not time out
6233 * unnecessarily. The function checks the FDISC IOCB status. If an error is
6234 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the
6235 * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID
6236 * assigned to the vport has been changed with the completion of the FDISC
6237 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
6238 * are unregistered from the HBA, and then the lpfc_register_new_vport()
6239 * routine is invoked to register new vport with the HBA. Otherwise, the
6240 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
6241 * Server for State Change Request (SCR).
6242 **/
6243 static void
6244 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6245 struct lpfc_iocbq *rspiocb)
6246 {
6247 struct lpfc_vport *vport = cmdiocb->vport;
6248 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6249 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
6250 struct lpfc_nodelist *np;
6251 struct lpfc_nodelist *next_np;
6252 IOCB_t *irsp = &rspiocb->iocb;
6253 struct lpfc_iocbq *piocb;
6254
6255 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6256 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
6257 irsp->ulpStatus, irsp->un.ulpWord[4],
6258 vport->fc_prevDID);
6259 /* Since all FDISCs are being single threaded, we
6260 * must reset the discovery timer for ALL vports
6261 * waiting to send FDISC when one completes.
6262 */
6263 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
6264 lpfc_set_disctmo(piocb->vport);
6265 }
6266
6267 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6268 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
6269 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
6270
6271 if (irsp->ulpStatus) {
6272
6273 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
6274 lpfc_retry_pport_discovery(phba);
6275 goto out;
6276 }
6277
6278 /* Check for retry */
6279 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
6280 goto out;
6281 /* FDISC failed */
6282 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6283 "0126 FDISC failed. (%d/%d)\n",
6284 irsp->ulpStatus, irsp->un.ulpWord[4]);
6285 goto fdisc_failed;
6286 }
6287 spin_lock_irq(shost->host_lock);
6288 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
6289 vport->fc_flag |= FC_FABRIC;
6290 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
6291 vport->fc_flag |= FC_PUBLIC_LOOP;
6292 spin_unlock_irq(shost->host_lock);
6293
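	/* Word 4 of the FDISC response carries the N_Port ID the fabric
	 * assigned to this vport.
	 */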
6294 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
6295 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
6296 if ((vport->fc_prevDID != vport->fc_myDID) &&
6297 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
6298 /* If our NportID changed, we need to ensure all
6299 * remaining NPORTs get unreg_login'ed so we can
6300 * issue unreg_vpi.
6301 */
6302 list_for_each_entry_safe(np, next_np,
6303 &vport->fc_nodes, nlp_listp) {
6304 if (!NLP_CHK_NODE_ACT(ndlp) ||
6305 (np->nlp_state != NLP_STE_NPR_NODE) ||
6306 !(np->nlp_flag & NLP_NPR_ADISC))
6307 continue;
6308 spin_lock_irq(shost->host_lock);
6309 np->nlp_flag &= ~NLP_NPR_ADISC;
6310 spin_unlock_irq(shost->host_lock);
6311 lpfc_unreg_rpi(vport, np);
6312 }
6313 lpfc_mbx_unreg_vpi(vport);
6314 spin_lock_irq(shost->host_lock);
6315 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6316 if (phba->sli_rev == LPFC_SLI_REV4)
6317 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6318 spin_unlock_irq(shost->host_lock);
6319 }
6320
6321 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
6322 lpfc_issue_init_vpi(vport);
6323 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
6324 lpfc_register_new_vport(phba, vport, ndlp);
6325 else
6326 lpfc_do_scr_ns_plogi(phba, vport);
6327 goto out;
6328 fdisc_failed:
6329 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6330 /* Cancel discovery timer */
6331 lpfc_can_disctmo(vport);
6332 lpfc_nlp_put(ndlp);
6333 out:
6334 lpfc_els_free_iocb(phba, cmdiocb);
6335 }
6336
6337 /**
6338 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
6339 * @vport: pointer to a virtual N_Port data structure.
6340 * @ndlp: pointer to a node-list data structure.
6341 * @retry: number of retries to the command IOCB.
6342 *
6343 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
6344 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
6345 * routine to issue the IOCB, which makes sure only one outstanding fabric
6346 * IOCB will be sent off HBA at any given time.
6347 *
6348 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6349 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6350 * will be stored into the context1 field of the IOCB for the completion
6351 * callback function to the FDISC ELS command.
6352 *
6353 * Return code
6354 * 0 - Successfully issued fdisc iocb command
6355 * 1 - Failed to issue fdisc iocb command
6356 **/
6357 static int
6358 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6359 uint8_t retry)
6360 {
6361 struct lpfc_hba *phba = vport->phba;
6362 IOCB_t *icmd;
6363 struct lpfc_iocbq *elsiocb;
6364 struct serv_parm *sp;
6365 uint8_t *pcmd;
6366 uint16_t cmdsize;
6367 int did = ndlp->nlp_DID;
6368 int rc;
6369
6370 vport->port_state = LPFC_FDISC;
6371 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
6372 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
6373 ELS_CMD_FDISC);
6374 if (!elsiocb) {
6375 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6376 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6377 "0255 Issue FDISC: no IOCB\n");
6378 return 1;
6379 }
6380
6381 icmd = &elsiocb->iocb;
6382 icmd->un.elsreq64.myID = 0;
6383 icmd->un.elsreq64.fl = 1;
6384
6385 if (phba->sli_rev == LPFC_SLI_REV4) {
6386 /* FDISC needs to be 1 for WQE VPI */
6387 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
6388 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
6389 /* Set the ulpContext to the vpi */
6390 elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
6391 } else {
6392 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
6393 icmd->ulpCt_h = 1;
6394 icmd->ulpCt_l = 0;
6395 }
6396
6397 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6398 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
6399 pcmd += sizeof(uint32_t); /* CSP Word 1 */
6400 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
6401 sp = (struct serv_parm *) pcmd;
6402 /* Setup CSPs accordingly for Fabric */
6403 sp->cmn.e_d_tov = 0;
6404 sp->cmn.w2.r_a_tov = 0;
6405 sp->cls1.classValid = 0;
6406 sp->cls2.seqDelivery = 1;
6407 sp->cls3.seqDelivery = 1;
6408
6409 pcmd += sizeof(uint32_t); /* CSP Word 2 */
6410 pcmd += sizeof(uint32_t); /* CSP Word 3 */
6411 pcmd += sizeof(uint32_t); /* CSP Word 4 */
6412 pcmd += sizeof(uint32_t); /* Port Name */
6413 memcpy(pcmd, &vport->fc_portname, 8);
6414 pcmd += sizeof(uint32_t); /* Node Name */
6415 pcmd += sizeof(uint32_t); /* Node Name */
6416 memcpy(pcmd, &vport->fc_nodename, 8);
6417
6418 lpfc_set_disctmo(vport);
6419
6420 phba->fc_stat.elsXmitFDISC++;
6421 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
6422
6423 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6424 "Issue FDISC: did:x%x",
6425 did, 0, 0);
6426
6427 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
6428 if (rc == IOCB_ERROR) {
6429 lpfc_els_free_iocb(phba, elsiocb);
6430 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6431 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6432 "0256 Issue FDISC: Cannot send IOCB\n");
6433 return 1;
6434 }
6435 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
6436 return 0;
6437 }
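/*
 * For reference, a sketch of the FDISC payload that the pcmd arithmetic in
 * lpfc_issue_els_fdisc() above builds. The struct below is illustrative only
 * (it is not part of the driver); the layout is inferred from the code:
 *
 *	struct fdisc_payload_sketch {
 *		uint32_t els_cmd;	   word 0: ELS_CMD_FDISC
 *		struct serv_parm sp;	   copied from the physical port's
 *					   fc_sparam, then patched in place:
 *					   e_d_tov, r_a_tov and cls1 cleared,
 *					   cls2/cls3 sequence delivery set,
 *					   port and node names overwritten
 *					   with the vport's fc_portname and
 *					   fc_nodename
 *	};
 *
 * This matches cmdsize = sizeof(uint32_t) + sizeof(struct serv_parm).
 */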
6438
6439 /**
6440 * lpfc_cmpl_els_npiv_logo - Completion function for a vport LOGO command
6441 * @phba: pointer to lpfc hba data structure.
6442 * @cmdiocb: pointer to lpfc command iocb data structure.
6443 * @rspiocb: pointer to lpfc response iocb data structure.
6444 *
6445 * This routine is the completion callback function to the issuing of a LOGO
6446 * ELS command off a vport. It frees the command IOCB and then decrements the
6447 * reference count held on the ndlp for this completion function, indicating
6448 * that the reference to the ndlp is no longer needed. Note that the
6449 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
6450 * callback function, and the additional explicit ndlp reference decrement
6451 * will trigger the actual release of the ndlp.
6452 **/
6453 static void
6454 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6455 struct lpfc_iocbq *rspiocb)
6456 {
6457 struct lpfc_vport *vport = cmdiocb->vport;
6458 IOCB_t *irsp;
6459 struct lpfc_nodelist *ndlp;
6460 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
6461
6462 irsp = &rspiocb->iocb;
6463 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6464 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
6465 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
6466
6467 lpfc_els_free_iocb(phba, cmdiocb);
6468 vport->unreg_vpi_cmpl = VPORT_ERROR;
6469
6470 /* Trigger the release of the ndlp after logo */
6471 lpfc_nlp_put(ndlp);
6472 }
6473
6474 /**
6475 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
6476 * @vport: pointer to a virtual N_Port data structure.
6477 * @ndlp: pointer to a node-list data structure.
6478 *
6479 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
6480 *
6481 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6482 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6483 * will be stored into the context1 field of the IOCB for the completion
6484 * callback function to the LOGO ELS command.
6485 *
6486 * Return codes
6487 * 0 - Successfully issued logo off the @vport
6488 * 1 - Failed to issue logo off the @vport
6489 **/
6490 int
6491 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6492 {
6493 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6494 struct lpfc_hba *phba = vport->phba;
6495 IOCB_t *icmd;
6496 struct lpfc_iocbq *elsiocb;
6497 uint8_t *pcmd;
6498 uint16_t cmdsize;
6499
6500 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
6501 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
6502 ELS_CMD_LOGO);
6503 if (!elsiocb)
6504 return 1;
6505
6506 icmd = &elsiocb->iocb;
6507 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6508 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
6509 pcmd += sizeof(uint32_t);
6510
6511 /* Fill in LOGO payload */
6512 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
6513 pcmd += sizeof(uint32_t);
6514 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
6515
6516 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6517 "Issue LOGO npiv did:x%x flg:x%x",
6518 ndlp->nlp_DID, ndlp->nlp_flag, 0);
6519
6520 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
6521 spin_lock_irq(shost->host_lock);
6522 ndlp->nlp_flag |= NLP_LOGO_SND;
6523 spin_unlock_irq(shost->host_lock);
6524 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
6525 IOCB_ERROR) {
6526 spin_lock_irq(shost->host_lock);
6527 ndlp->nlp_flag &= ~NLP_LOGO_SND;
6528 spin_unlock_irq(shost->host_lock);
6529 lpfc_els_free_iocb(phba, elsiocb);
6530 return 1;
6531 }
6532 return 0;
6533 }
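/*
 * For reference, a sketch of the LOGO payload built by the pcmd arithmetic in
 * lpfc_issue_els_npiv_logo() above (illustrative only, inferred from the code):
 *
 *	word 0:    ELS_CMD_LOGO
 *	word 1:    the vport's N_Port ID (vport->fc_myDID, byte-swapped for
 *		   the wire)
 *	words 2-3: the vport's WWPN (vport->fc_portname, struct lpfc_name,
 *		   8 bytes)
 *
 * This matches cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name).
 */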
6534
6535 /**
6536 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
6537 * @ptr: holder for the timer function associated data.
6538 *
6539 * This routine is invoked by the fabric iocb block timer after
6540 * timeout. It posts the fabric iocb block timeout event by setting the
6541 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
6542 * the lpfc_worker_wake_up() routine to wake up the worker thread. The worker
6543 * thread then invokes lpfc_unblock_fabric_iocbs() in response to the posted
6544 * WORKER_FABRIC_BLOCK_TMO event.
6545 **/
6546 void
6547 lpfc_fabric_block_timeout(unsigned long ptr)
6548 {
6549 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
6550 unsigned long iflags;
6551 uint32_t tmo_posted;
6552
6553 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
6554 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
6555 if (!tmo_posted)
6556 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
6557 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
6558
6559 if (!tmo_posted)
6560 lpfc_worker_wake_up(phba);
6561 return;
6562 }
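/*
 * A minimal sketch (not the driver's actual initialization code, which lives
 * elsewhere) of how a timer like fabric_block_timer is typically wired to
 * lpfc_fabric_block_timeout() with the phba pointer as its data, using the
 * classic timer API of this kernel era:
 *
 *	init_timer(&phba->fabric_block_timer);
 *	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
 *	phba->fabric_block_timer.data = (unsigned long)phba;
 *
 * lpfc_block_fabric_iocbs() below then only needs mod_timer() with
 * jiffies + HZ/10 to arm the 100 ms unblock timeout.
 */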
6563
6564 /**
6565 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
6566 * @phba: pointer to lpfc hba data structure.
6567 *
6568 * This routine issues one fabric iocb from the driver internal list to
6569 * the HBA. It first checks whether it's ready to issue one fabric iocb to
6570 * the HBA (i.e., whether there is no outstanding fabric iocb). If so, it
6571 * removes one pending fabric iocb from the driver internal list and invokes
6572 * the lpfc_sli_issue_iocb() routine to send that fabric iocb to the HBA.
6573 **/
6574 static void
6575 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
6576 {
6577 struct lpfc_iocbq *iocb;
6578 unsigned long iflags;
6579 int ret;
6580 IOCB_t *cmd;
6581
6582 repeat:
6583 iocb = NULL;
6584 spin_lock_irqsave(&phba->hbalock, iflags);
6585 /* Post any pending iocb to the SLI layer */
6586 if (atomic_read(&phba->fabric_iocb_count) == 0) {
6587 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
6588 list);
6589 if (iocb)
6590 /* Increment fabric iocb count to hold the position */
6591 atomic_inc(&phba->fabric_iocb_count);
6592 }
6593 spin_unlock_irqrestore(&phba->hbalock, iflags);
6594 if (iocb) {
6595 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
6596 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
6597 iocb->iocb_flag |= LPFC_IO_FABRIC;
6598
6599 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
6600 "Fabric sched1: ste:x%x",
6601 iocb->vport->port_state, 0, 0);
6602
6603 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6604
6605 if (ret == IOCB_ERROR) {
6606 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
6607 iocb->fabric_iocb_cmpl = NULL;
6608 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
6609 cmd = &iocb->iocb;
6610 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
6611 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
6612 iocb->iocb_cmpl(phba, iocb, iocb);
6613
6614 atomic_dec(&phba->fabric_iocb_count);
6615 goto repeat;
6616 }
6617 }
6618
6619 return;
6620 }
6621
6622 /**
6623 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
6624 * @phba: pointer to lpfc hba data structure.
6625 *
6626 * This routine unblocks the issuing of fabric iocb commands. The function
6627 * clears the fabric iocb block bit and then invokes the
6628 * lpfc_resume_fabric_iocbs() routine to issue one of the pending fabric
6629 * iocbs from the driver internal fabric iocb list.
6630 **/
6631 void
6632 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
6633 {
6634 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
6635
6636 lpfc_resume_fabric_iocbs(phba);
6637 return;
6638 }
6639
6640 /**
6641 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
6642 * @phba: pointer to lpfc hba data structure.
6643 *
6644 * This routine blocks the issuing of fabric iocbs for a specified amount of
6645 * time (currently 100 ms). This is done by setting the fabric iocb block bit
6646 * and setting up a timeout timer for 100 ms. While the block bit is set, no
6647 * more fabric iocbs will be issued to the HBA.
6648 **/
6649 static void
6650 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
6651 {
6652 int blocked;
6653
6654 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
6655 /* Start a timer to unblock fabric iocbs after 100ms */
6656 if (!blocked)
6657 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10);
6658
6659 return;
6660 }
6661
6662 /**
6663 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
6664 * @phba: pointer to lpfc hba data structure.
6665 * @cmdiocb: pointer to lpfc command iocb data structure.
6666 * @rspiocb: pointer to lpfc response iocb data structure.
6667 *
6668 * This routine is the callback function installed as the fabric iocb's
6669 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
6670 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
6671 * function first restores and invokes the original iocb's callback function
6672 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
6673 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
6674 **/
6675 static void
6676 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6677 struct lpfc_iocbq *rspiocb)
6678 {
6679 struct ls_rjt stat;
6680
6681 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
6682 BUG();
6683
6684 switch (rspiocb->iocb.ulpStatus) {
6685 case IOSTAT_NPORT_RJT:
6686 case IOSTAT_FABRIC_RJT:
6687 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
6688 lpfc_block_fabric_iocbs(phba);
6689 }
6690 break;
6691
6692 case IOSTAT_NPORT_BSY:
6693 case IOSTAT_FABRIC_BSY:
6694 lpfc_block_fabric_iocbs(phba);
6695 break;
6696
6697 case IOSTAT_LS_RJT:
6698 stat.un.lsRjtError =
6699 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
6700 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
6701 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
6702 lpfc_block_fabric_iocbs(phba);
6703 break;
6704 }
6705
6706 if (atomic_read(&phba->fabric_iocb_count) == 0)
6707 BUG();
6708
6709 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
6710 cmdiocb->fabric_iocb_cmpl = NULL;
6711 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
6712 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
6713
6714 atomic_dec(&phba->fabric_iocb_count);
6715 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
6716 /* Post any pending iocbs to HBA */
6717 lpfc_resume_fabric_iocbs(phba);
6718 }
6719 }
6720
6721 /**
6722 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
6723 * @phba: pointer to lpfc hba data structure.
6724 * @iocb: pointer to lpfc command iocb data structure.
6725 *
6726 * This routine is used as the top-level API for issuing a fabric iocb command
6727 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
6728 * function makes sure that only one fabric bound iocb will be outstanding at
6729 * any given time. As such, this function will first check to see whether there
6730 * is already an outstanding fabric iocb on the wire. If so, it will put the
6731 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
6732 * issued later. Otherwise, it will issue the iocb on the wire and update the
6733 * fabric iocb count to indicate that there is one fabric iocb on the wire.
6734 *
6735 * Note that this implementation can potentially send fabric IOCBs out of
6736 * order. The problem is that the "ready" boolean does not include the
6737 * condition that the internal fabric IOCB list is empty. As such, a fabric
6738 * IOCB issued by this routine might jump ahead of the fabric IOCBs already
6739 * on the internal list.
6740 *
6741 * Return code
6742 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
6743 * IOCB_ERROR - failed to issue fabric iocb
6744 **/
6745 static int
6746 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6747 {
6748 unsigned long iflags;
6749 int ready;
6750 int ret;
6751
6752 if (atomic_read(&phba->fabric_iocb_count) > 1)
6753 BUG();
6754
6755 spin_lock_irqsave(&phba->hbalock, iflags);
6756 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
6757 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
6758
6759 if (ready)
6760 /* Increment fabric iocb count to hold the position */
6761 atomic_inc(&phba->fabric_iocb_count);
6762 spin_unlock_irqrestore(&phba->hbalock, iflags);
6763 if (ready) {
6764 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
6765 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
6766 iocb->iocb_flag |= LPFC_IO_FABRIC;
6767
6768 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
6769 "Fabric sched2: ste:x%x",
6770 iocb->vport->port_state, 0, 0);
6771
6772 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6773
6774 if (ret == IOCB_ERROR) {
6775 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
6776 iocb->fabric_iocb_cmpl = NULL;
6777 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
6778 atomic_dec(&phba->fabric_iocb_count);
6779 }
6780 } else {
6781 spin_lock_irqsave(&phba->hbalock, iflags);
6782 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
6783 spin_unlock_irqrestore(&phba->hbalock, iflags);
6784 ret = IOCB_SUCCESS;
6785 }
6786 return ret;
6787 }
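/*
 * A possible (hypothetical, not implemented here) way to close the ordering
 * window described in the header comment of lpfc_issue_fabric_iocb() would be
 * to fold the list state into the "ready" test, so a new iocb can only bypass
 * the internal list when that list is empty:
 *
 *	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
 *		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags) &&
 *		list_empty(&phba->fabric_iocb_list);
 *
 * With that extra condition, any fabric IOCBs already queued on
 * fabric_iocb_list would be issued first by lpfc_resume_fabric_iocbs().
 */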
6788
6789 /**
6790 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
6791 * @vport: pointer to a virtual N_Port data structure.
6792 *
6793 * This routine aborts all the IOCBs associated with a @vport from the
6794 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
6795 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
6796 * list, removes each IOCB associated with the @vport from the list, sets the
6797 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
6798 * associated with the IOCB.
6799 **/
6800 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
6801 {
6802 LIST_HEAD(completions);
6803 struct lpfc_hba *phba = vport->phba;
6804 struct lpfc_iocbq *tmp_iocb, *piocb;
6805
6806 spin_lock_irq(&phba->hbalock);
6807 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
6808 list) {
6809
6810 if (piocb->vport != vport)
6811 continue;
6812
6813 list_move_tail(&piocb->list, &completions);
6814 }
6815 spin_unlock_irq(&phba->hbalock);
6816
6817 /* Cancel all the IOCBs from the completions list */
6818 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6819 IOERR_SLI_ABORTED);
6820 }
6821
6822 /**
6823 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
6824 * @ndlp: pointer to a node-list data structure.
6825 *
6826 * This routine aborts all the IOCBs associated with an @ndlp from the
6827 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
6828 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
6829 * list, removes each IOCB associated with the @ndlp from the list, sets the
6830 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
6831 * associated with the IOCB.
6832 **/
6833 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
6834 {
6835 LIST_HEAD(completions);
6836 struct lpfc_hba *phba = ndlp->phba;
6837 struct lpfc_iocbq *tmp_iocb, *piocb;
6838 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6839
6840 spin_lock_irq(&phba->hbalock);
6841 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
6842 list) {
6843 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
6844
6845 list_move_tail(&piocb->list, &completions);
6846 }
6847 }
6848 spin_unlock_irq(&phba->hbalock);
6849
6850 /* Cancel all the IOCBs from the completions list */
6851 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6852 IOERR_SLI_ABORTED);
6853 }
6854
6855 /**
6856 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
6857 * @phba: pointer to lpfc hba data structure.
6858 *
6859 * This routine aborts all the IOCBs currently on the driver internal
6860 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
6861 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
6862 * list, removes the IOCBs from the list, sets the status field to
6863 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
6864 * the IOCB.
6865 **/
6866 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
6867 {
6868 LIST_HEAD(completions);
6869
6870 spin_lock_irq(&phba->hbalock);
6871 list_splice_init(&phba->fabric_iocb_list, &completions);
6872 spin_unlock_irq(&phba->hbalock);
6873
6874 /* Cancel all the IOCBs from the completions list */
6875 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6876 IOERR_SLI_ABORTED);
6877 }
6878
6879 /**
6880 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
6881 * @phba: pointer to lpfc hba data structure.
6882 * @axri: pointer to the els xri abort wcqe structure.
6883 *
6884 * This routine is invoked by the worker thread to process a SLI4 slow-path
6885 * ELS aborted xri.
6886 **/
6887 void
6888 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6889 struct sli4_wcqe_xri_aborted *axri)
6890 {
6891 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6892 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6893 unsigned long iflag = 0;
6894
6895 spin_lock_irqsave(&phba->hbalock, iflag);
6896 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
6897 list_for_each_entry_safe(sglq_entry, sglq_next,
6898 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6899 if (sglq_entry->sli4_xritag == xri) {
6900 list_del(&sglq_entry->list);
6901 list_add_tail(&sglq_entry->list,
6902 &phba->sli4_hba.lpfc_sgl_list);
6903 sglq_entry->state = SGL_FREED;
6904 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6905 spin_unlock_irqrestore(&phba->hbalock, iflag);
6906 return;
6907 }
6908 }
6909 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6910 sglq_entry = __lpfc_get_active_sglq(phba, xri);
6911 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
6912 spin_unlock_irqrestore(&phba->hbalock, iflag);
6913 return;
6914 }
6915 sglq_entry->state = SGL_XRI_ABORTED;
6916 spin_unlock_irqrestore(&phba->hbalock, iflag);
6917 return;
6918 }