drivers/scsi/lpfc/lpfc_hbadisc.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/kthread.h>
25 #include <linux/interrupt.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31
32 #include "lpfc_hw.h"
33 #include "lpfc_disc.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_scsi.h"
36 #include "lpfc.h"
37 #include "lpfc_logmsg.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_vport.h"
40 #include "lpfc_debugfs.h"
41
42 /* AlpaArray for assignment of SCSI IDs for scan-down and bind_method */
43 static uint8_t lpfcAlpaArray[] = {
44 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
45 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
46 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
47 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
48 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
49 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
50 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
51 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
52 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
53 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
54 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
55 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
56 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
57 };
58
59 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
60
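/*
 * Called by the FC transport to terminate outstanding I/O on a remote
 * port. If the node is mapped to a SCSI target, abort all FCP IOCBs
 * queued to that target ID on the FCP ring.
 */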
61 void
62 lpfc_terminate_rport_io(struct fc_rport *rport)
63 {
64 struct lpfc_rport_data *rdata;
65 struct lpfc_nodelist * ndlp;
66 struct lpfc_hba *phba;
67
68 rdata = rport->dd_data;
69 ndlp = rdata->pnode;
70
71 if (!ndlp) {
72 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
73 printk(KERN_ERR "Cannot find remote node"
74 " to terminate I/O Data x%x\n",
75 rport->port_id);
76 return;
77 }
78
79 phba = ndlp->vport->phba;
80
81 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
82 "rport terminate: sid:x%x did:x%x flg:x%x",
83 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
84
85 if (ndlp->nlp_sid != NLP_NO_SID) {
86 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
87 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
88 }
89
90 return;
91 }
92
93 /*
94  * This function is called when dev_loss_tmo fires.
95 */
96 void
97 lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
98 {
99 struct lpfc_rport_data *rdata;
100 struct lpfc_nodelist * ndlp;
101 struct lpfc_vport *vport;
102 struct lpfc_hba *phba;
103 struct completion devloss_compl;
104 struct lpfc_work_evt *evtp;
105
106 rdata = rport->dd_data;
107 ndlp = rdata->pnode;
108
109 if (!ndlp) {
110 if (rport->scsi_target_id != -1) {
111 printk(KERN_ERR "Cannot find remote node"
112 " for rport in dev_loss_tmo_callbk x%x\n",
113 rport->port_id);
114 }
115 return;
116 }
117
118 vport = ndlp->vport;
119 phba = vport->phba;
120
121 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
122 "rport devlosscb: sid:x%x did:x%x flg:x%x",
123 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
124
125 init_completion(&devloss_compl);
126 evtp = &ndlp->dev_loss_evt;
127
128 if (!list_empty(&evtp->evt_listp))
129 return;
130
131 spin_lock_irq(&phba->hbalock);
132 evtp->evt_arg1 = ndlp;
133 evtp->evt_arg2 = &devloss_compl;
134 evtp->evt = LPFC_EVT_DEV_LOSS;
135 list_add_tail(&evtp->evt_listp, &phba->work_list);
136 if (phba->work_wait)
137 wake_up(phba->work_wait);
138
139 spin_unlock_irq(&phba->hbalock);
140
141 wait_for_completion(&devloss_compl);
142
143 return;
144 }
145
146 /*
147 * This function is called from the worker thread when dev_loss_tmo
148  * expires.
149 */
150 void
151 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
152 {
153 struct lpfc_rport_data *rdata;
154 struct fc_rport *rport;
155 struct lpfc_vport *vport;
156 struct lpfc_hba *phba;
157 uint8_t *name;
158 int warn_on = 0;
159
160 rport = ndlp->rport;
161
162 if (!rport)
163 return;
164
165 rdata = rport->dd_data;
166 name = (uint8_t *) &ndlp->nlp_portname;
167 vport = ndlp->vport;
168 phba = vport->phba;
169
170 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
171 "rport devlosstmo:did:x%x type:x%x id:x%x",
172 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
173
174 if (!(vport->load_flag & FC_UNLOADING) &&
175 ndlp->nlp_state == NLP_STE_MAPPED_NODE)
176 return;
177
178 if (ndlp->nlp_type & NLP_FABRIC) {
179 int put_node;
180 int put_rport;
181
182 /* We will clean up these nodes at link up */
183 put_node = rdata->pnode != NULL;
184 put_rport = ndlp->rport != NULL;
185 rdata->pnode = NULL;
186 ndlp->rport = NULL;
187 if (put_node)
188 lpfc_nlp_put(ndlp);
189 if (put_rport)
190 put_device(&rport->dev);
191 return;
192 }
193
194 if (ndlp->nlp_sid != NLP_NO_SID) {
195 warn_on = 1;
196 /* flush the target */
197 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
198 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
199 }
200 if (vport->load_flag & FC_UNLOADING)
201 warn_on = 0;
202
203 if (warn_on) {
204 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
205 "%d (%d):0203 Devloss timeout on "
206 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
207 "NPort x%x Data: x%x x%x x%x\n",
208 phba->brd_no, vport->vpi,
209 *name, *(name+1), *(name+2), *(name+3),
210 *(name+4), *(name+5), *(name+6), *(name+7),
211 ndlp->nlp_DID, ndlp->nlp_flag,
212 ndlp->nlp_state, ndlp->nlp_rpi);
213 } else {
214 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
215 "%d (%d):0204 Devloss timeout on "
216 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
217 "NPort x%x Data: x%x x%x x%x\n",
218 phba->brd_no, vport->vpi,
219 *name, *(name+1), *(name+2), *(name+3),
220 *(name+4), *(name+5), *(name+6), *(name+7),
221 ndlp->nlp_DID, ndlp->nlp_flag,
222 ndlp->nlp_state, ndlp->nlp_rpi);
223 }
224
225 if (!(vport->load_flag & FC_UNLOADING) &&
226 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
227 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
228 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
229 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
230 else {
231 int put_node;
232 int put_rport;
233
234 put_node = rdata->pnode != NULL;
235 put_rport = ndlp->rport != NULL;
236 rdata->pnode = NULL;
237 ndlp->rport = NULL;
238 if (put_node)
239 lpfc_nlp_put(ndlp);
240 if (put_rport)
241 put_device(&rport->dev);
242 }
243 }
244
245
246 void
247 lpfc_worker_wake_up(struct lpfc_hba *phba)
248 {
249 wake_up(phba->work_wait);
250 return;
251 }
252
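/*
 * Drain the HBA work_list and dispatch each queued event. Events that
 * are embedded in an ndlp (ELS retry delay, dev-loss) must not be
 * kfree'd here; all other event structures are freed after dispatch.
 */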
253 static void
254 lpfc_work_list_done(struct lpfc_hba *phba)
255 {
256 struct lpfc_work_evt *evtp = NULL;
257 struct lpfc_nodelist *ndlp;
258 struct lpfc_vport *vport;
259 int free_evt;
260
261 spin_lock_irq(&phba->hbalock);
262 while (!list_empty(&phba->work_list)) {
263 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
264 evt_listp);
265 spin_unlock_irq(&phba->hbalock);
266 free_evt = 1;
267 switch (evtp->evt) {
268 case LPFC_EVT_DEV_LOSS_DELAY:
269 free_evt = 0; /* evt is part of ndlp */
270 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
271 vport = ndlp->vport;
272 if (!vport)
273 break;
274
275 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
276 "rport devlossdly:did:x%x flg:x%x",
277 ndlp->nlp_DID, ndlp->nlp_flag, 0);
278
279 if (!(vport->load_flag & FC_UNLOADING) &&
280 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
281 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
282 lpfc_disc_state_machine(vport, ndlp, NULL,
283 NLP_EVT_DEVICE_RM);
284 }
285 break;
286 case LPFC_EVT_ELS_RETRY:
287 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
288 lpfc_els_retry_delay_handler(ndlp);
289 free_evt = 0; /* evt is part of ndlp */
290 break;
291 case LPFC_EVT_DEV_LOSS:
292 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
293 lpfc_nlp_get(ndlp);
294 lpfc_dev_loss_tmo_handler(ndlp);
295 free_evt = 0;
296 complete((struct completion *)(evtp->evt_arg2));
297 lpfc_nlp_put(ndlp);
298 break;
299 case LPFC_EVT_ONLINE:
300 if (phba->link_state < LPFC_LINK_DOWN)
301 *(int *) (evtp->evt_arg1) = lpfc_online(phba);
302 else
303 *(int *) (evtp->evt_arg1) = 0;
304 complete((struct completion *)(evtp->evt_arg2));
305 break;
306 case LPFC_EVT_OFFLINE_PREP:
307 if (phba->link_state >= LPFC_LINK_DOWN)
308 lpfc_offline_prep(phba);
309 *(int *)(evtp->evt_arg1) = 0;
310 complete((struct completion *)(evtp->evt_arg2));
311 break;
312 case LPFC_EVT_OFFLINE:
313 lpfc_offline(phba);
314 lpfc_sli_brdrestart(phba);
315 *(int *)(evtp->evt_arg1) =
316 lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
317 lpfc_unblock_mgmt_io(phba);
318 complete((struct completion *)(evtp->evt_arg2));
319 break;
320 case LPFC_EVT_WARM_START:
321 lpfc_offline(phba);
322 lpfc_reset_barrier(phba);
323 lpfc_sli_brdreset(phba);
324 lpfc_hba_down_post(phba);
325 *(int *)(evtp->evt_arg1) =
326 lpfc_sli_brdready(phba, HS_MBRDY);
327 lpfc_unblock_mgmt_io(phba);
328 complete((struct completion *)(evtp->evt_arg2));
329 break;
330 case LPFC_EVT_KILL:
331 lpfc_offline(phba);
332 *(int *)(evtp->evt_arg1)
333 = (phba->pport->stopped)
334 ? 0 : lpfc_sli_brdkill(phba);
335 lpfc_unblock_mgmt_io(phba);
336 complete((struct completion *)(evtp->evt_arg2));
337 break;
338 }
339 if (free_evt)
340 kfree(evtp);
341 spin_lock_irq(&phba->hbalock);
342 }
343 spin_unlock_irq(&phba->hbalock);
344
345 }
346
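/*
 * Main dispatcher for the worker thread: handle latched host-attention
 * conditions (error, mailbox, link attention), service per-vport timer
 * and queue events, process deferred ELS slow-ring events, and finally
 * drain the work list via lpfc_work_list_done().
 */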
347 void
348 lpfc_work_done(struct lpfc_hba *phba)
349 {
350 struct lpfc_sli_ring *pring;
351 uint32_t ha_copy, status, control, work_port_events;
352 struct lpfc_vport *vport;
353
354 spin_lock_irq(&phba->hbalock);
355 ha_copy = phba->work_ha;
356 phba->work_ha = 0;
357 spin_unlock_irq(&phba->hbalock);
358
359 if (ha_copy & HA_ERATT)
360 lpfc_handle_eratt(phba);
361
362 if (ha_copy & HA_MBATT)
363 lpfc_sli_handle_mb_event(phba);
364
365 if (ha_copy & HA_LATT)
366 lpfc_handle_latt(phba);
367
368 spin_lock_irq(&phba->hbalock);
369 list_for_each_entry(vport, &phba->port_list, listentry) {
370 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
371
372 if (!scsi_host_get(shost)) {
373 continue;
374 }
375 spin_unlock_irq(&phba->hbalock);
376 work_port_events = vport->work_port_events;
377
378 if (work_port_events & WORKER_DISC_TMO)
379 lpfc_disc_timeout_handler(vport);
380
381 if (work_port_events & WORKER_ELS_TMO)
382 lpfc_els_timeout_handler(vport);
383
384 if (work_port_events & WORKER_HB_TMO)
385 lpfc_hb_timeout_handler(phba);
386
387 if (work_port_events & WORKER_MBOX_TMO)
388 lpfc_mbox_timeout_handler(phba);
389
390 if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
391 lpfc_unblock_fabric_iocbs(phba);
392
393 if (work_port_events & WORKER_FDMI_TMO)
394 lpfc_fdmi_timeout_handler(vport);
395
396 if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
397 lpfc_ramp_down_queue_handler(phba);
398
399 if (work_port_events & WORKER_RAMP_UP_QUEUE)
400 lpfc_ramp_up_queue_handler(phba);
401
402 spin_lock_irq(&vport->work_port_lock);
403 vport->work_port_events &= ~work_port_events;
404 spin_unlock_irq(&vport->work_port_lock);
405 scsi_host_put(shost);
406 spin_lock_irq(&phba->hbalock);
407 }
408 spin_unlock_irq(&phba->hbalock);
409
410 pring = &phba->sli.ring[LPFC_ELS_RING];
411 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
412 status >>= (4*LPFC_ELS_RING);
413 if ((status & HA_RXMASK)
414 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
415 if (pring->flag & LPFC_STOP_IOCB_MASK) {
416 pring->flag |= LPFC_DEFERRED_RING_EVENT;
417 } else {
418 lpfc_sli_handle_slow_ring_event(phba, pring,
419 (status &
420 HA_RXMASK));
421 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
422 }
423 /*
424 * Turn on Ring interrupts
425 */
426 spin_lock_irq(&phba->hbalock);
427 control = readl(phba->HCregaddr);
428 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
429 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
430 writel(control, phba->HCregaddr);
431 readl(phba->HCregaddr); /* flush */
432 }
433 spin_unlock_irq(&phba->hbalock);
434 }
435 lpfc_work_list_done(phba);
436 }
437
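/*
 * Wait condition for the worker thread. Returns 1 when any vport has
 * pending work events, the HBA has host-attention work or queued work
 * events, the thread was asked to stop, or a deferred ELS ring event
 * is outstanding. Also counts consecutive busy wakeups in work_found.
 */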
438 static int
439 check_work_wait_done(struct lpfc_hba *phba)
440 {
441 struct lpfc_vport *vport;
442 struct lpfc_sli_ring *pring;
443 int rc = 0;
444
445 spin_lock_irq(&phba->hbalock);
446 list_for_each_entry(vport, &phba->port_list, listentry) {
447 if (vport->work_port_events) {
448 rc = 1;
449 goto exit;
450 }
451 }
452
453 if (phba->work_ha || (!list_empty(&phba->work_list)) ||
454 kthread_should_stop()) {
455 rc = 1;
456 goto exit;
457 }
458
459 pring = &phba->sli.ring[LPFC_ELS_RING];
460 if (pring->flag & LPFC_DEFERRED_RING_EVENT)
461 rc = 1;
462 exit:
463 if (rc)
464 phba->work_found++;
465 else
466 phba->work_found = 0;
467
468 spin_unlock_irq(&phba->hbalock);
469 return rc;
470 }
471
472
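/*
 * Worker thread entry point. Runs at high priority, sleeps until
 * check_work_wait_done() indicates work, services it through
 * lpfc_work_done(), and voluntarily yields the CPU after
 * LPFC_MAX_WORKER_ITERATION consecutive busy iterations.
 */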
473 int
474 lpfc_do_work(void *p)
475 {
476 struct lpfc_hba *phba = p;
477 int rc;
478 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
479
480 set_user_nice(current, -20);
481 phba->work_wait = &work_waitq;
482 phba->work_found = 0;
483
484 while (1) {
485
486 rc = wait_event_interruptible(work_waitq,
487 check_work_wait_done(phba));
488
489 BUG_ON(rc);
490
491 if (kthread_should_stop())
492 break;
493
494 lpfc_work_done(phba);
495
496 /* If there is a lot of slow ring work, as during link up,
497 * check_work_wait_done() may cause this thread not to give
498 * up the CPU for very long periods of time. This may cause
499 * soft lockups or other problems. To avoid these situations
500 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
501 * consecutive iterations.
502 */
503 if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
504 phba->work_found = 0;
505 schedule();
506 }
507 }
508 phba->work_wait = NULL;
509 return 0;
510 }
511
512 /*
513 * This is only called to handle FC worker events. Since this is a rare
514 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
515 * embedding it in the IOCB.
516 */
517 int
518 lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
519 uint32_t evt)
520 {
521 struct lpfc_work_evt *evtp;
522 unsigned long flags;
523
524 /*
525 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
526 * be queued to the worker thread for processing.
527 */
528 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
529 if (!evtp)
530 return 0;
531
532 evtp->evt_arg1 = arg1;
533 evtp->evt_arg2 = arg2;
534 evtp->evt = evt;
535
536 spin_lock_irqsave(&phba->hbalock, flags);
537 list_add_tail(&evtp->evt_listp, &phba->work_list);
538 if (phba->work_wait)
539 lpfc_worker_wake_up(phba);
540 spin_unlock_irqrestore(&phba->hbalock, flags);
541
542 return 1;
543 }
544
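/*
 * Walk the vport node list and put each node through the discovery
 * state machine with DEVICE_RM (remove) or DEVICE_RECOVERY. Fabric
 * nodes are left alone on a plain link down, and RPIs (plus the VPI)
 * are unregistered when the SLI3 vport-teardown option is set.
 */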
545 void
546 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
547 {
548 struct lpfc_hba *phba = vport->phba;
549 struct lpfc_nodelist *ndlp, *next_ndlp;
550 int rc;
551
552 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
553 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
554 continue;
555
556 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
557 lpfc_unreg_rpi(vport, ndlp);
558
559 /* Leave Fabric nodes alone on link down */
560 if (!remove && ndlp->nlp_type & NLP_FABRIC)
561 continue;
562 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
563 remove
564 ? NLP_EVT_DEVICE_RM
565 : NLP_EVT_DEVICE_RECOVERY);
566 }
567 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
568 lpfc_mbx_unreg_vpi(vport);
569 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
570 }
571 }
572
573 static void
574 lpfc_linkdown_port(struct lpfc_vport *vport)
575 {
576 struct lpfc_nodelist *ndlp, *next_ndlp;
577 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
578
579 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
580
581 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
582 "Link Down: state:x%x rtry:x%x flg:x%x",
583 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
584
585 /* Cleanup any outstanding RSCN activity */
586 lpfc_els_flush_rscn(vport);
587
588 /* Cleanup any outstanding ELS commands */
589 lpfc_els_flush_cmd(vport);
590
591 lpfc_cleanup_rpis(vport, 0);
592
593 /* free any ndlp's on unused list */
594 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
595 /* free any ndlp's in unused state */
596 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
597 lpfc_drop_node(vport, ndlp);
598
599 /* Turn off the discovery timer if it's running */
600 lpfc_can_disctmo(vport);
601 }
602
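/*
 * Transition the HBA to LPFC_LINK_DOWN: post a link-down event to
 * every vport, clean up any firmware default RPIs, and reset the
 * pt2pt state (fc_myDID and flags) so the next link up starts clean.
 */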
603 int
604 lpfc_linkdown(struct lpfc_hba *phba)
605 {
606 struct lpfc_vport *vport = phba->pport;
607 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
608 struct lpfc_vport *port_iterator;
609 LPFC_MBOXQ_t *mb;
610
611 if (phba->link_state == LPFC_LINK_DOWN) {
612 return 0;
613 }
614 spin_lock_irq(&phba->hbalock);
615 if (phba->link_state > LPFC_LINK_DOWN) {
616 phba->link_state = LPFC_LINK_DOWN;
617 phba->pport->fc_flag &= ~FC_LBIT;
618 }
619 spin_unlock_irq(&phba->hbalock);
620
621 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
622
623 /* Issue a LINK DOWN event to all nodes */
624 lpfc_linkdown_port(port_iterator);
625 }
626
627 /* Clean up any firmware default rpi's */
628 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
629 if (mb) {
630 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
631 mb->vport = vport;
632 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
633 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
634 == MBX_NOT_FINISHED) {
635 mempool_free(mb, phba->mbox_mem_pool);
636 }
637 }
638
639 /* Setup myDID for link up if we are in pt2pt mode */
640 if (phba->pport->fc_flag & FC_PT2PT) {
641 phba->pport->fc_myDID = 0;
642 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
643 if (mb) {
644 lpfc_config_link(phba, mb);
645 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
646 mb->vport = vport;
647 if (lpfc_sli_issue_mbox(phba, mb,
648 (MBX_NOWAIT | MBX_STOP_IOCB))
649 == MBX_NOT_FINISHED) {
650 mempool_free(mb, phba->mbox_mem_pool);
651 }
652 }
653 spin_lock_irq(shost->host_lock);
654 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
655 spin_unlock_irq(shost->host_lock);
656 }
657
658 return 0;
659 }
660
661 static void
662 lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
663 {
664 struct lpfc_nodelist *ndlp;
665
666 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
667 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
668 continue;
669
670 if (ndlp->nlp_type & NLP_FABRIC) {
671 /* On link up it's safe to clean up the ndlp
672 * from Fabric connections.
673 */
674 if (ndlp->nlp_DID != Fabric_DID)
675 lpfc_unreg_rpi(vport, ndlp);
676 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
677 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
678 /* Fail outstanding IO now since device is
679 * marked for PLOGI.
680 */
681 lpfc_unreg_rpi(vport, ndlp);
682 }
683 }
684 }
685
686 static void
687 lpfc_linkup_port(struct lpfc_vport *vport)
688 {
689 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
690 struct lpfc_nodelist *ndlp, *next_ndlp;
691 struct lpfc_hba *phba = vport->phba;
692
693 if ((vport->load_flag & FC_UNLOADING) != 0)
694 return;
695
696 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
697 "Link Up: top:x%x speed:x%x flg:x%x",
698 phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
699
700 /* If NPIV is not enabled, only bring the physical port up */
701 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
702 (vport != phba->pport))
703 return;
704
705 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
706
707 spin_lock_irq(shost->host_lock);
708 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
709 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
710 vport->fc_flag |= FC_NDISC_ACTIVE;
711 vport->fc_ns_retry = 0;
712 spin_unlock_irq(shost->host_lock);
713
714 if (vport->fc_flag & FC_LBIT)
715 lpfc_linkup_cleanup_nodes(vport);
716
717 /* free any ndlp's in unused state */
718 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
719 nlp_listp)
720 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
721 lpfc_drop_node(vport, ndlp);
722 }
723
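/*
 * Bring the link up: unblock fabric IOCBs, run link-up processing on
 * every vport, and, when NPIV is enabled, issue CLEAR_LA on the
 * physical port.
 */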
724 static int
725 lpfc_linkup(struct lpfc_hba *phba)
726 {
727 struct lpfc_vport *vport;
728
729 phba->link_state = LPFC_LINK_UP;
730
731 /* Unblock fabric iocbs if they are blocked */
732 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
733 del_timer_sync(&phba->fabric_block_timer);
734
735 list_for_each_entry(vport, &phba->port_list, listentry) {
736 lpfc_linkup_port(vport);
737 }
738 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
739 lpfc_issue_clear_la(phba, phba->pport);
740
741 return 0;
742 }
743
744 /*
745 * This routine handles processing a CLEAR_LA mailbox
746 * command upon completion. It is setup in the LPFC_MBOXQ
747 * as the completion routine when the command is
748 * handed off to the SLI layer.
749 */
750 void
751 lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
752 {
753 struct lpfc_vport *vport = pmb->vport;
754 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
755 struct lpfc_sli *psli = &phba->sli;
756 MAILBOX_t *mb = &pmb->mb;
757 uint32_t control;
758
759 /* Since we don't do discovery right now, turn these off here */
760 psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
761 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
762 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
763
764 /* Check for error */
765 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
766 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
767 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
768 "%d (%d):0320 CLEAR_LA mbxStatus error x%x hba "
769 "state x%x\n",
770 phba->brd_no, vport->vpi, mb->mbxStatus,
771 vport->port_state);
772
773 phba->link_state = LPFC_HBA_ERROR;
774 goto out;
775 }
776
777 if (vport->port_type == LPFC_PHYSICAL_PORT)
778 phba->link_state = LPFC_HBA_READY;
779
780 spin_lock_irq(&phba->hbalock);
781 psli->sli_flag |= LPFC_PROCESS_LA;
782 control = readl(phba->HCregaddr);
783 control |= HC_LAINT_ENA;
784 writel(control, phba->HCregaddr);
785 readl(phba->HCregaddr); /* flush */
786 spin_unlock_irq(&phba->hbalock);
787 return;
788
789 vport->num_disc_nodes = 0;
790 /* go thru NPR nodes and issue ELS PLOGIs */
791 if (vport->fc_npr_cnt)
792 lpfc_els_disc_plogi(vport);
793
794 if (!vport->num_disc_nodes) {
795 spin_lock_irq(shost->host_lock);
796 vport->fc_flag &= ~FC_NDISC_ACTIVE;
797 spin_unlock_irq(shost->host_lock);
798 }
799
800 vport->port_state = LPFC_VPORT_READY;
801
802 out:
803 /* Device Discovery completes */
804 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
805 "%d (%d):0225 Device Discovery completes\n",
806 phba->brd_no, vport->vpi);
807
808 mempool_free(pmb, phba->mbox_mem_pool);
809
810 spin_lock_irq(shost->host_lock);
811 vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
812 spin_unlock_irq(shost->host_lock);
813
814 del_timer_sync(&phba->fc_estabtmo);
815
816 lpfc_can_disctmo(vport);
817
818 /* turn on Link Attention interrupts */
819
820 spin_lock_irq(&phba->hbalock);
821 psli->sli_flag |= LPFC_PROCESS_LA;
822 control = readl(phba->HCregaddr);
823 control |= HC_LAINT_ENA;
824 writel(control, phba->HCregaddr);
825 readl(phba->HCregaddr); /* flush */
826 spin_unlock_irq(&phba->hbalock);
827
828 return;
829 }
830
831
832 static void
833 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
834 {
835 struct lpfc_vport *vport = pmb->vport;
836
837 if (pmb->mb.mbxStatus)
838 goto out;
839
840 mempool_free(pmb, phba->mbox_mem_pool);
841
842 if (phba->fc_topology == TOPOLOGY_LOOP &&
843 vport->fc_flag & FC_PUBLIC_LOOP &&
844 !(vport->fc_flag & FC_LBIT)) {
845 /* Need to wait for FAN - use discovery timer
846 * for timeout. port_state is identically
847 * LPFC_LOCAL_CFG_LINK while waiting for FAN
848 */
849 lpfc_set_disctmo(vport);
850 return;
851 }
852
853 /* Start discovery by sending a FLOGI. port_state is identically
854 * LPFC_FLOGI while waiting for FLOGI cmpl
855 */
856 if (vport->port_state != LPFC_FLOGI) {
857 vport->port_state = LPFC_FLOGI;
858 lpfc_set_disctmo(vport);
859 lpfc_initial_flogi(vport);
860 }
861 return;
862
863 out:
864 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
865 "%d (%d):0306 CONFIG_LINK mbxStatus error x%x "
866 "HBA state x%x\n",
867 phba->brd_no, vport->vpi, pmb->mb.mbxStatus,
868 vport->port_state);
869
870 mempool_free(pmb, phba->mbox_mem_pool);
871
872 lpfc_linkdown(phba);
873
874 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
875 "%d (%d):0200 CONFIG_LINK bad hba state x%x\n",
876 phba->brd_no, vport->vpi, vport->port_state);
877
878 lpfc_issue_clear_la(phba, vport);
879 return;
880 }
881
882 static void
883 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
884 {
885 MAILBOX_t *mb = &pmb->mb;
886 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
887 struct lpfc_vport *vport = pmb->vport;
888
889
890 /* Check for error */
891 if (mb->mbxStatus) {
892 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
893 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
894 "%d (%d):0319 READ_SPARAM mbxStatus error x%x "
895 "hba state x%x>\n",
896 phba->brd_no, vport->vpi, mb->mbxStatus,
897 vport->port_state);
898
899 lpfc_linkdown(phba);
900 goto out;
901 }
902
903 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
904 sizeof (struct serv_parm));
905 if (phba->cfg_soft_wwnn)
906 u64_to_wwn(phba->cfg_soft_wwnn,
907 vport->fc_sparam.nodeName.u.wwn);
908 if (phba->cfg_soft_wwpn)
909 u64_to_wwn(phba->cfg_soft_wwpn,
910 vport->fc_sparam.portName.u.wwn);
911 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
912 sizeof(vport->fc_nodename));
913 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
914 sizeof(vport->fc_portname));
915 if (vport->port_type == LPFC_PHYSICAL_PORT) {
916 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
917 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
918 }
919
920 lpfc_mbuf_free(phba, mp->virt, mp->phys);
921 kfree(mp);
922 mempool_free(pmb, phba->mbox_mem_pool);
923 return;
924
925 out:
926 pmb->context1 = NULL;
927 lpfc_mbuf_free(phba, mp->virt, mp->phys);
928 kfree(mp);
929 lpfc_issue_clear_la(phba, vport);
930 mempool_free(pmb, phba->mbox_mem_pool);
931 return;
932 }
933
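/*
 * Process a link-up attention: record the link speed and topology,
 * derive the local DID (granted AL_PA on loop, otherwise the preferred
 * DID), then start discovery by issuing the READ_SPARAM and
 * CONFIG_LINK mailbox commands.
 */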
934 static void
935 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
936 {
937 struct lpfc_vport *vport = phba->pport;
938 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
939 int i;
940 struct lpfc_dmabuf *mp;
941 int rc;
942
943 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
944 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
945
946 spin_lock_irq(&phba->hbalock);
947 switch (la->UlnkSpeed) {
948 case LA_1GHZ_LINK:
949 phba->fc_linkspeed = LA_1GHZ_LINK;
950 break;
951 case LA_2GHZ_LINK:
952 phba->fc_linkspeed = LA_2GHZ_LINK;
953 break;
954 case LA_4GHZ_LINK:
955 phba->fc_linkspeed = LA_4GHZ_LINK;
956 break;
957 case LA_8GHZ_LINK:
958 phba->fc_linkspeed = LA_8GHZ_LINK;
959 break;
960 default:
961 phba->fc_linkspeed = LA_UNKNW_LINK;
962 break;
963 }
964
965 phba->fc_topology = la->topology;
966 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
967
968 if (phba->fc_topology == TOPOLOGY_LOOP) {
969 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
970
971 /* Get Loop Map information */
972 if (la->il)
973 vport->fc_flag |= FC_LBIT;
974
975 vport->fc_myDID = la->granted_AL_PA;
976 i = la->un.lilpBde64.tus.f.bdeSize;
977
978 if (i == 0) {
979 phba->alpa_map[0] = 0;
980 } else {
981 if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
982 int numalpa, j, k;
983 union {
984 uint8_t pamap[16];
985 struct {
986 uint32_t wd1;
987 uint32_t wd2;
988 uint32_t wd3;
989 uint32_t wd4;
990 } pa;
991 } un;
992 numalpa = phba->alpa_map[0];
993 j = 0;
994 while (j < numalpa) {
995 memset(un.pamap, 0, 16);
996 for (k = 1; j < numalpa; k++) {
997 un.pamap[k - 1] =
998 phba->alpa_map[j + 1];
999 j++;
1000 if (k == 16)
1001 break;
1002 }
1003 /* Link Up Event ALPA map */
1004 lpfc_printf_log(phba,
1005 KERN_WARNING,
1006 LOG_LINK_EVENT,
1007 "%d:1304 Link Up Event "
1008 "ALPA map Data: x%x "
1009 "x%x x%x x%x\n",
1010 phba->brd_no,
1011 un.pa.wd1, un.pa.wd2,
1012 un.pa.wd3, un.pa.wd4);
1013 }
1014 }
1015 }
1016 } else {
1017 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
1018 if (phba->max_vpi && phba->cfg_npiv_enable &&
1019 (phba->sli_rev == 3))
1020 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
1021 }
1022 vport->fc_myDID = phba->fc_pref_DID;
1023 vport->fc_flag |= FC_LBIT;
1024 }
1025 spin_unlock_irq(&phba->hbalock);
1026
1027 lpfc_linkup(phba);
1028 if (sparam_mbox) {
1029 lpfc_read_sparam(phba, sparam_mbox, 0);
1030 sparam_mbox->vport = vport;
1031 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1032 rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
1033 (MBX_NOWAIT | MBX_STOP_IOCB));
1034 if (rc == MBX_NOT_FINISHED) {
1035 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
1036 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1037 kfree(mp);
1038 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1039 if (cfglink_mbox)
1040 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1041 goto out;
1042 }
1043 }
1044
1045 if (cfglink_mbox) {
1046 vport->port_state = LPFC_LOCAL_CFG_LINK;
1047 lpfc_config_link(phba, cfglink_mbox);
1048 cfglink_mbox->vport = vport;
1049 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1050 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
1051 (MBX_NOWAIT | MBX_STOP_IOCB));
1052 if (rc != MBX_NOT_FINISHED)
1053 return;
1054 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1055 }
1056 out:
1057 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1058 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1059 "%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
1060 phba->brd_no, vport->vpi,
1061 vport->port_state, sparam_mbox, cfglink_mbox);
1062
1063 lpfc_issue_clear_la(phba, vport);
1064 return;
1065 }
1066
1067 static void
1068 lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1069 {
1070 uint32_t control;
1071 struct lpfc_sli *psli = &phba->sli;
1072
1073 lpfc_linkdown(phba);
1074
1075 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1076 spin_lock_irq(&phba->hbalock);
1077 psli->sli_flag |= LPFC_PROCESS_LA;
1078 control = readl(phba->HCregaddr);
1079 control |= HC_LAINT_ENA;
1080 writel(control, phba->HCregaddr);
1081 readl(phba->HCregaddr); /* flush */
1082 spin_unlock_irq(&phba->hbalock);
1083 }
1084
1085 /*
1086 * This routine handles processing a READ_LA mailbox
1087 * command upon completion. It is setup in the LPFC_MBOXQ
1088 * as the completion routine when the command is
1089 * handed off to the SLI layer.
1090 */
1091 void
1092 lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1093 {
1094 struct lpfc_vport *vport = pmb->vport;
1095 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1096 READ_LA_VAR *la;
1097 MAILBOX_t *mb = &pmb->mb;
1098 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1099
1100 /* Check for error */
1101 if (mb->mbxStatus) {
1102 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1103 "%d:1307 READ_LA mbox error x%x state x%x\n",
1104 phba->brd_no, mb->mbxStatus, vport->port_state);
1105 lpfc_mbx_issue_link_down(phba);
1106 phba->link_state = LPFC_HBA_ERROR;
1107 goto lpfc_mbx_cmpl_read_la_free_mbuf;
1108 }
1109
1110 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
1111
1112 memcpy(&phba->alpa_map[0], mp->virt, 128);
1113
1114 spin_lock_irq(shost->host_lock);
1115 if (la->pb)
1116 vport->fc_flag |= FC_BYPASSED_MODE;
1117 else
1118 vport->fc_flag &= ~FC_BYPASSED_MODE;
1119 spin_unlock_irq(shost->host_lock);
1120
1121 if (((phba->fc_eventTag + 1) < la->eventTag) ||
1122 (phba->fc_eventTag == la->eventTag)) {
1123 phba->fc_stat.LinkMultiEvent++;
1124 if (la->attType == AT_LINK_UP)
1125 if (phba->fc_eventTag != 0)
1126 lpfc_linkdown(phba);
1127 }
1128
1129 phba->fc_eventTag = la->eventTag;
1130
1131 if (la->attType == AT_LINK_UP) {
1132 phba->fc_stat.LinkUp++;
1133 if (phba->link_flag & LS_LOOPBACK_MODE) {
1134 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1135 "%d:1306 Link Up Event in loop back mode "
1136 "x%x received Data: x%x x%x x%x x%x\n",
1137 phba->brd_no, la->eventTag, phba->fc_eventTag,
1138 la->granted_AL_PA, la->UlnkSpeed,
1139 phba->alpa_map[0]);
1140 } else {
1141 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1142 "%d:1303 Link Up Event x%x received "
1143 "Data: x%x x%x x%x x%x\n",
1144 phba->brd_no, la->eventTag, phba->fc_eventTag,
1145 la->granted_AL_PA, la->UlnkSpeed,
1146 phba->alpa_map[0]);
1147 }
1148 lpfc_mbx_process_link_up(phba, la);
1149 } else {
1150 phba->fc_stat.LinkDown++;
1151 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1152 "%d:1305 Link Down Event x%x received "
1153 "Data: x%x x%x x%x\n",
1154 phba->brd_no, la->eventTag, phba->fc_eventTag,
1155 phba->pport->port_state, vport->fc_flag);
1156 lpfc_mbx_issue_link_down(phba);
1157 }
1158
1159 lpfc_mbx_cmpl_read_la_free_mbuf:
1160 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1161 kfree(mp);
1162 mempool_free(pmb, phba->mbox_mem_pool);
1163 return;
1164 }
1165
1166 /*
1167 * This routine handles processing a REG_LOGIN mailbox
1168 * command upon completion. It is setup in the LPFC_MBOXQ
1169 * as the completion routine when the command is
1170 * handed off to the SLI layer.
1171 */
1172 void
1173 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1174 {
1175 struct lpfc_vport *vport = pmb->vport;
1176 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1177 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1178
1179 pmb->context1 = NULL;
1180
1181 /* Good status, call state machine */
1182 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
1183 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1184 kfree(mp);
1185 mempool_free(pmb, phba->mbox_mem_pool);
1186 lpfc_nlp_put(ndlp);
1187
1188 return;
1189 }
1190
1191 static void
1192 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1193 {
1194 MAILBOX_t *mb = &pmb->mb;
1195 struct lpfc_vport *vport = pmb->vport;
1196 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1197
1198 switch (mb->mbxStatus) {
1199 case 0x0011:
1200 case 0x0020:
1201 case 0x9700:
1202 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1203 "%d (%d):0911 cmpl_unreg_vpi, "
1204 "mb status = 0x%x\n",
1205 phba->brd_no, vport->vpi, mb->mbxStatus);
1206 break;
1207 }
1208 vport->unreg_vpi_cmpl = VPORT_OK;
1209 mempool_free(pmb, phba->mbox_mem_pool);
1210 /*
1211 * This shost reference might have been taken at the beginning of
1212 * lpfc_vport_delete()
1213 */
1214 if (vport->load_flag & FC_UNLOADING)
1215 scsi_host_put(shost);
1216 }
1217
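/*
 * Issue an UNREG_VPI mailbox command for this vport. Completion is
 * handled by lpfc_mbx_cmpl_unreg_vpi() above; if the command cannot
 * be issued, unreg_vpi_cmpl is marked VPORT_ERROR.
 */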
1218 void
1219 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1220 {
1221 struct lpfc_hba *phba = vport->phba;
1222 LPFC_MBOXQ_t *mbox;
1223 int rc;
1224
1225 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1226 if (!mbox)
1227 return;
1228
1229 lpfc_unreg_vpi(phba, vport->vpi, mbox);
1230 mbox->vport = vport;
1231 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
1232 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1233 if (rc == MBX_NOT_FINISHED) {
1234 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1235 "%d (%d):1800 Could not issue unreg_vpi\n",
1236 phba->brd_no, vport->vpi);
1237 mempool_free(mbox, phba->mbox_mem_pool);
1238 vport->unreg_vpi_cmpl = VPORT_ERROR;
1239 }
1240 }
1241
1242 static void
1243 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1244 {
1245 struct lpfc_vport *vport = pmb->vport;
1246 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1247 MAILBOX_t *mb = &pmb->mb;
1248
1249 switch (mb->mbxStatus) {
1250 case 0x0011:
1251 case 0x9601:
1252 case 0x9602:
1253 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1254 "%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n",
1255 phba->brd_no, vport->vpi, mb->mbxStatus);
1256 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1257 spin_lock_irq(shost->host_lock);
1258 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1259 spin_unlock_irq(shost->host_lock);
1260 vport->fc_myDID = 0;
1261 goto out;
1262 }
1263
1264 vport->num_disc_nodes = 0;
1265 /* go thru NPR list and issue ELS PLOGIs */
1266 if (vport->fc_npr_cnt)
1267 lpfc_els_disc_plogi(vport);
1268
1269 if (!vport->num_disc_nodes) {
1270 spin_lock_irq(shost->host_lock);
1271 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1272 spin_unlock_irq(shost->host_lock);
1273 lpfc_can_disctmo(vport);
1274 }
1275 vport->port_state = LPFC_VPORT_READY;
1276
1277 out:
1278 mempool_free(pmb, phba->mbox_mem_pool);
1279 return;
1280 }
1281
1282 /*
1283 * This routine handles processing a Fabric REG_LOGIN mailbox
1284 * command upon completion. It is setup in the LPFC_MBOXQ
1285 * as the completion routine when the command is
1286 * handed off to the SLI layer.
1287 */
1288 void
1289 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1290 {
1291 struct lpfc_vport *vport = pmb->vport;
1292 struct lpfc_vport *next_vport;
1293 MAILBOX_t *mb = &pmb->mb;
1294 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1295 struct lpfc_nodelist *ndlp;
1296 ndlp = (struct lpfc_nodelist *) pmb->context2;
1297
1298 pmb->context1 = NULL;
1299 pmb->context2 = NULL;
1300
1301 if (mb->mbxStatus) {
1302 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1303 kfree(mp);
1304 mempool_free(pmb, phba->mbox_mem_pool);
1305 lpfc_nlp_put(ndlp);
1306
1307 if (phba->fc_topology == TOPOLOGY_LOOP) {
1308 /* FLOGI failed, use loop map to make discovery list */
1309 lpfc_disc_list_loopmap(vport);
1310
1311 /* Start discovery */
1312 lpfc_disc_start(vport);
1313 return;
1314 }
1315
1316 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1317 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1318 "%d (%d):0258 Register Fabric login error: 0x%x\n",
1319 phba->brd_no, vport->vpi, mb->mbxStatus);
1320
1321 return;
1322 }
1323
1324 ndlp->nlp_rpi = mb->un.varWords[0];
1325 ndlp->nlp_type |= NLP_FABRIC;
1326 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1327
1328 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
1329
1330 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1331 list_for_each_entry(next_vport, &phba->port_list, listentry) {
1332 if (next_vport->port_type == LPFC_PHYSICAL_PORT)
1333 continue;
1334
1335 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1336 lpfc_initial_fdisc(next_vport);
1337 else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1338 lpfc_vport_set_state(vport,
1339 FC_VPORT_NO_FABRIC_SUPP);
1340 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1341 "%d (%d):0259 No NPIV Fabric "
1342 "support\n",
1343 phba->brd_no, vport->vpi);
1344 }
1345 }
1346 lpfc_do_scr_ns_plogi(phba, vport);
1347 }
1348
1349 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1350 kfree(mp);
1351 mempool_free(pmb, phba->mbox_mem_pool);
1352 return;
1353 }
1354
1355 /*
1356 * This routine handles processing a NameServer REG_LOGIN mailbox
1357 * command upon completion. It is setup in the LPFC_MBOXQ
1358 * as the completion routine when the command is
1359 * handed off to the SLI layer.
1360 */
1361 void
1362 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1363 {
1364 MAILBOX_t *mb = &pmb->mb;
1365 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1366 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1367 struct lpfc_vport *vport = pmb->vport;
1368
1369 if (mb->mbxStatus) {
1370 out:
1371 lpfc_nlp_put(ndlp);
1372 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1373 kfree(mp);
1374 mempool_free(pmb, phba->mbox_mem_pool);
1375 lpfc_drop_node(vport, ndlp);
1376
1377 if (phba->fc_topology == TOPOLOGY_LOOP) {
1378 /*
1379 * RegLogin failed, use loop map to make discovery
1380 * list
1381 */
1382 lpfc_disc_list_loopmap(vport);
1383
1384 /* Start discovery */
1385 lpfc_disc_start(vport);
1386 return;
1387 }
1388 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1389 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1390 "%d (%d):0260 Register NameServer error: 0x%x\n",
1391 phba->brd_no, vport->vpi, mb->mbxStatus);
1392 return;
1393 }
1394
1395 pmb->context1 = NULL;
1396
1397 ndlp->nlp_rpi = mb->un.varWords[0];
1398 ndlp->nlp_type |= NLP_FABRIC;
1399 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1400
1401 if (vport->port_state < LPFC_VPORT_READY) {
1402 /* Link up discovery requires Fabric registration. */
1403 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
1404 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
1405 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
1406 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
1407 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
1408
1409 /* Issue SCR just before NameServer GID_FT Query */
1410 lpfc_issue_els_scr(vport, SCR_DID, 0);
1411 }
1412
1413 vport->fc_ns_retry = 0;
1414 /* Good status, issue CT Request to NameServer */
1415 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
1416 /* Cannot issue NameServer Query, so finish up discovery */
1417 goto out;
1418 }
1419
1420 lpfc_nlp_put(ndlp);
1421 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1422 kfree(mp);
1423 mempool_free(pmb, phba->mbox_mem_pool);
1424
1425 return;
1426 }
1427
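/*
 * Register (or re-register) a node with the FC transport. Builds the
 * rport identifiers from the node WWNN/WWPN/DID, adds the rport, takes
 * a node reference in rport->dd_data, and reports any FCP target or
 * initiator roles via fc_remote_port_rolechg().
 */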
1428 static void
1429 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1430 {
1431 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1432 struct fc_rport *rport;
1433 struct lpfc_rport_data *rdata;
1434 struct fc_rport_identifiers rport_ids;
1435 struct lpfc_hba *phba = vport->phba;
1436
1437 /* Remote port has reappeared. Re-register w/ FC transport */
1438 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1439 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1440 rport_ids.port_id = ndlp->nlp_DID;
1441 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1442
1443 /*
1444 * We leave our node pointer in rport->dd_data when we unregister an
1445 * FCP target port. But fc_remote_port_add zeros the space to which
1446 * rport->dd_data points. So, if we're reusing a previously
1447 * registered port, drop the reference that we took the last time we
1448 * registered the port.
1449 */
1450 if (ndlp->rport && ndlp->rport->dd_data &&
1451 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
1452 lpfc_nlp_put(ndlp);
1453 }
1454
1455 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
1456 "rport add: did:x%x flg:x%x type x%x",
1457 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1458
1459 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
1460 if (!rport || !get_device(&rport->dev)) {
1461 dev_printk(KERN_WARNING, &phba->pcidev->dev,
1462 "Warning: fc_remote_port_add failed\n");
1463 return;
1464 }
1465
1466 /* initialize static port data */
1467 rport->maxframe_size = ndlp->nlp_maxframe;
1468 rport->supported_classes = ndlp->nlp_class_sup;
1469 rdata = rport->dd_data;
1470 rdata->pnode = lpfc_nlp_get(ndlp);
1471
1472 if (ndlp->nlp_type & NLP_FCP_TARGET)
1473 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
1474 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1475 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1476
1477
1478 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
1479 fc_remote_port_rolechg(rport, rport_ids.roles);
1480
1481 if ((rport->scsi_target_id != -1) &&
1482 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
1483 ndlp->nlp_sid = rport->scsi_target_id;
1484 }
1485 return;
1486 }
1487
1488 static void
1489 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
1490 {
1491 struct fc_rport *rport = ndlp->rport;
1492
1493 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
1494 "rport delete: did:x%x flg:x%x type x%x",
1495 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1496
1497 fc_remote_port_delete(rport);
1498
1499 return;
1500 }
1501
1502 static void
1503 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
1504 {
1505 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1506
1507 spin_lock_irq(shost->host_lock);
1508 switch (state) {
1509 case NLP_STE_UNUSED_NODE:
1510 vport->fc_unused_cnt += count;
1511 break;
1512 case NLP_STE_PLOGI_ISSUE:
1513 vport->fc_plogi_cnt += count;
1514 break;
1515 case NLP_STE_ADISC_ISSUE:
1516 vport->fc_adisc_cnt += count;
1517 break;
1518 case NLP_STE_REG_LOGIN_ISSUE:
1519 vport->fc_reglogin_cnt += count;
1520 break;
1521 case NLP_STE_PRLI_ISSUE:
1522 vport->fc_prli_cnt += count;
1523 break;
1524 case NLP_STE_UNMAPPED_NODE:
1525 vport->fc_unmap_cnt += count;
1526 break;
1527 case NLP_STE_MAPPED_NODE:
1528 vport->fc_map_cnt += count;
1529 break;
1530 case NLP_STE_NPR_NODE:
1531 vport->fc_npr_cnt += count;
1532 break;
1533 }
1534 spin_unlock_irq(shost->host_lock);
1535 }
1536
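/*
 * Apply the side effects of a node state change: adjust node type and
 * flags, unregister the rport when leaving a mapped/unmapped state,
 * register it when entering one, and fall back to UNMAPPED when the
 * transport assigned no usable SCSI target ID.
 */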
1537 static void
1538 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1539 int old_state, int new_state)
1540 {
1541 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1542
1543 if (new_state == NLP_STE_UNMAPPED_NODE) {
1544 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1545 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1546 ndlp->nlp_type |= NLP_FC_NODE;
1547 }
1548 if (new_state == NLP_STE_MAPPED_NODE)
1549 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1550 if (new_state == NLP_STE_NPR_NODE)
1551 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
1552
1553 /* Transport interface */
1554 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
1555 old_state == NLP_STE_UNMAPPED_NODE)) {
1556 vport->phba->nport_event_cnt++;
1557 lpfc_unregister_remote_port(ndlp);
1558 }
1559
1560 if (new_state == NLP_STE_MAPPED_NODE ||
1561 new_state == NLP_STE_UNMAPPED_NODE) {
1562 vport->phba->nport_event_cnt++;
1563 /*
1564 * Tell the fc transport about the port, if we haven't
1565 * already. If we have, and it's a scsi entity, be
1566 * sure to unblock any attached scsi devices
1567 */
1568 lpfc_register_remote_port(vport, ndlp);
1569 }
1570 /*
1571 * if we added to Mapped list, but the remote port
1572 * registration failed or assigned a target id outside
1573 * our presentable range - move the node to the
1574 * Unmapped List
1575 */
1576 if (new_state == NLP_STE_MAPPED_NODE &&
1577 (!ndlp->rport ||
1578 ndlp->rport->scsi_target_id == -1 ||
1579 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
1580 spin_lock_irq(shost->host_lock);
1581 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1582 spin_unlock_irq(shost->host_lock);
1583 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1584 }
1585 }
1586
1587 static char *
1588 lpfc_nlp_state_name(char *buffer, size_t size, int state)
1589 {
1590 static char *states[] = {
1591 [NLP_STE_UNUSED_NODE] = "UNUSED",
1592 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
1593 [NLP_STE_ADISC_ISSUE] = "ADISC",
1594 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
1595 [NLP_STE_PRLI_ISSUE] = "PRLI",
1596 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
1597 [NLP_STE_MAPPED_NODE] = "MAPPED",
1598 [NLP_STE_NPR_NODE] = "NPR",
1599 };
1600
1601 if (state < ARRAY_SIZE(states) && states[state])
1602 strlcpy(buffer, states[state], size);
1603 else
1604 snprintf(buffer, size, "unknown (%d)", state);
1605 return buffer;
1606 }
1607
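/*
 * Central node state transition. Logs the change, cancels a pending
 * retry-delay timer when leaving NPR, keeps the per-state counters in
 * sync, and defers transport registration side effects to
 * lpfc_nlp_state_cleanup().
 */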
1608 void
1609 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1610 int state)
1611 {
1612 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1613 int old_state = ndlp->nlp_state;
1614 char name1[16], name2[16];
1615
1616 lpfc_printf_log(vport->phba, KERN_INFO, LOG_NODE,
1617 "%d (%d):0904 NPort state transition x%06x, %s -> %s\n",
1618 vport->phba->brd_no, vport->vpi,
1619 ndlp->nlp_DID,
1620 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
1621 lpfc_nlp_state_name(name2, sizeof(name2), state));
1622
1623 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
1624 "node statechg did:x%x old:%d ste:%d",
1625 ndlp->nlp_DID, old_state, state);
1626
1627 if (old_state == NLP_STE_NPR_NODE &&
1628 (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
1629 state != NLP_STE_NPR_NODE)
1630 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1631 if (old_state == NLP_STE_UNMAPPED_NODE) {
1632 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1633 ndlp->nlp_type &= ~NLP_FC_NODE;
1634 }
1635
1636 if (list_empty(&ndlp->nlp_listp)) {
1637 spin_lock_irq(shost->host_lock);
1638 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
1639 spin_unlock_irq(shost->host_lock);
1640 } else if (old_state)
1641 lpfc_nlp_counters(vport, old_state, -1);
1642
1643 ndlp->nlp_state = state;
1644 lpfc_nlp_counters(vport, state, 1);
1645 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
1646 }
1647
1648 void
1649 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1650 {
1651 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1652
1653 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1654 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1655 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1656 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1657 spin_lock_irq(shost->host_lock);
1658 list_del_init(&ndlp->nlp_listp);
1659 spin_unlock_irq(shost->host_lock);
1660 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1661 NLP_STE_UNUSED_NODE);
1662 }
1663
1664 void
1665 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1666 {
1667 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1668
1669 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1670 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1671 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1672 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1673 spin_lock_irq(shost->host_lock);
1674 list_del_init(&ndlp->nlp_listp);
1675 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
1676 spin_unlock_irq(shost->host_lock);
1677 lpfc_nlp_put(ndlp);
1678 }
1679
1680 /*
1681 * Start or restart the rescue timer for Discovery / RSCN handling
1682 */
1683 void
1684 lpfc_set_disctmo(struct lpfc_vport *vport)
1685 {
1686 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1687 struct lpfc_hba *phba = vport->phba;
1688 uint32_t tmo;
1689
1690 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
1691 /* For FAN, the timeout should be greater than edtov */
1692 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1693 } else {
1694 /* Normal discovery timeout should be greater than the ELS/CT timeout.
1695 * FC spec states we need 3 * ratov for CT requests
1696 */
1697 tmo = ((phba->fc_ratov * 3) + 3);
1698 }
1699
1700
1701 if (!timer_pending(&vport->fc_disctmo)) {
1702 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1703 "set disc timer: tmo:x%x state:x%x flg:x%x",
1704 tmo, vport->port_state, vport->fc_flag);
1705 }
1706
1707 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
1708 spin_lock_irq(shost->host_lock);
1709 vport->fc_flag |= FC_DISC_TMO;
1710 spin_unlock_irq(shost->host_lock);
1711
1712 /* Start Discovery Timer state <hba_state> */
1713 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1714 "%d (%d):0247 Start Discovery Timer state x%x "
1715 "Data: x%x x%lx x%x x%x\n",
1716 phba->brd_no, vport->vpi, vport->port_state, tmo,
1717 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1718 vport->fc_adisc_cnt);
1719
1720 return;
1721 }
1722
1723 /*
1724 * Cancel rescue timer for Discovery / RSCN handling
1725 */
1726 int
1727 lpfc_can_disctmo(struct lpfc_vport *vport)
1728 {
1729 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1730 struct lpfc_hba *phba = vport->phba;
1731 unsigned long iflags;
1732
1733 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1734 "can disc timer: state:x%x rtry:x%x flg:x%x",
1735 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
1736
1737 /* Turn off discovery timer if its running */
1738 if (vport->fc_flag & FC_DISC_TMO) {
1739 spin_lock_irqsave(shost->host_lock, iflags);
1740 vport->fc_flag &= ~FC_DISC_TMO;
1741 spin_unlock_irqrestore(shost->host_lock, iflags);
1742 del_timer_sync(&vport->fc_disctmo);
1743 spin_lock_irqsave(&vport->work_port_lock, iflags);
1744 vport->work_port_events &= ~WORKER_DISC_TMO;
1745 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
1746 }
1747
1748 /* Cancel Discovery Timer state <hba_state> */
1749 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1750 "%d (%d):0248 Cancel Discovery Timer state x%x "
1751 "Data: x%x x%x x%x\n",
1752 phba->brd_no, vport->vpi, vport->port_state,
1753 vport->fc_flag, vport->fc_plogi_cnt,
1754 vport->fc_adisc_cnt);
1755
1756 return 0;
1757 }
1758
1759 /*
1760 * Check specified ring for outstanding IOCB on the SLI queue
1761 * Return true if iocb matches the specified nport
1762 */
1763 int
1764 lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1765 struct lpfc_sli_ring *pring,
1766 struct lpfc_iocbq *iocb,
1767 struct lpfc_nodelist *ndlp)
1768 {
1769 struct lpfc_sli *psli = &phba->sli;
1770 IOCB_t *icmd = &iocb->iocb;
1771 struct lpfc_vport *vport = ndlp->vport;
1772
1773 if (iocb->vport != vport)
1774 return 0;
1775
1776 if (pring->ringno == LPFC_ELS_RING) {
1777 switch (icmd->ulpCommand) {
1778 case CMD_GEN_REQUEST64_CR:
1779 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1780 return 1;
1781 case CMD_ELS_REQUEST64_CR:
1782 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
1783 return 1;
1784 case CMD_XMIT_ELS_RSP64_CX:
1785 if (iocb->context1 == (uint8_t *) ndlp)
1786 return 1;
1787 }
1788 } else if (pring->ringno == psli->extra_ring) {
1789
1790 } else if (pring->ringno == psli->fcp_ring) {
1791 /* Skip match check if waiting to relogin to FCP target */
1792 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1793 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1794 return 0;
1795 }
1796 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
1797 return 1;
1798 }
1799 } else if (pring->ringno == psli->next_ring) {
1800
1801 }
1802 return 0;
1803 }
1804
1805 /*
1806 * Free resources / clean up outstanding I/Os
1807 * associated with nlp_rpi in the LPFC_NODELIST entry.
1808 */
1809 static int
1810 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1811 {
1812 LIST_HEAD(completions);
1813 struct lpfc_sli *psli;
1814 struct lpfc_sli_ring *pring;
1815 struct lpfc_iocbq *iocb, *next_iocb;
1816 IOCB_t *icmd;
1817 uint32_t rpi, i;
1818
1819 lpfc_fabric_abort_nport(ndlp);
1820
1821 /*
1822 * Everything that matches on txcmplq will be returned
1823 * by firmware with a no rpi error.
1824 */
1825 psli = &phba->sli;
1826 rpi = ndlp->nlp_rpi;
1827 if (rpi) {
1828 /* Now process each ring */
1829 for (i = 0; i < psli->num_rings; i++) {
1830 pring = &psli->ring[i];
1831
1832 spin_lock_irq(&phba->hbalock);
1833 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1834 list) {
1835 /*
1836 * Check to see if iocb matches the nport we are
1837 * looking for
1838 */
1839 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
1840 ndlp))) {
1841 /* It matches, so dequeue it and call
1842 the completion routine with an error */
1843 list_move_tail(&iocb->list,
1844 &completions);
1845 pring->txq_cnt--;
1846 }
1847 }
1848 spin_unlock_irq(&phba->hbalock);
1849 }
1850 }
1851
1852 while (!list_empty(&completions)) {
1853 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1854 list_del_init(&iocb->list);
1855
1856 if (!iocb->iocb_cmpl)
1857 lpfc_sli_release_iocbq(phba, iocb);
1858 else {
1859 icmd = &iocb->iocb;
1860 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1861 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1862 (iocb->iocb_cmpl)(phba, iocb, iocb);
1863 }
1864 }
1865
1866 return 0;
1867 }
1868
1869 /*
1870 * Free rpi associated with LPFC_NODELIST entry.
1871 * This routine is called from lpfc_freenode(), when we are removing
1872 * a LPFC_NODELIST entry. It is also called if the driver initiates a
1873 * LOGO that completes successfully, and we are waiting to PLOGI back
1874 * to the remote NPort. In addition, it is called after we receive
1875 * an unsolicited ELS cmd, send back a rsp, the rsp completes, and
1876 * we are waiting to PLOGI back to the remote NPort.
1877 */
1878 int
1879 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1880 {
1881 struct lpfc_hba *phba = vport->phba;
1882 LPFC_MBOXQ_t *mbox;
1883 int rc;
1884
1885 if (ndlp->nlp_rpi) {
1886 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1887 if (mbox) {
1888 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
1889 mbox->vport = vport;
1890 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1891 rc = lpfc_sli_issue_mbox(phba, mbox,
1892 (MBX_NOWAIT | MBX_STOP_IOCB));
1893 if (rc == MBX_NOT_FINISHED)
1894 mempool_free(mbox, phba->mbox_mem_pool);
1895 }
1896 lpfc_no_rpi(phba, ndlp);
1897 ndlp->nlp_rpi = 0;
1898 return 1;
1899 }
1900 return 0;
1901 }
1902
1903 void
1904 lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1905 {
1906 struct lpfc_hba *phba = vport->phba;
1907 LPFC_MBOXQ_t *mbox;
1908 int rc;
1909
1910 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1911 if (mbox) {
1912 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1913 mbox->vport = vport;
1914 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1915 rc = lpfc_sli_issue_mbox(phba, mbox,
1916 (MBX_NOWAIT | MBX_STOP_IOCB));
1917 if (rc == MBX_NOT_FINISHED) {
1918 mempool_free(mbox, phba->mbox_mem_pool);
1919 }
1920 }
1921 }
1922
1923 void
1924 lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1925 {
1926 struct lpfc_hba *phba = vport->phba;
1927 LPFC_MBOXQ_t *mbox;
1928 int rc;
1929
1930 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1931 if (mbox) {
1932 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1933 mbox->vport = vport;
1934 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1935 rc = lpfc_sli_issue_mbox(phba, mbox,
1936 (MBX_NOWAIT | MBX_STOP_IOCB));
1937 if (rc == MBX_NOT_FINISHED) {
1938 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1939 "%d (%d):1815 Could not issue "
1940 "unreg_did (default rpis)\n",
1941 phba->brd_no, vport->vpi);
1942 mempool_free(mbox, phba->mbox_mem_pool);
1943 }
1944 }
1945 }
1946
1947 /*
1948 * Release the resources held by an LPFC_NODELIST entry
1949 * so that the entry itself can be freed.
1950 */
1951 static int
1952 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1953 {
1954 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1955 struct lpfc_hba *phba = vport->phba;
1956 LPFC_MBOXQ_t *mb, *nextmb;
1957 struct lpfc_dmabuf *mp;
1958
1959 /* Cleanup node for NPort <nlp_DID> */
1960 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1961 "%d (%d):0900 Cleanup node for NPort x%x "
1962 "Data: x%x x%x x%x\n",
1963 phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag,
1964 ndlp->nlp_state, ndlp->nlp_rpi);
1965
1966 lpfc_dequeue_node(vport, ndlp);
1967
1968 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1969 if ((mb = phba->sli.mbox_active)) {
1970 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1971 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1972 mb->context2 = NULL;
1973 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1974 }
1975 }
1976
1977 spin_lock_irq(&phba->hbalock);
1978 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1979 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1980 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1981 mp = (struct lpfc_dmabuf *) (mb->context1);
1982 if (mp) {
1983 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
1984 kfree(mp);
1985 }
1986 list_del(&mb->list);
1987 mempool_free(mb, phba->mbox_mem_pool);
1988 lpfc_nlp_put(ndlp);
1989 }
1990 }
1991 spin_unlock_irq(&phba->hbalock);
1992
1993 lpfc_els_abort(phba,ndlp);
1994 spin_lock_irq(shost->host_lock);
1995 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1996 spin_unlock_irq(shost->host_lock);
1997
1998 ndlp->nlp_last_elscmd = 0;
1999 del_timer_sync(&ndlp->nlp_delayfunc);
2000
2001 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
2002 list_del_init(&ndlp->els_retry_evt.evt_listp);
2003 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
2004 list_del_init(&ndlp->dev_loss_evt.evt_listp);
2005
2011 lpfc_unreg_rpi(vport, ndlp);
2012
2013 return 0;
2014 }
2015
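/*
 * Editor's note on the two REG_LOGIN scans above: the active mailbox
 * command (sli.mbox_active) is already with the firmware and cannot be
 * recalled, so only its ndlp back-pointer and completion handler are
 * neutered; commands still queued on sli.mboxq were never issued, so
 * they can be unlinked outright, their DMA buffers freed, and the node
 * reference they held dropped via lpfc_nlp_put().
 */
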
2016 /*
2017 * Check to see if we can free the nlp back to the freelist.
2018 * If we are in the middle of using the nlp in the discovery state
2019 * machine, defer the free till we reach the end of the state machine.
2020 */
2021 static void
2022 lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2023 {
2024 struct lpfc_rport_data *rdata;
2025
2026 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
2027 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2028 }
2029
2030 lpfc_cleanup_node(vport, ndlp);
2031
2032 /*
2033 * We can get here with a non-NULL ndlp->rport because when we
2034 * unregister a rport we don't break the rport/node linkage. If the
2035 * linkage is still there, make sure we don't leave any dangling pointers.
2036 */
2037 if (ndlp->rport) {
2038 rdata = ndlp->rport->dd_data;
2039 rdata->pnode = NULL;
2040 ndlp->rport = NULL;
2041 }
2042 }
2043
2044 static int
2045 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2046 uint32_t did)
2047 {
2048 D_ID mydid, ndlpdid, matchdid;
2049
2050 if (did == Bcast_DID)
2051 return 0;
2052
2053 if (ndlp->nlp_DID == 0) {
2054 return 0;
2055 }
2056
2057 /* First check for Direct match */
2058 if (ndlp->nlp_DID == did)
2059 return 1;
2060
2061 /* Next check for a match where the area/domain is identically 0 */
2062 mydid.un.word = vport->fc_myDID;
2063 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
2064 return 0;
2065 }
2066
2067 matchdid.un.word = did;
2068 ndlpdid.un.word = ndlp->nlp_DID;
2069 if (matchdid.un.b.id == ndlpdid.un.b.id) {
2070 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
2071 (mydid.un.b.area == matchdid.un.b.area)) {
2072 if ((ndlpdid.un.b.domain == 0) &&
2073 (ndlpdid.un.b.area == 0)) {
2074 if (ndlpdid.un.b.id)
2075 return 1;
2076 }
2077 return 0;
2078 }
2079
2080 matchdid.un.word = ndlp->nlp_DID;
2081 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
2082 (mydid.un.b.area == ndlpdid.un.b.area)) {
2083 if ((matchdid.un.b.domain == 0) &&
2084 (matchdid.un.b.area == 0)) {
2085 if (matchdid.un.b.id)
2086 return 1;
2087 }
2088 }
2089 }
2090 return 0;
2091 }
2092
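/*
 * Editor's note: a worked example of the fuzzy match in lpfc_matchdid(),
 * assuming (consistent with the D_ID fields used above) that a 24-bit
 * DID decomposes as domain(23:16) / area(15:8) / al_pa(7:0).  Suppose
 * fc_myDID = 0x010100 and a node was recorded pre-fabric with
 * nlp_DID = 0x0000EF.  A probe for did = 0x0101EF then matches: the
 * al_pa bytes agree (0xEF), our domain/area agree with the probe's
 * (0x01/0x01), and the stored DID has domain == area == 0 with a
 * non-zero al_pa, so the routine returns 1 and the existing ndlp is
 * reused instead of a duplicate being created.  The second block
 * handles the mirror-image case.  (Values illustrative only.)
 */
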
2093 /* Search for a nodelist entry */
2094 static struct lpfc_nodelist *
2095 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2096 {
2097 struct lpfc_hba *phba = vport->phba;
2098 struct lpfc_nodelist *ndlp;
2099 uint32_t data1;
2100
2101 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2102 if (lpfc_matchdid(vport, ndlp, did)) {
2103 data1 = (((uint32_t) ndlp->nlp_state << 24) |
2104 ((uint32_t) ndlp->nlp_xri << 16) |
2105 ((uint32_t) ndlp->nlp_type << 8) |
2106 ((uint32_t) ndlp->nlp_rpi & 0xff));
2107 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
2108 "%d (%d):0929 FIND node DID "
2109 " Data: x%p x%x x%x x%x\n",
2110 phba->brd_no, vport->vpi,
2111 ndlp, ndlp->nlp_DID,
2112 ndlp->nlp_flag, data1);
2113 return ndlp;
2114 }
2115 }
2116
2117 /* FIND node did <did> NOT FOUND */
2118 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
2119 "%d (%d):0932 FIND node did x%x NOT FOUND.\n",
2120 phba->brd_no, vport->vpi, did);
2121 return NULL;
2122 }
2123
2124 struct lpfc_nodelist *
2125 lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2126 {
2127 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2128 struct lpfc_nodelist *ndlp;
2129
2130 spin_lock_irq(shost->host_lock);
2131 ndlp = __lpfc_findnode_did(vport, did);
2132 spin_unlock_irq(shost->host_lock);
2133 return ndlp;
2134 }
2135
2136 struct lpfc_nodelist *
2137 lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2138 {
2139 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2140 struct lpfc_nodelist *ndlp;
2141
2142 ndlp = lpfc_findnode_did(vport, did);
2143 if (!ndlp) {
2144 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
2145 lpfc_rscn_payload_check(vport, did) == 0)
2146 return NULL;
2147 ndlp = (struct lpfc_nodelist *)
2148 mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
2149 if (!ndlp)
2150 return NULL;
2151 lpfc_nlp_init(vport, ndlp, did);
2152 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2153 spin_lock_irq(shost->host_lock);
2154 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2155 spin_unlock_irq(shost->host_lock);
2156 return ndlp;
2157 }
2158 if (vport->fc_flag & FC_RSCN_MODE) {
2159 if (lpfc_rscn_payload_check(vport, did)) {
2160 spin_lock_irq(shost->host_lock);
2161 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2162 spin_unlock_irq(shost->host_lock);
2163
2164 /* Since this node is marked for discovery,
2165 * delay timeout is not needed.
2166 */
2167 if (ndlp->nlp_flag & NLP_DELAY_TMO)
2168 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2169 } else
2170 ndlp = NULL;
2171 } else {
2172 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
2173 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
2174 return NULL;
2175 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2176 spin_lock_irq(shost->host_lock);
2177 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2178 spin_unlock_irq(shost->host_lock);
2179 }
2180 return ndlp;
2181 }
2182
2183 /* Build a list of nodes to discover based on the loopmap */
2184 void
2185 lpfc_disc_list_loopmap(struct lpfc_vport *vport)
2186 {
2187 struct lpfc_hba *phba = vport->phba;
2188 int j;
2189 uint32_t alpa, index;
2190
2191 if (!lpfc_is_link_up(phba))
2192 return;
2193
2194 if (phba->fc_topology != TOPOLOGY_LOOP)
2195 return;
2196
2197 /* Check whether a loop map is present */
2198 if (phba->alpa_map[0]) {
2199 for (j = 1; j <= phba->alpa_map[0]; j++) {
2200 alpa = phba->alpa_map[j];
2201 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
2202 continue;
2203 lpfc_setup_disc_node(vport, alpa);
2204 }
2205 } else {
2206 /* No loop map, so try all ALPAs */
2207 for (j = 0; j < FC_MAXLOOP; j++) {
2208 /* If cfg_scan_down is set, start from highest
2209 * ALPA (0xef) to lowest (0x1).
2210 */
2211 if (phba->cfg_scan_down)
2212 index = j;
2213 else
2214 index = FC_MAXLOOP - j - 1;
2215 alpa = lpfcAlpaArray[index];
2216 if ((vport->fc_myDID & 0xff) == alpa)
2217 continue;
2218 lpfc_setup_disc_node(vport, alpa);
2219 }
2220 }
2221 return;
2222 }
2223
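/*
 * Editor's note on the loop map consumed above: alpa_map[0] holds the
 * number of valid entries and alpa_map[1..N] the AL_PAs in loop order.
 * For example (illustrative values only), a two-device private loop
 * where we are AL_PA 0x01 might read { 0x02, 0x01, 0xE8 }; the loop
 * skips our own AL_PA and any zero entry, so only 0xE8 gets a
 * discovery node via lpfc_setup_disc_node().
 */
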
2224 void
2225 lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2226 {
2227 LPFC_MBOXQ_t *mbox;
2228 struct lpfc_sli *psli = &phba->sli;
2229 struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
2230 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
2231 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
2232 int rc;
2233
2234 /*
2235 * If it's not a physical port or if we already sent
2236 * clear_la, then don't send it again.
2237 */
2238 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2239 (vport->port_type != LPFC_PHYSICAL_PORT))
2240 return;
2241
2242 /* Link up discovery */
2243 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
2244 phba->link_state = LPFC_CLEAR_LA;
2245 lpfc_clear_la(phba, mbox);
2246 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2247 mbox->vport = vport;
2248 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT |
2249 MBX_STOP_IOCB));
2250 if (rc == MBX_NOT_FINISHED) {
2251 mempool_free(mbox, phba->mbox_mem_pool);
2252 lpfc_disc_flush_list(vport);
2253 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2254 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2255 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2256 phba->link_state = LPFC_HBA_ERROR;
2257 }
2258 }
2259 }
2260
2261 /* Reg_vpi to tell firmware to resume normal operations */
2262 void
2263 lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2264 {
2265 LPFC_MBOXQ_t *regvpimbox;
2266
2267 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2268 if (regvpimbox) {
2269 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2270 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2271 regvpimbox->vport = vport;
2272 if (lpfc_sli_issue_mbox(phba, regvpimbox,
2273 (MBX_NOWAIT | MBX_STOP_IOCB))
2274 == MBX_NOT_FINISHED) {
2275 mempool_free(regvpimbox, phba->mbox_mem_pool);
2276 }
2277 }
2278 }
2279
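/*
 * Editor's note: lpfc_issue_clear_la() and lpfc_issue_reg_vpi() above
 * both follow the driver's standard non-blocking mailbox idiom.  A
 * minimal sketch of that idiom; example_issue_mbox_nowait and its
 * setup_fn/cmpl_fn parameters are illustrative only, not a driver API.
 */
static inline int
example_issue_mbox_nowait(struct lpfc_hba *phba, struct lpfc_vport *vport,
			  void (*setup_fn)(struct lpfc_hba *, LPFC_MBOXQ_t *),
			  void (*cmpl_fn)(struct lpfc_hba *, LPFC_MBOXQ_t *))
{
	LPFC_MBOXQ_t *mbox;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	setup_fn(phba, mbox);		/* fill in the mailbox command */
	mbox->vport = vport;		/* route completion to this vport */
	mbox->mbox_cmpl = cmpl_fn;	/* called from the SLI layer */

	/* On MBX_NOT_FINISHED the command was never queued, so the
	 * caller still owns the buffer and must release it.
	 */
	if (lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
	    == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
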
2280 /* Start Link up / RSCN discovery on NPR nodes */
2281 void
2282 lpfc_disc_start(struct lpfc_vport *vport)
2283 {
2284 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2285 struct lpfc_hba *phba = vport->phba;
2286 uint32_t num_sent;
2287 uint32_t clear_la_pending;
2288 int did_changed;
2289
2290 if (!lpfc_is_link_up(phba))
2291 return;
2292
2293 if (phba->link_state == LPFC_CLEAR_LA)
2294 clear_la_pending = 1;
2295 else
2296 clear_la_pending = 0;
2297
2298 if (vport->port_state < LPFC_VPORT_READY)
2299 vport->port_state = LPFC_DISC_AUTH;
2300
2301 lpfc_set_disctmo(vport);
2302
2303 if (vport->fc_prevDID == vport->fc_myDID)
2304 did_changed = 0;
2305 else
2306 did_changed = 1;
2307
2308 vport->fc_prevDID = vport->fc_myDID;
2309 vport->num_disc_nodes = 0;
2310
2311 /* Start Discovery state <hba_state> */
2312 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2313 "%d (%d):0202 Start Discovery hba state x%x "
2314 "Data: x%x x%x x%x\n",
2315 phba->brd_no, vport->vpi, vport->port_state,
2316 vport->fc_flag, vport->fc_plogi_cnt,
2317 vport->fc_adisc_cnt);
2318
2319 /* First do ADISCs - if any */
2320 num_sent = lpfc_els_disc_adisc(vport);
2321
2322 if (num_sent)
2323 return;
2324
2325 /*
2326 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
2327 * continue discovery.
2328 */
2329 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2330 !(vport->fc_flag & FC_RSCN_MODE)) {
2331 lpfc_issue_reg_vpi(phba, vport);
2332 return;
2333 }
2334
2335 /*
2336 * For SLI2, we need to set port_state to READY and continue
2337 * discovery.
2338 */
2339 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
2340 /* If we get here, there is nothing to ADISC */
2341 if (vport->port_type == LPFC_PHYSICAL_PORT)
2342 lpfc_issue_clear_la(phba, vport);
2343
2344 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2345 vport->num_disc_nodes = 0;
2346 /* go thru NPR nodes and issue ELS PLOGIs */
2347 if (vport->fc_npr_cnt)
2348 lpfc_els_disc_plogi(vport);
2349
2350 if (!vport->num_disc_nodes) {
2351 spin_lock_irq(shost->host_lock);
2352 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2353 spin_unlock_irq(shost->host_lock);
2354 lpfc_can_disctmo(vport);
2355 }
2356 }
2357 vport->port_state = LPFC_VPORT_READY;
2358 } else {
2359 /* Next do PLOGIs - if any */
2360 num_sent = lpfc_els_disc_plogi(vport);
2361
2362 if (num_sent)
2363 return;
2364
2365 if (vport->fc_flag & FC_RSCN_MODE) {
2366 /* Check to see if more RSCNs came in while we
2367 * were processing this one.
2368 */
2369 if ((vport->fc_rscn_id_cnt == 0) &&
2370 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
2371 spin_lock_irq(shost->host_lock);
2372 vport->fc_flag &= ~FC_RSCN_MODE;
2373 spin_unlock_irq(shost->host_lock);
2374 lpfc_can_disctmo(vport);
2375 } else
2376 lpfc_els_handle_rscn(vport);
2377 }
2378 }
2379 return;
2380 }
2381
2382 /*
2383 * Ignore completion for all IOCBs on the tx and txcmpl queues of the
2384 * ELS ring that match the specified nodelist.
2385 */
2386 static void
2387 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2388 {
2389 LIST_HEAD(completions);
2390 struct lpfc_sli *psli;
2391 IOCB_t *icmd;
2392 struct lpfc_iocbq *iocb, *next_iocb;
2393 struct lpfc_sli_ring *pring;
2394
2395 psli = &phba->sli;
2396 pring = &psli->ring[LPFC_ELS_RING];
2397
2398 /* Error out matching iocbs on the txq or txcmplq.
2399 * First check the txq.
2400 */
2401 spin_lock_irq(&phba->hbalock);
2402 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2403 if (iocb->context1 != ndlp) {
2404 continue;
2405 }
2406 icmd = &iocb->iocb;
2407 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2408 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2409
2410 list_move_tail(&iocb->list, &completions);
2411 pring->txq_cnt--;
2412 }
2413 }
2414
2415 /* Next check the txcmplq */
2416 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2417 if (iocb->context1 != ndlp) {
2418 continue;
2419 }
2420 icmd = &iocb->iocb;
2421 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
2422 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
2423 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
2424 }
2425 }
2426 spin_unlock_irq(&phba->hbalock);
2427
2428 while (!list_empty(&completions)) {
2429 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
2430 list_del_init(&iocb->list);
2431
2432 if (!iocb->iocb_cmpl)
2433 lpfc_sli_release_iocbq(phba, iocb);
2434 else {
2435 icmd = &iocb->iocb;
2436 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2437 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2438 (iocb->iocb_cmpl) (phba, iocb, iocb);
2439 }
2440 }
2441 }
2442
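/*
 * Editor's note on the asymmetry in lpfc_free_tx() above: iocbs still on
 * the txq were never handed to the adapter, so the driver fails them
 * directly with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED; iocbs on the
 * txcmplq are owned by the firmware, so the driver can only request an
 * abort via lpfc_sli_issue_abort_iotag() and let the eventual response
 * complete them.
 */
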
2443 void
2444 lpfc_disc_flush_list(struct lpfc_vport *vport)
2445 {
2446 struct lpfc_nodelist *ndlp, *next_ndlp;
2447 struct lpfc_hba *phba = vport->phba;
2448
2449 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
2450 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2451 nlp_listp) {
2452 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2453 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
2454 lpfc_free_tx(phba, ndlp);
2455 lpfc_nlp_put(ndlp);
2456 }
2457 }
2458 }
2459 }
2460
2461 void
2462 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
2463 {
2464 lpfc_els_flush_rscn(vport);
2465 lpfc_els_flush_cmd(vport);
2466 lpfc_disc_flush_list(vport);
2467 }
2468
2469 /*****************************************************************************/
2470 /*
2471 * NAME: lpfc_disc_timeout
2472 *
2473 * FUNCTION: Fibre Channel driver discovery timeout routine.
2474 *
2475 * EXECUTION ENVIRONMENT: interrupt only
2476 *
2477 * CALLED FROM:
2478 * Timer function
2479 *
2480 * RETURNS:
2481 * none
2482 */
2483 /*****************************************************************************/
2484 void
2485 lpfc_disc_timeout(unsigned long ptr)
2486 {
2487 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
2488 struct lpfc_hba *phba = vport->phba;
2489 unsigned long flags = 0;
2490
2491 if (unlikely(!phba))
2492 return;
2493
2494 if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
2495 spin_lock_irqsave(&vport->work_port_lock, flags);
2496 vport->work_port_events |= WORKER_DISC_TMO;
2497 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2498
2499 spin_lock_irqsave(&phba->hbalock, flags);
2500 if (phba->work_wait)
2501 lpfc_worker_wake_up(phba);
2502 spin_unlock_irqrestore(&phba->hbalock, flags);
2503 }
2504 return;
2505 }
2506
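/*
 * Editor's note: lpfc_disc_timeout() above runs in timer (softirq)
 * context, so it only records WORKER_DISC_TMO under work_port_lock and
 * wakes the worker thread; the heavyweight state-machine work is
 * deferred to lpfc_disc_timeout_handler() below, which the worker runs
 * in process context where it may take host_lock and issue mailbox
 * commands.
 */
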
2507 static void
2508 lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2509 {
2510 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2511 struct lpfc_hba *phba = vport->phba;
2512 struct lpfc_sli *psli = &phba->sli;
2513 struct lpfc_nodelist *ndlp, *next_ndlp;
2514 LPFC_MBOXQ_t *initlinkmbox;
2515 int rc, clrlaerr = 0;
2516
2517 if (!(vport->fc_flag & FC_DISC_TMO))
2518 return;
2519
2520 spin_lock_irq(shost->host_lock);
2521 vport->fc_flag &= ~FC_DISC_TMO;
2522 spin_unlock_irq(shost->host_lock);
2523
2524 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2525 "disc timeout: state:x%x rtry:x%x flg:x%x",
2526 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
2527
2528 switch (vport->port_state) {
2529
2530 case LPFC_LOCAL_CFG_LINK:
2531 /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
2532 * FAN
2533 */
2534 /* FAN timeout */
2535 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
2536 "%d (%d):0221 FAN timeout\n",
2537 phba->brd_no, vport->vpi);
2538
2539 /* Start discovery by sending FLOGI, clean up old rpis */
2540 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2541 nlp_listp) {
2542 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
2543 continue;
2544 if (ndlp->nlp_type & NLP_FABRIC) {
2545 /* Clean up the ndlp on Fabric connections */
2546 lpfc_drop_node(vport, ndlp);
2547 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2548 /* Fail outstanding IO now since device
2549 * is marked for PLOGI.
2550 */
2551 lpfc_unreg_rpi(vport, ndlp);
2552 }
2553 }
2554 if (vport->port_state != LPFC_FLOGI) {
2555 vport->port_state = LPFC_FLOGI;
2556 lpfc_set_disctmo(vport);
2557 lpfc_initial_flogi(vport);
2558 }
2559 break;
2560
2561 case LPFC_FDISC:
2562 case LPFC_FLOGI:
2563 /* port_state is LPFC_FDISC or LPFC_FLOGI while waiting for login cmpl */
2564 /* Initial FLOGI timeout */
2565 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2566 "%d (%d):0222 Initial %s timeout\n",
2567 phba->brd_no, vport->vpi,
2568 vport->vpi ? "FDISC" : "FLOGI");
2569
2570 /* Assume no Fabric and go on with discovery.
2571 * Check for outstanding ELS FLOGI to abort.
2572 */
2573
2574 /* FLOGI failed, so just use loop map to make discovery list */
2575 lpfc_disc_list_loopmap(vport);
2576
2577 /* Start discovery */
2578 lpfc_disc_start(vport);
2579 break;
2580
2581 case LPFC_FABRIC_CFG_LINK:
2582 /* port_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2583 NameServer login */
2584 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2585 "%d (%d):0223 Timeout while waiting for "
2586 "NameServer login\n",
2587 phba->brd_no, vport->vpi);
2588
2589 /* Look up the NameServer ndlp and release our reference to it */
2590 ndlp = lpfc_findnode_did(vport, NameServer_DID);
2591 if (ndlp)
2592 lpfc_nlp_put(ndlp);
2593 /* Start discovery */
2594 lpfc_disc_start(vport);
2595 break;
2596
2597 case LPFC_NS_QRY:
2598 /* Check for wait for NameServer Rsp timeout */
2599 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2600 "%d (%d):0224 NameServer Query timeout "
2601 "Data: x%x x%x\n",
2602 phba->brd_no, vport->vpi,
2603 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2604
2605 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2606 /* Try it one more time */
2607 vport->fc_ns_retry++;
2608 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
2609 vport->fc_ns_retry, 0);
2610 if (rc == 0)
2611 break;
2612 }
2613 vport->fc_ns_retry = 0;
2614
2615 /*
2616 * Discovery is over.
2617 * For SLI2, set port_state to LPFC_VPORT_READY here.
2618 * For SLI3, cmpl_reg_vpi will set port_state to READY.
2619 */
2620 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2621 lpfc_issue_reg_vpi(phba, vport);
2622 else { /* NPIV Not enabled */
2623 lpfc_issue_clear_la(phba, vport);
2624 vport->port_state = LPFC_VPORT_READY;
2625 }
2626
2627 /* Setup and issue mailbox INITIALIZE LINK command */
2628 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2629 if (!initlinkmbox) {
2630 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2631 "%d (%d):0206 Device Discovery "
2632 "completion error\n",
2633 phba->brd_no, vport->vpi);
2634 phba->link_state = LPFC_HBA_ERROR;
2635 break;
2636 }
2637
2638 lpfc_linkdown(phba);
2639 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2640 phba->cfg_link_speed);
2641 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2642 initlinkmbox->vport = vport;
2643 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2644 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2645 (MBX_NOWAIT | MBX_STOP_IOCB));
2646 lpfc_set_loopback_flag(phba);
2647 if (rc == MBX_NOT_FINISHED)
2648 mempool_free(initlinkmbox, phba->mbox_mem_pool);
2649
2650 break;
2651
2652 case LPFC_DISC_AUTH:
2653 /* Node Authentication timeout */
2654 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2655 "%d (%d):0227 Node Authentication timeout\n",
2656 phba->brd_no, vport->vpi);
2657 lpfc_disc_flush_list(vport);
2658
2659 /*
2660 * For SLI2, set port_state to LPFC_VPORT_READY here.
2661 * For SLI3, cmpl_reg_vpi will set port_state to READY.
2662 */
2663 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2664 lpfc_issue_reg_vpi(phba, vport);
2665 else { /* NPIV Not enabled */
2666 lpfc_issue_clear_la(phba, vport);
2667 vport->port_state = LPFC_VPORT_READY;
2668 }
2669 break;
2670
2671 case LPFC_VPORT_READY:
2672 if (vport->fc_flag & FC_RSCN_MODE) {
2673 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2674 "%d (%d):0231 RSCN timeout Data: x%x "
2675 "x%x\n",
2676 phba->brd_no, vport->vpi,
2677 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2678
2679 /* Cleanup any outstanding ELS commands */
2680 lpfc_els_flush_cmd(vport);
2681
2682 lpfc_els_flush_rscn(vport);
2683 lpfc_disc_flush_list(vport);
2684 }
2685 break;
2686
2687 default:
2688 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2689 "%d (%d):0229 Unexpected discovery timeout, "
2690 "vport State x%x\n",
2691 phba->brd_no, vport->vpi, vport->port_state);
2692
2693 break;
2694 }
2695
2696 switch (phba->link_state) {
2697 case LPFC_CLEAR_LA:
2698 /* CLEAR LA timeout */
2699 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2700 "%d (%d):0228 CLEAR LA timeout\n",
2701 phba->brd_no, vport->vpi);
2702 clrlaerr = 1;
2703 break;
2704
2705 case LPFC_LINK_UNKNOWN:
2706 case LPFC_WARM_START:
2707 case LPFC_INIT_START:
2708 case LPFC_INIT_MBX_CMDS:
2709 case LPFC_LINK_DOWN:
2710 case LPFC_LINK_UP:
2711 case LPFC_HBA_ERROR:
2712 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2713 "%d (%d):0230 Unexpected timeout, hba link "
2714 "state x%x\n",
2715 phba->brd_no, vport->vpi, phba->link_state);
2716 clrlaerr = 1;
2717 break;
2718
2719 case LPFC_HBA_READY:
2720 break;
2721 }
2722
2723 if (clrlaerr) {
2724 lpfc_disc_flush_list(vport);
2725 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2726 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2727 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2728 vport->port_state = LPFC_VPORT_READY;
2729 }
2730
2731 return;
2732 }
2733
2734 /*
2735 * This routine handles processing an FDMI REG_LOGIN mailbox
2736 * command upon completion. It is setup in the LPFC_MBOXQ
2737 * as the completion routine when the command is
2738 * handed off to the SLI layer.
2739 */
2740 void
2741 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2742 {
2743 MAILBOX_t *mb = &pmb->mb;
2744 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2745 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2746 struct lpfc_vport *vport = pmb->vport;
2747
2748 pmb->context1 = NULL;
2749
2750 ndlp->nlp_rpi = mb->un.varWords[0];
2751 ndlp->nlp_type |= NLP_FABRIC;
2752 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2753
2754 /*
2755 * Start issuing the Fabric-Device Management Interface (FDMI) command
2756 * to 0xfffffa (the FDMI well-known port) now, or delay issuing it for
2757 * 60 seconds if fdmi-on=2 (supporting RPA/hostname).
2758 */
2759
2760 if (phba->cfg_fdmi_on == 1)
2761 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
2762 else
2763 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
2764
2765 /* Mailbox took a reference to the node */
2766 lpfc_nlp_put(ndlp);
2767 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2768 kfree(mp);
2769 mempool_free(pmb, phba->mbox_mem_pool);
2770
2771 return;
2772 }
2773
2774 static int
2775 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
2776 {
2777 uint16_t *rpi = param;
2778
2779 return ndlp->nlp_rpi == *rpi;
2780 }
2781
2782 static int
2783 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
2784 {
2785 return memcmp(&ndlp->nlp_portname, param,
2786 sizeof(ndlp->nlp_portname)) == 0;
2787 }
2788
2789 struct lpfc_nodelist *
2790 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2791 {
2792 struct lpfc_nodelist *ndlp;
2793
2794 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2795 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
2796 filter(ndlp, param))
2797 return ndlp;
2798 }
2799 return NULL;
2800 }
2801
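/*
 * Editor's note: a sketch of how a new predicate plugs into the
 * node_filter walker above; example_filter_by_sid and
 * example_findnode_sid are illustrative only, not part of the driver.
 */
static inline int
example_filter_by_sid(struct lpfc_nodelist *ndlp, void *param)
{
	uint32_t *sid = param;

	return ndlp->nlp_sid == *sid;
}

static inline struct lpfc_nodelist *
example_findnode_sid(struct lpfc_vport *vport, uint32_t sid)
{
	/* Like __lpfc_find_node() itself, this assumes the caller
	 * already holds host_lock (see lpfc_find_node() below for
	 * the locking wrapper).
	 */
	return __lpfc_find_node(vport, example_filter_by_sid, &sid);
}
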
2802 /*
2803 * Search the node lists for a remote port matching the filter criteria.
2804 * Takes host_lock itself; lock holders should call __lpfc_find_node().
2805 */
2806 struct lpfc_nodelist *
2807 lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2808 {
2809 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2810 struct lpfc_nodelist *ndlp;
2811
2812 spin_lock_irq(shost->host_lock);
2813 ndlp = __lpfc_find_node(vport, filter, param);
2814 spin_unlock_irq(shost->host_lock);
2815 return ndlp;
2816 }
2817
2818 /*
2819 * This routine looks up the ndlp lists for the given RPI. If the rpi is
2820 * found, it returns the node list element pointer, else it returns NULL.
2821 */
2822 struct lpfc_nodelist *
2823 __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2824 {
2825 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
2826 }
2827
2828 struct lpfc_nodelist *
2829 lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2830 {
2831 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2832 struct lpfc_nodelist *ndlp;
2833
2834 spin_lock_irq(shost->host_lock);
2835 ndlp = __lpfc_findnode_rpi(vport, rpi);
2836 spin_unlock_irq(shost->host_lock);
2837 return ndlp;
2838 }
2839
2840 /*
2841 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
2842 * found, it returns the node list element pointer, else it returns NULL.
2843 */
2844 struct lpfc_nodelist *
2845 lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
2846 {
2847 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2848 struct lpfc_nodelist *ndlp;
2849
2850 spin_lock_irq(shost->host_lock);
2851 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
2852 spin_unlock_irq(shost->host_lock);
2853 return ndlp;
2854 }
2855
2856 void
2857 lpfc_dev_loss_delay(unsigned long ptr)
2858 {
2859 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2860 struct lpfc_vport *vport = ndlp->vport;
2861 struct lpfc_hba *phba = vport->phba;
2862 struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt;
2863 unsigned long flags;
2864
2867 spin_lock_irqsave(&phba->hbalock, flags);
2868 if (!list_empty(&evtp->evt_listp)) {
2869 spin_unlock_irqrestore(&phba->hbalock, flags);
2870 return;
2871 }
2872
2873 evtp->evt_arg1 = ndlp;
2874 evtp->evt = LPFC_EVT_DEV_LOSS_DELAY;
2875 list_add_tail(&evtp->evt_listp, &phba->work_list);
2876 if (phba->work_wait)
2877 lpfc_worker_wake_up(phba);
2878 spin_unlock_irqrestore(&phba->hbalock, flags);
2879 return;
2880 }
2881
2882 void
2883 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2884 uint32_t did)
2885 {
2886 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2887 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2888 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
2889 init_timer(&ndlp->nlp_delayfunc);
2890 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2891 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2892 ndlp->nlp_DID = did;
2893 ndlp->vport = vport;
2894 ndlp->nlp_sid = NLP_NO_SID;
2895 INIT_LIST_HEAD(&ndlp->nlp_listp);
2896 kref_init(&ndlp->kref);
2897
2898 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2899 "node init: did:x%x",
2900 ndlp->nlp_DID, 0, 0);
2901
2902 return;
2903 }
2904
2905 void
2906 lpfc_nlp_release(struct kref *kref)
2907 {
2908 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
2909 kref);
2910
2911 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2912 "node release: did:x%x flg:x%x type:x%x",
2913 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2914
2915 lpfc_nlp_remove(ndlp->vport, ndlp);
2916 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
2917 }
2918
2919 struct lpfc_nodelist *
2920 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
2921 {
2922 if (ndlp)
2923 kref_get(&ndlp->kref);
2924 return ndlp;
2925 }
2926
2927 int
2928 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
2929 {
2930 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
2931 }