/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);

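/*
 * Abort any outstanding FCP I/O to the SCSI target behind this remote
 * port and unblock the scsi target so it does not stay blocked if the
 * vport is removed or the driver unloads before devloss fires.
 */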
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist * ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->vport->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	/*
	 * A device is normally blocked for rediscovery and unblocked when
	 * devloss timeout happens. In case a vport is removed or the driver
	 * is unloaded before the devloss timeout fires, we need to unblock
	 * here.
	 */
	scsi_target_unblock(&rport->dev);
	return;
}

/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist * ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->scsi_target_id != -1) {
			printk(KERN_ERR "Cannot find remote node"
			       " for rport in dev_loss_tmo_callbk x%x\n",
			       rport->port_id);
		}
		return;
	}

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	evtp->evt_arg1 = ndlp;
	evtp->evt = LPFC_EVT_DEV_LOSS;
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		wake_up(phba->work_wait);

	spin_unlock_irq(&phba->hbalock);

	return;
}

/*
 * This function is called from the worker thread when dev_loss_tmo
 * expires.
 */
static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int put_node;
	int put_rport;
	int warn_on = 0;

	rport = ndlp->rport;

	if (!rport)
		return;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
	if (vport->load_flag & FC_UNLOADING)
		warn_on = 0;

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) {
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
	}
}


void
lpfc_worker_wake_up(struct lpfc_hba *phba)
{
	wake_up(phba->work_wait);
	return;
}

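/*
 * Drain the HBA work event list: dequeue each queued lpfc_work_evt and
 * dispatch it (ELS retry, dev_loss, online/offline, warm start, kill),
 * freeing the event unless it is embedded in another structure.
 */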
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_nlp_get(ndlp);
			lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}

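/*
 * Worker-thread processing of host attention status: handle error,
 * mailbox and link attention, dispatch per-vport work events, service
 * deferred slow-path (ELS) ring events, then drain the work list.
 */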
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i < LPFC_MAX_VPORTS; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			work_port_events = vport->work_port_events;
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
			spin_lock_irq(&vport->work_port_lock);
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
		}
	lpfc_destroy_vport_work_array(vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK)
	    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
		} else {
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
		}
		/*
		 * Turn on Ring interrupts
		 */
		spin_lock_irq(&phba->hbalock);
		control = readl(phba->HCregaddr);
		if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
			lpfc_debugfs_slow_ring_trc(phba,
				"WRK Enable ring: cntl:x%x hacopy:x%x",
				control, ha_copy, 0);

			control |= (HC_R0INT_ENA << LPFC_ELS_RING);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
		}
		else {
			lpfc_debugfs_slow_ring_trc(phba,
				"WRK Ring ok: cntl:x%x hacopy:x%x",
				control, ha_copy, 0);
		}
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_work_list_done(phba);
}

static int
check_work_wait_done(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	int rc = 0;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->work_port_events) {
			rc = 1;
			break;
		}
	}
	if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
	    kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
		rc = 1;
		phba->work_found++;
	} else
		phba->work_found = 0;
	spin_unlock_irq(&phba->hbalock);
	return rc;
}


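/*
 * Main loop of the lpfc worker thread: sleep until check_work_wait_done()
 * reports pending work, then call lpfc_work_done(), yielding the CPU after
 * LPFC_MAX_WORKER_ITERATION consecutive busy passes.
 */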
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);

	set_user_nice(current, -20);
	phba->work_wait = &work_waitq;
	phba->work_found = 0;

	while (1) {

		rc = wait_event_interruptible(work_waitq,
					      check_work_wait_done(phba));

		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		lpfc_work_done(phba);

		/* If there is a lot of slow ring work, like during link up
		 * check_work_wait_done() may cause this thread to not give
		 * up the CPU for very long periods of time. This may cause
		 * soft lockups or other problems. To avoid these situations
		 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
		 * consecutive iterations.
		 */
		if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
			phba->work_found = 0;
			schedule();
		}
	}
	phba->work_wait = NULL;
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return 1;
}

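/*
 * Walk the vport's node list after a link failure, unregistering RPIs
 * where needed and issuing DEVICE_RM or DEVICE_RECOVERY events to each
 * node; fabric nodes are left alone unless a full remove was requested.
 */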
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;

		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if (!remove && ndlp->nlp_type & NLP_FABRIC)
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		lpfc_mbx_unreg_vpi(vport);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	}
}

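/*
 * Per-vport link failure handling: flush pending RSCN and ELS activity,
 * recover or remove the vport's nodes, and stop the discovery timer.
 */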
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}

static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

}

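/*
 * HBA-wide link down handling: mark the link down, run the per-vport
 * link-down handler on every active vport, clean up firmware default
 * RPIs, and reset the local DID when running in pt2pt mode.
 */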
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN) {
		return 0;
	}
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		phba->pport->fc_flag &= ~FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}

static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;

		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup it's safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up: top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);

}

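/*
 * HBA-wide link up processing: unblock fabric IOCBs, run the per-vport
 * link-up handler for every active vport, and issue CLEAR_LA when NPIV
 * is enabled.
 */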
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(vports);
	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	return;

	vport->num_disc_nodes = 0;
	/* go thru NPR nodes and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
	}

	vport->port_state = LPFC_VPORT_READY;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
	spin_unlock_irq(shost->host_lock);

	del_timer_sync(&phba->fc_estabtmo);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}


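/*
 * Completion handler for the CONFIG_LINK mailbox command issued at link
 * up: either wait for FAN on a public loop or kick off discovery with an
 * initial FLOGI.
 */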
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI) {
		lpfc_initial_flogi(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

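/*
 * Completion handler for READ_SPARAM: copy the service parameters into
 * the vport, apply any soft WWNN/WWPN overrides, and record the node and
 * port names.
 */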
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport *vport = pmb->vport;


	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x>\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(vport->fc_nodename));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(vport->fc_portname));
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

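/*
 * Handle a link-up attention event: record link speed and topology,
 * resolve the local DID from the loop map or the preferred DID, then
 * issue READ_SPARAM and CONFIG_LINK mailbox commands to continue the
 * link bring-up.
 */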
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(&phba->hbalock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	case LA_8GHZ_LINK:
		phba->fc_linkspeed = LA_8GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* Get Loop Map information */
		if (la->il)
			vport->fc_flag |= FC_LBIT;

		vport->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			    (phba->sli_rev == 3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		vport->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox, 0);
		sparam_mbox->vport = vport;
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			mempool_free(sparam_mbox, phba->mbox_mem_pool);
			if (cfglink_mbox)
				mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	}

	if (cfglink_mbox) {
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
		mempool_free(cfglink_mbox, phba->mbox_mem_pool);
	}
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_linkdown(phba);

	/* turn on Link Attention interrupts - no CLEAR_LA needed */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
}

/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (la->pb)
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if (((phba->fc_eventTag + 1) < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;

	if (la->attType == AT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0]);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else {
		phba->fc_stat.LinkDown++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		lpfc_mbx_issue_link_down(phba);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_nlp_put(ndlp);

	return;
}

static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	}
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if (vport->load_flag & FC_UNLOADING)
		scsi_host_put(shost);
}

void
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
	}
}

static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport **vports;
	int i;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	pmb->context1 = NULL;
	pmb->context2 = NULL;
	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_nlp_put(ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		return;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	lpfc_nlp_put(ndlp);	/* Drop the reference from the mbox */

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for(i = 0;
			    i < LPFC_MAX_VPORTS && vports[i] != NULL;
			    i++) {
				if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
					continue;
				if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
					lpfc_initial_fdisc(vports[i]);
				else if (phba->sli3_options &
						LPFC_SLI3_NPIV_ENABLED) {
					lpfc_vport_set_state(vports[i],
						FC_VPORT_NO_FABRIC_SUPP);
					lpfc_printf_vlog(vport, KERN_ERR,
							 LOG_ELS,
							 "0259 No NPIV "
							 "Fabric support\n");
				}
			}
		lpfc_destroy_vport_work_array(vports);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	if (mb->mbxStatus) {
out:
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

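/*
 * Register (or re-register) an ndlp with the FC transport as a remote
 * port, set its roles, and capture the scsi_target_id assigned by the
 * midlayer.
 */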
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port.  But fc_remote_port_add zeros the space to which
	 * rport->dd_data points.  So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
		lpfc_nlp_put(ndlp);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;


	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}

static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	fc_remote_port_delete(rport);

	return;
}

static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}

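/*
 * Apply the side effects of an ndlp state transition: adjust node type
 * and flags, and register with or unregister from the FC transport when
 * a node moves into or out of the mapped/unmapped states.
 */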
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	/*
	 * If we added to the Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range, move the node to the
	 * Unmapped List.
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}

static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}

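/*
 * Move an ndlp to a new discovery state: log and trace the transition,
 * update the per-state node counters, and run the state cleanup hooks.
 */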
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}

1642void
2e0fef85 1643lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
de0c5b32 1644{
2e0fef85
JS
1645 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1646
de0c5b32 1647 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
2e0fef85 1648 lpfc_cancel_retry_delay_tmo(vport, ndlp);
de0c5b32 1649 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
2e0fef85
JS
1650 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1651 spin_lock_irq(shost->host_lock);
685f0bf7 1652 list_del_init(&ndlp->nlp_listp);
2e0fef85 1653 spin_unlock_irq(shost->host_lock);
858c9f6c
JS
1654 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1655 NLP_STE_UNUSED_NODE);
de0c5b32
JS
1656}
1657
1658void
2e0fef85 1659lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
de0c5b32 1660{
87af33fe
JS
1661 /*
1662 * Use of lpfc_drop_node and UNUSED list. lpfc_drop_node should
1663 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
1664 * the ndlp from the vport. The ndlp resides on the UNUSED list
1665 * until ALL other outstanding threads have completed. Thus, if a
1666 * ndlp is on the UNUSED list already, we should never do another
1667 * lpfc_drop_node() on it.
1668 */
51ef4c26 1669 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
87af33fe 1670 lpfc_nlp_put(ndlp);
98c9ea5c 1671 return;
dea3101e
JB
1672}
1673
1674/*
1675 * Start / ReStart rescue timer for Discovery / RSCN handling
1676 */
1677void
2e0fef85 1678lpfc_set_disctmo(struct lpfc_vport *vport)
dea3101e 1679{
2e0fef85
JS
1680 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1681 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
1682 uint32_t tmo;
1683
2e0fef85 1684 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
c9f8735b
JW
1685		/* For FAN, timeout should be greater than edtov */
1686 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1687 } else {
1688		/* Normal discovery timeout should be greater than ELS/CT timeout
1689 * FC spec states we need 3 * ratov for CT requests
1690 */
1691 tmo = ((phba->fc_ratov * 3) + 3);
1692 }
dea3101e 1693
858c9f6c
JS
1694
1695 if (!timer_pending(&vport->fc_disctmo)) {
1696 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1697 "set disc timer: tmo:x%x state:x%x flg:x%x",
1698 tmo, vport->port_state, vport->fc_flag);
1699 }
1700
2e0fef85
JS
1701 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
1702 spin_lock_irq(shost->host_lock);
1703 vport->fc_flag |= FC_DISC_TMO;
1704 spin_unlock_irq(shost->host_lock);
dea3101e
JB
1705
1706 /* Start Discovery Timer state <hba_state> */
e8b62011
JS
1707 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1708 "0247 Start Discovery Timer state x%x "
1709 "Data: x%x x%lx x%x x%x\n",
1710 vport->port_state, tmo,
1711 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1712 vport->fc_adisc_cnt);
dea3101e
JB
1713
1714 return;
1715}
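/*
 * Worked example (illustrative, values assumed): with fc_edtov = 2000 ms
 * the FAN case above gives tmo = ((2000 + 999) / 1000) + 1 = 3 seconds,
 * i.e. E_D_TOV rounded up to whole seconds plus one.  With fc_ratov = 10
 * the normal case gives tmo = (10 * 3) + 3 = 33 seconds, covering the
 * 3 * R_A_TOV the FC spec requires for CT requests plus a small pad.
 * The timer itself is armed in jiffies as jiffies + HZ * tmo.
 */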
1716
1717/*
1718 * Cancel rescue timer for Discovery / RSCN handling
1719 */
1720int
2e0fef85 1721lpfc_can_disctmo(struct lpfc_vport *vport)
dea3101e 1722{
2e0fef85 1723 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2e0fef85
JS
1724 unsigned long iflags;
1725
858c9f6c
JS
1726 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1727 "can disc timer: state:x%x rtry:x%x flg:x%x",
1728 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
1729
dea3101e 1730 /* Turn off discovery timer if its running */
2e0fef85
JS
1731 if (vport->fc_flag & FC_DISC_TMO) {
1732 spin_lock_irqsave(shost->host_lock, iflags);
1733 vport->fc_flag &= ~FC_DISC_TMO;
1734 spin_unlock_irqrestore(shost->host_lock, iflags);
1735 del_timer_sync(&vport->fc_disctmo);
1736 spin_lock_irqsave(&vport->work_port_lock, iflags);
1737 vport->work_port_events &= ~WORKER_DISC_TMO;
1738 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
dea3101e
JB
1739 }
1740
1741 /* Cancel Discovery Timer state <hba_state> */
e8b62011
JS
1742 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1743 "0248 Cancel Discovery Timer state x%x "
1744 "Data: x%x x%x x%x\n",
1745 vport->port_state, vport->fc_flag,
1746 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
2fe165b6 1747 return 0;
dea3101e
JB
1748}
1749
1750/*
1751 * Check specified ring for outstanding IOCB on the SLI queue
1752 * Return true if iocb matches the specified nport
1753 */
1754int
2e0fef85
JS
1755lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1756 struct lpfc_sli_ring *pring,
1757 struct lpfc_iocbq *iocb,
1758 struct lpfc_nodelist *ndlp)
dea3101e 1759{
2e0fef85
JS
1760 struct lpfc_sli *psli = &phba->sli;
1761 IOCB_t *icmd = &iocb->iocb;
92d7f7b0
JS
1762 struct lpfc_vport *vport = ndlp->vport;
1763
1764 if (iocb->vport != vport)
1765 return 0;
1766
dea3101e
JB
1767 if (pring->ringno == LPFC_ELS_RING) {
1768 switch (icmd->ulpCommand) {
1769 case CMD_GEN_REQUEST64_CR:
1770 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
2fe165b6 1771 return 1;
dea3101e 1772 case CMD_ELS_REQUEST64_CR:
10d4e957
JS
1773 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
1774 return 1;
dea3101e
JB
1775 case CMD_XMIT_ELS_RSP64_CX:
1776 if (iocb->context1 == (uint8_t *) ndlp)
2fe165b6 1777 return 1;
dea3101e 1778 }
a4bc3379 1779 } else if (pring->ringno == psli->extra_ring) {
dea3101e
JB
1780
1781 } else if (pring->ringno == psli->fcp_ring) {
1782 /* Skip match check if waiting to relogin to FCP target */
1783 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
92d7f7b0 1784 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
2fe165b6 1785 return 0;
dea3101e
JB
1786 }
1787 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
2fe165b6 1788 return 1;
dea3101e
JB
1789 }
1790 } else if (pring->ringno == psli->next_ring) {
1791
1792 }
2fe165b6 1793 return 0;
dea3101e
JB
1794}
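/*
 * Descriptive note (added for clarity, not functional): on the ELS ring an
 * iocb is claimed for this node when its rpi (GEN_REQUEST64), its remote
 * DID (ELS_REQUEST64) or the ndlp stashed in context1 (XMIT_ELS_RSP64)
 * refers to the node; the switch cases above fall through, so an earlier
 * command type is also tested against the later criteria.  On the FCP
 * ring the match is by rpi, unless the node is an FCP target waiting to
 * re-login (NLP_DELAY_TMO), in which case nothing is claimed.  The extra
 * and next rings never match.
 */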
1795
1796/*
1797 * Free resources / clean up outstanding I/Os
1798 * associated with nlp_rpi in the LPFC_NODELIST entry.
1799 */
1800static int
2e0fef85 1801lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
dea3101e 1802{
2534ba75 1803 LIST_HEAD(completions);
dea3101e
JB
1804 struct lpfc_sli *psli;
1805 struct lpfc_sli_ring *pring;
1806 struct lpfc_iocbq *iocb, *next_iocb;
1807 IOCB_t *icmd;
1808 uint32_t rpi, i;
1809
92d7f7b0
JS
1810 lpfc_fabric_abort_nport(ndlp);
1811
dea3101e
JB
1812 /*
1813 * Everything that matches on txcmplq will be returned
1814 * by firmware with a no rpi error.
1815 */
1816 psli = &phba->sli;
1817 rpi = ndlp->nlp_rpi;
1818 if (rpi) {
1819 /* Now process each ring */
1820 for (i = 0; i < psli->num_rings; i++) {
1821 pring = &psli->ring[i];
1822
2e0fef85 1823 spin_lock_irq(&phba->hbalock);
dea3101e 1824 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
2e0fef85 1825 list) {
dea3101e
JB
1826 /*
1827 * Check to see if iocb matches the nport we are
1828 * looking for
1829 */
92d7f7b0
JS
1830 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
1831 ndlp))) {
dea3101e
JB
1832				/* It matches, so dequeue and call compl
1833 with an error */
2534ba75
JS
1834 list_move_tail(&iocb->list,
1835 &completions);
dea3101e 1836 pring->txq_cnt--;
dea3101e
JB
1837 }
1838 }
2e0fef85 1839 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
1840 }
1841 }
2534ba75
JS
1842
1843 while (!list_empty(&completions)) {
1844 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
92d7f7b0 1845 list_del_init(&iocb->list);
2534ba75 1846
2e0fef85
JS
1847 if (!iocb->iocb_cmpl)
1848 lpfc_sli_release_iocbq(phba, iocb);
1849 else {
2534ba75
JS
1850 icmd = &iocb->iocb;
1851 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1852 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2e0fef85
JS
1853 (iocb->iocb_cmpl)(phba, iocb, iocb);
1854 }
2534ba75
JS
1855 }
1856
2fe165b6 1857 return 0;
dea3101e
JB
1858}
1859
1860/*
1861 * Free rpi associated with LPFC_NODELIST entry.
1862 * This routine is called from lpfc_cleanup_node(), when we are removing
1863 * a LPFC_NODELIST entry. It is also called if the driver initiates a
1864 * LOGO that completes successfully, and we are waiting to PLOGI back
1865 * to the remote NPort. In addition, it is called after we receive
1866 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
1867 * we are waiting to PLOGI back to the remote NPort.
1868 */
1869int
2e0fef85 1870lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
dea3101e 1871{
2e0fef85
JS
1872 struct lpfc_hba *phba = vport->phba;
1873 LPFC_MBOXQ_t *mbox;
dea3101e
JB
1874 int rc;
1875
1876 if (ndlp->nlp_rpi) {
2e0fef85
JS
1877 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1878 if (mbox) {
92d7f7b0 1879 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
ed957684 1880 mbox->vport = vport;
92d7f7b0 1881 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0b727fea 1882 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
dea3101e 1883 if (rc == MBX_NOT_FINISHED)
2e0fef85 1884 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e 1885 }
dea3101e
JB
1886 lpfc_no_rpi(phba, ndlp);
1887 ndlp->nlp_rpi = 0;
1888 return 1;
1889 }
1890 return 0;
1891}
1892
92d7f7b0
JS
1893void
1894lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1895{
1896 struct lpfc_hba *phba = vport->phba;
1897 LPFC_MBOXQ_t *mbox;
1898 int rc;
1899
1900 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1901 if (mbox) {
1902 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1903 mbox->vport = vport;
1904 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0b727fea 1905 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
92d7f7b0
JS
1906 if (rc == MBX_NOT_FINISHED) {
1907 mempool_free(mbox, phba->mbox_mem_pool);
1908 }
1909 }
1910}
1911
1912void
1913lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1914{
1915 struct lpfc_hba *phba = vport->phba;
1916 LPFC_MBOXQ_t *mbox;
1917 int rc;
1918
1919 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1920 if (mbox) {
1921 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1922 mbox->vport = vport;
1923 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0b727fea 1924 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
92d7f7b0 1925 if (rc == MBX_NOT_FINISHED) {
e8b62011
JS
1926 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1927 "1815 Could not issue "
1928 "unreg_did (default rpis)\n");
92d7f7b0
JS
1929 mempool_free(mbox, phba->mbox_mem_pool);
1930 }
1931 }
1932}
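#if 0
/*
 * Illustrative sketch only (not part of the driver): the no-wait mailbox
 * pattern the three unreg helpers above share.  The helper name
 * example_issue_unreg and its reuse of the 0xffff wildcard rpi mirror
 * lpfc_unreg_all_rpis(); everything else is driver API used exactly as
 * above.
 */
static void example_issue_unreg(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;

	/* 1. Allocate the mailbox command from the driver's mempool. */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return;

	/* 2. Build the command and attach vport + default completion. */
	lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	/* 3. Fire and forget; only clean up if SLI refused to queue it. */
	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
		mempool_free(mbox, phba->mbox_mem_pool);
}
#endif	/* 0 - illustrative sketch */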
1933
dea3101e
JB
1934/*
1935 * Free resources associated with LPFC_NODELIST entry
1936 * so it can be freed.
1937 */
1938static int
2e0fef85 1939lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
dea3101e 1940{
2e0fef85
JS
1941 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1942 struct lpfc_hba *phba = vport->phba;
1943 LPFC_MBOXQ_t *mb, *nextmb;
dea3101e 1944 struct lpfc_dmabuf *mp;
dea3101e
JB
1945
1946 /* Cleanup node for NPort <nlp_DID> */
e8b62011
JS
1947 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1948 "0900 Cleanup node for NPort x%x "
1949 "Data: x%x x%x x%x\n",
1950 ndlp->nlp_DID, ndlp->nlp_flag,
1951 ndlp->nlp_state, ndlp->nlp_rpi);
2e0fef85 1952 lpfc_dequeue_node(vport, ndlp);
dea3101e 1953
dea3101e
JB
1954 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1955 if ((mb = phba->sli.mbox_active)) {
1956 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1957 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1958 mb->context2 = NULL;
1959 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1960 }
1961 }
33ccf8d1 1962
2e0fef85 1963 spin_lock_irq(&phba->hbalock);
dea3101e
JB
1964 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1965 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
92d7f7b0 1966 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
dea3101e
JB
1967 mp = (struct lpfc_dmabuf *) (mb->context1);
1968 if (mp) {
2e0fef85 1969 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
dea3101e
JB
1970 kfree(mp);
1971 }
1972 list_del(&mb->list);
1973 mempool_free(mb, phba->mbox_mem_pool);
329f9bc7 1974 lpfc_nlp_put(ndlp);
dea3101e
JB
1975 }
1976 }
2e0fef85 1977 spin_unlock_irq(&phba->hbalock);
dea3101e 1978
07951076 1979 lpfc_els_abort(phba,ndlp);
2e0fef85 1980 spin_lock_irq(shost->host_lock);
c01f3208 1981 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 1982 spin_unlock_irq(shost->host_lock);
dea3101e 1983
5024ab17 1984 ndlp->nlp_last_elscmd = 0;
dea3101e
JB
1985 del_timer_sync(&ndlp->nlp_delayfunc);
1986
dea3101e
JB
1987 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1988 list_del_init(&ndlp->els_retry_evt.evt_listp);
92d7f7b0
JS
1989 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
1990 list_del_init(&ndlp->dev_loss_evt.evt_listp);
dea3101e 1991
2e0fef85 1992 lpfc_unreg_rpi(vport, ndlp);
dea3101e 1993
2fe165b6 1994 return 0;
dea3101e
JB
1995}
1996
1997/*
1998 * Check to see if we can free the nlp back to the freelist.
1999 * If we are in the middle of using the nlp in the discovery state
2000 * machine, defer the free till we reach the end of the state machine.
2001 */
329f9bc7 2002static void
2e0fef85 2003lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
dea3101e 2004{
a8adb832 2005 struct lpfc_hba *phba = vport->phba;
1dcb58e5 2006 struct lpfc_rport_data *rdata;
a8adb832
JS
2007 LPFC_MBOXQ_t *mbox;
2008 int rc;
dea3101e
JB
2009
2010 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
2e0fef85 2011 lpfc_cancel_retry_delay_tmo(vport, ndlp);
dea3101e
JB
2012 }
2013
a8adb832
JS
2014 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
2015 /* For this case we need to cleanup the default rpi
2016 * allocated by the firmware.
2017 */
2018 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
2019 != NULL) {
2020 rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID,
2021 (uint8_t *) &vport->fc_sparam, mbox, 0);
2022 if (rc) {
2023 mempool_free(mbox, phba->mbox_mem_pool);
2024 }
2025 else {
2026 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
2027 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
2028 mbox->vport = vport;
2029 mbox->context2 = 0;
2030 rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2031 if (rc == MBX_NOT_FINISHED) {
2032 mempool_free(mbox, phba->mbox_mem_pool);
2033 }
2034 }
2035 }
2036 }
2037
2e0fef85 2038 lpfc_cleanup_node(vport, ndlp);
1dcb58e5 2039
2e0fef85 2040 /*
92d7f7b0
JS
2041 * We can get here with a non-NULL ndlp->rport because when we
2042 * unregister a rport we don't break the rport/node linkage. So if we
2043 * do, make sure we don't leave any dangling pointers behind.
2e0fef85 2044 */
92d7f7b0 2045 if (ndlp->rport) {
329f9bc7
JS
2046 rdata = ndlp->rport->dd_data;
2047 rdata->pnode = NULL;
2048 ndlp->rport = NULL;
dea3101e 2049 }
dea3101e
JB
2050}
2051
2052static int
2e0fef85
JS
2053lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2054 uint32_t did)
dea3101e 2055{
2e0fef85 2056 D_ID mydid, ndlpdid, matchdid;
dea3101e
JB
2057
2058 if (did == Bcast_DID)
2fe165b6 2059 return 0;
dea3101e
JB
2060
2061 if (ndlp->nlp_DID == 0) {
2fe165b6 2062 return 0;
dea3101e
JB
2063 }
2064
2065 /* First check for Direct match */
2066 if (ndlp->nlp_DID == did)
2fe165b6 2067 return 1;
dea3101e
JB
2068
2069 /* Next check for area/domain identically equals 0 match */
2e0fef85 2070 mydid.un.word = vport->fc_myDID;
dea3101e 2071 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
2fe165b6 2072 return 0;
dea3101e
JB
2073 }
2074
2075 matchdid.un.word = did;
2076 ndlpdid.un.word = ndlp->nlp_DID;
2077 if (matchdid.un.b.id == ndlpdid.un.b.id) {
2078 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
2079 (mydid.un.b.area == matchdid.un.b.area)) {
2080 if ((ndlpdid.un.b.domain == 0) &&
2081 (ndlpdid.un.b.area == 0)) {
2082 if (ndlpdid.un.b.id)
2fe165b6 2083 return 1;
dea3101e 2084 }
2fe165b6 2085 return 0;
dea3101e
JB
2086 }
2087
2088 matchdid.un.word = ndlp->nlp_DID;
2089 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
2090 (mydid.un.b.area == ndlpdid.un.b.area)) {
2091 if ((matchdid.un.b.domain == 0) &&
2092 (matchdid.un.b.area == 0)) {
2093 if (matchdid.un.b.id)
2fe165b6 2094 return 1;
dea3101e
JB
2095 }
2096 }
2097 }
2fe165b6 2098 return 0;
dea3101e
JB
2099}
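/*
 * Worked example (illustrative, addresses assumed): with vport->fc_myDID =
 * 0x010200, a did of 0x010205 matches an ndlp whose nlp_DID is still the
 * bare ALPA 0x000005: the low byte (id/ALPA) agrees, the did carries our
 * own domain and area, and the ndlp's domain/area are still zero from
 * private-loop addressing before the fabric assigned full 24-bit DIDs.
 */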
2100
685f0bf7 2101/* Search for a nodelist entry */
2e0fef85
JS
2102static struct lpfc_nodelist *
2103__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
dea3101e 2104{
2fb9bd8b 2105 struct lpfc_nodelist *ndlp;
dea3101e
JB
2106 uint32_t data1;
2107
2e0fef85
JS
2108 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2109 if (lpfc_matchdid(vport, ndlp, did)) {
685f0bf7
JS
2110 data1 = (((uint32_t) ndlp->nlp_state << 24) |
2111 ((uint32_t) ndlp->nlp_xri << 16) |
2112 ((uint32_t) ndlp->nlp_type << 8) |
2113 ((uint32_t) ndlp->nlp_rpi & 0xff));
e8b62011
JS
2114 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2115 "0929 FIND node DID "
2116 "Data: x%p x%x x%x x%x\n",
2117 ndlp, ndlp->nlp_DID,
2118 ndlp->nlp_flag, data1);
685f0bf7 2119 return ndlp;
dea3101e
JB
2120 }
2121 }
66a9ed66 2122
dea3101e 2123 /* FIND node did <did> NOT FOUND */
e8b62011
JS
2124 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2125 "0932 FIND node did x%x NOT FOUND.\n", did);
dea3101e
JB
2126 return NULL;
2127}
2128
2129struct lpfc_nodelist *
2e0fef85
JS
2130lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2131{
2132 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2133 struct lpfc_nodelist *ndlp;
2134
2135 spin_lock_irq(shost->host_lock);
2136 ndlp = __lpfc_findnode_did(vport, did);
2137 spin_unlock_irq(shost->host_lock);
2138 return ndlp;
2139}
2140
2141struct lpfc_nodelist *
2142lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
dea3101e 2143{
2e0fef85 2144 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 2145 struct lpfc_nodelist *ndlp;
dea3101e 2146
2e0fef85 2147 ndlp = lpfc_findnode_did(vport, did);
c9f8735b 2148 if (!ndlp) {
2e0fef85
JS
2149 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
2150 lpfc_rscn_payload_check(vport, did) == 0)
dea3101e
JB
2151 return NULL;
2152 ndlp = (struct lpfc_nodelist *)
2e0fef85 2153 mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
dea3101e
JB
2154 if (!ndlp)
2155 return NULL;
2e0fef85
JS
2156 lpfc_nlp_init(vport, ndlp, did);
2157 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2158 spin_lock_irq(shost->host_lock);
dea3101e 2159 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 2160 spin_unlock_irq(shost->host_lock);
dea3101e
JB
2161 return ndlp;
2162 }
2e0fef85
JS
2163 if (vport->fc_flag & FC_RSCN_MODE) {
2164 if (lpfc_rscn_payload_check(vport, did)) {
87af33fe
JS
2165			/* If we've already received a PLOGI from this NPort
2166 * we don't need to try to discover it again.
2167 */
2168 if (ndlp->nlp_flag & NLP_RCV_PLOGI)
2169 return NULL;
2170
2e0fef85 2171 spin_lock_irq(shost->host_lock);
dea3101e 2172 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 2173 spin_unlock_irq(shost->host_lock);
c9f8735b
JW
2174
2175 /* Since this node is marked for discovery,
2176 * delay timeout is not needed.
2177 */
fdcebe28 2178 if (ndlp->nlp_flag & NLP_DELAY_TMO)
2e0fef85 2179 lpfc_cancel_retry_delay_tmo(vport, ndlp);
071fbd3d 2180 } else
dea3101e 2181 ndlp = NULL;
2fe165b6 2182 } else {
87af33fe
JS
2183		/* If we've already received a PLOGI from this NPort,
2184 * or we are already in the process of discovery on it,
2185 * we don't need to try to discover it again.
2186 */
685f0bf7 2187 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
87af33fe
JS
2188 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2189 ndlp->nlp_flag & NLP_RCV_PLOGI)
dea3101e 2190 return NULL;
2e0fef85
JS
2191 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2192 spin_lock_irq(shost->host_lock);
dea3101e 2193 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 2194 spin_unlock_irq(shost->host_lock);
dea3101e
JB
2195 }
2196 return ndlp;
2197}
2198
2199/* Build a list of nodes to discover based on the loopmap */
2200void
2e0fef85 2201lpfc_disc_list_loopmap(struct lpfc_vport *vport)
dea3101e 2202{
2e0fef85 2203 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
2204 int j;
2205 uint32_t alpa, index;
2206
2e0fef85 2207 if (!lpfc_is_link_up(phba))
dea3101e 2208 return;
2e0fef85
JS
2209
2210 if (phba->fc_topology != TOPOLOGY_LOOP)
dea3101e 2211 return;
dea3101e
JB
2212
2213 /* Check for loop map present or not */
2214 if (phba->alpa_map[0]) {
2215 for (j = 1; j <= phba->alpa_map[0]; j++) {
2216 alpa = phba->alpa_map[j];
2e0fef85 2217 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
dea3101e 2218 continue;
2e0fef85 2219 lpfc_setup_disc_node(vport, alpa);
dea3101e
JB
2220 }
2221 } else {
2222 /* No alpamap, so try all alpa's */
2223 for (j = 0; j < FC_MAXLOOP; j++) {
2224 /* If cfg_scan_down is set, start from highest
2225 * ALPA (0xef) to lowest (0x1).
2226 */
3de2a653 2227 if (vport->cfg_scan_down)
dea3101e
JB
2228 index = j;
2229 else
2230 index = FC_MAXLOOP - j - 1;
2231 alpa = lpfcAlpaArray[index];
2e0fef85 2232 if ((vport->fc_myDID & 0xff) == alpa)
dea3101e 2233 continue;
2e0fef85 2234 lpfc_setup_disc_node(vport, alpa);
dea3101e
JB
2235 }
2236 }
2237 return;
2238}
2239
dea3101e 2240void
2e0fef85 2241lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
dea3101e 2242{
dea3101e 2243 LPFC_MBOXQ_t *mbox;
2e0fef85
JS
2244 struct lpfc_sli *psli = &phba->sli;
2245 struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
2246 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
2247 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
2248 int rc;
2249
92d7f7b0
JS
2250 /*
2251 * if it's not a physical port or if we already send
2252 * clear_la then don't send it.
2253 */
2254 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2255 (vport->port_type != LPFC_PHYSICAL_PORT))
2256 return;
2257
2e0fef85
JS
2258 /* Link up discovery */
2259 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
2260 phba->link_state = LPFC_CLEAR_LA;
2261 lpfc_clear_la(phba, mbox);
2262 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2263 mbox->vport = vport;
0b727fea 2264 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2e0fef85
JS
2265 if (rc == MBX_NOT_FINISHED) {
2266 mempool_free(mbox, phba->mbox_mem_pool);
2267 lpfc_disc_flush_list(vport);
2268 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2269 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2270 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
92d7f7b0
JS
2271 phba->link_state = LPFC_HBA_ERROR;
2272 }
2273 }
2274}
2275
2276/* Reg_vpi to tell firmware to resume normal operations */
2277void
2278lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2279{
2280 LPFC_MBOXQ_t *regvpimbox;
2281
2282 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2283 if (regvpimbox) {
2284 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2285 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2286 regvpimbox->vport = vport;
0b727fea 2287 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
92d7f7b0
JS
2288 == MBX_NOT_FINISHED) {
2289 mempool_free(regvpimbox, phba->mbox_mem_pool);
2e0fef85
JS
2290 }
2291 }
2292}
2293
2294/* Start Link up / RSCN discovery on NPR nodes */
2295void
2296lpfc_disc_start(struct lpfc_vport *vport)
2297{
2298 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2299 struct lpfc_hba *phba = vport->phba;
685f0bf7 2300 uint32_t num_sent;
dea3101e 2301 uint32_t clear_la_pending;
685f0bf7 2302 int did_changed;
dea3101e 2303
2e0fef85 2304 if (!lpfc_is_link_up(phba))
dea3101e 2305 return;
2e0fef85
JS
2306
2307 if (phba->link_state == LPFC_CLEAR_LA)
dea3101e
JB
2308 clear_la_pending = 1;
2309 else
2310 clear_la_pending = 0;
2311
2e0fef85
JS
2312 if (vport->port_state < LPFC_VPORT_READY)
2313 vport->port_state = LPFC_DISC_AUTH;
dea3101e 2314
2e0fef85
JS
2315 lpfc_set_disctmo(vport);
2316
2317 if (vport->fc_prevDID == vport->fc_myDID)
dea3101e 2318 did_changed = 0;
2e0fef85 2319 else
dea3101e 2320 did_changed = 1;
2e0fef85
JS
2321
2322 vport->fc_prevDID = vport->fc_myDID;
2323 vport->num_disc_nodes = 0;
dea3101e
JB
2324
2325 /* Start Discovery state <hba_state> */
e8b62011
JS
2326 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2327 "0202 Start Discovery hba state x%x "
2328 "Data: x%x x%x x%x\n",
2329 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
2330 vport->fc_adisc_cnt);
dea3101e
JB
2331
2332 /* First do ADISCs - if any */
2e0fef85 2333 num_sent = lpfc_els_disc_adisc(vport);
dea3101e
JB
2334
2335 if (num_sent)
2336 return;
2337
92d7f7b0
JS
2338 /*
2339 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
2340 * continue discovery.
2341 */
2342 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2343 !(vport->fc_flag & FC_RSCN_MODE)) {
2344 lpfc_issue_reg_vpi(phba, vport);
2345 return;
2346 }
2347
2348 /*
2349 * For SLI2, we need to set port_state to READY and continue
2350 * discovery.
2351 */
2e0fef85 2352 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
dea3101e 2353 /* If we get here, there is nothing to ADISC */
92d7f7b0 2354 if (vport->port_type == LPFC_PHYSICAL_PORT)
2e0fef85 2355 lpfc_issue_clear_la(phba, vport);
2e0fef85 2356
92d7f7b0 2357 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2e0fef85
JS
2358 vport->num_disc_nodes = 0;
2359 /* go thru NPR nodes and issue ELS PLOGIs */
2360 if (vport->fc_npr_cnt)
2361 lpfc_els_disc_plogi(vport);
2362
2363 if (!vport->num_disc_nodes) {
2364 spin_lock_irq(shost->host_lock);
2365 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2366 spin_unlock_irq(shost->host_lock);
92d7f7b0 2367 lpfc_can_disctmo(vport);
dea3101e
JB
2368 }
2369 }
92d7f7b0 2370 vport->port_state = LPFC_VPORT_READY;
dea3101e
JB
2371 } else {
2372 /* Next do PLOGIs - if any */
2e0fef85 2373 num_sent = lpfc_els_disc_plogi(vport);
dea3101e
JB
2374
2375 if (num_sent)
2376 return;
2377
2e0fef85 2378 if (vport->fc_flag & FC_RSCN_MODE) {
dea3101e
JB
2379 /* Check to see if more RSCNs came in while we
2380 * were processing this one.
2381 */
2e0fef85
JS
2382 if ((vport->fc_rscn_id_cnt == 0) &&
2383 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
2384 spin_lock_irq(shost->host_lock);
2385 vport->fc_flag &= ~FC_RSCN_MODE;
2386 spin_unlock_irq(shost->host_lock);
92d7f7b0 2387 lpfc_can_disctmo(vport);
2fe165b6 2388 } else
2e0fef85 2389 lpfc_els_handle_rscn(vport);
dea3101e
JB
2390 }
2391 }
2392 return;
2393}
2394
2395/*
2396 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
2397 * ring that match the specified nodelist.
2398 */
2399static void
2e0fef85 2400lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
dea3101e 2401{
2534ba75 2402 LIST_HEAD(completions);
dea3101e
JB
2403 struct lpfc_sli *psli;
2404 IOCB_t *icmd;
2405 struct lpfc_iocbq *iocb, *next_iocb;
2406 struct lpfc_sli_ring *pring;
dea3101e
JB
2407
2408 psli = &phba->sli;
2409 pring = &psli->ring[LPFC_ELS_RING];
2410
2411 /* Error matching iocb on txq or txcmplq
2412 * First check the txq.
2413 */
2e0fef85 2414 spin_lock_irq(&phba->hbalock);
dea3101e
JB
2415 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2416 if (iocb->context1 != ndlp) {
2417 continue;
2418 }
2419 icmd = &iocb->iocb;
2420 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2421 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2422
2534ba75 2423 list_move_tail(&iocb->list, &completions);
dea3101e 2424 pring->txq_cnt--;
dea3101e
JB
2425 }
2426 }
2427
2428 /* Next check the txcmplq */
2429 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2430 if (iocb->context1 != ndlp) {
2431 continue;
2432 }
2433 icmd = &iocb->iocb;
2e0fef85
JS
2434 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
2435 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
2534ba75
JS
2436 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
2437 }
2438 }
2e0fef85 2439 spin_unlock_irq(&phba->hbalock);
dea3101e 2440
2534ba75
JS
2441 while (!list_empty(&completions)) {
2442 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
92d7f7b0 2443 list_del_init(&iocb->list);
dea3101e 2444
2e0fef85
JS
2445 if (!iocb->iocb_cmpl)
2446 lpfc_sli_release_iocbq(phba, iocb);
2447 else {
2534ba75
JS
2448 icmd = &iocb->iocb;
2449 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2450 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2451 (iocb->iocb_cmpl) (phba, iocb, iocb);
2e0fef85 2452 }
dea3101e 2453 }
dea3101e
JB
2454}
2455
a6ababd2 2456static void
2e0fef85 2457lpfc_disc_flush_list(struct lpfc_vport *vport)
dea3101e
JB
2458{
2459 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 2460 struct lpfc_hba *phba = vport->phba;
dea3101e 2461
2e0fef85
JS
2462 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
2463 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
685f0bf7
JS
2464 nlp_listp) {
2465 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2466 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
2467 lpfc_free_tx(phba, ndlp);
685f0bf7 2468 }
dea3101e
JB
2469 }
2470 }
dea3101e
JB
2471}
2472
92d7f7b0
JS
2473void
2474lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
2475{
2476 lpfc_els_flush_rscn(vport);
2477 lpfc_els_flush_cmd(vport);
2478 lpfc_disc_flush_list(vport);
2479}
2480
dea3101e
JB
2481/*****************************************************************************/
2482/*
2483 * NAME: lpfc_disc_timeout
2484 *
2485 * FUNCTION: Fibre Channel driver discovery timeout routine.
2486 *
2487 * EXECUTION ENVIRONMENT: interrupt only
2488 *
2489 * CALLED FROM:
2490 * Timer function
2491 *
2492 * RETURNS:
2493 * none
2494 */
2495/*****************************************************************************/
2496void
2497lpfc_disc_timeout(unsigned long ptr)
2498{
2e0fef85
JS
2499 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
2500 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
2501 unsigned long flags = 0;
2502
2503 if (unlikely(!phba))
2504 return;
2505
2e0fef85
JS
2506 if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
2507 spin_lock_irqsave(&vport->work_port_lock, flags);
2508 vport->work_port_events |= WORKER_DISC_TMO;
2509 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2510
92d7f7b0 2511 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 2512 if (phba->work_wait)
92d7f7b0
JS
2513 lpfc_worker_wake_up(phba);
2514 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 2515 }
dea3101e
JB
2516 return;
2517}
2518
2519static void
2e0fef85 2520lpfc_disc_timeout_handler(struct lpfc_vport *vport)
dea3101e 2521{
2e0fef85
JS
2522 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2523 struct lpfc_hba *phba = vport->phba;
2524 struct lpfc_sli *psli = &phba->sli;
c9f8735b 2525 struct lpfc_nodelist *ndlp, *next_ndlp;
92d7f7b0 2526 LPFC_MBOXQ_t *initlinkmbox;
dea3101e
JB
2527 int rc, clrlaerr = 0;
2528
2e0fef85 2529 if (!(vport->fc_flag & FC_DISC_TMO))
dea3101e
JB
2530 return;
2531
2e0fef85
JS
2532 spin_lock_irq(shost->host_lock);
2533 vport->fc_flag &= ~FC_DISC_TMO;
2534 spin_unlock_irq(shost->host_lock);
dea3101e 2535
858c9f6c
JS
2536 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2537 "disc timeout: state:x%x rtry:x%x flg:x%x",
2538 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
2539
2e0fef85 2540 switch (vport->port_state) {
dea3101e
JB
2541
2542 case LPFC_LOCAL_CFG_LINK:
2e0fef85
JS
2543 /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
2544 * FAN
2545 */
2546 /* FAN timeout */
e8b62011
JS
2547 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
2548 "0221 FAN timeout\n");
c9f8735b 2549 /* Start discovery by sending FLOGI, clean up old rpis */
2e0fef85 2550 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
685f0bf7
JS
2551 nlp_listp) {
2552 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
2553 continue;
c9f8735b
JW
2554 if (ndlp->nlp_type & NLP_FABRIC) {
2555 /* Clean up the ndlp on Fabric connections */
2e0fef85 2556 lpfc_drop_node(vport, ndlp);
87af33fe 2557
2fe165b6 2558 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
c9f8735b
JW
2559 /* Fail outstanding IO now since device
2560 * is marked for PLOGI.
2561 */
2e0fef85 2562 lpfc_unreg_rpi(vport, ndlp);
c9f8735b
JW
2563 }
2564 }
92d7f7b0 2565 if (vport->port_state != LPFC_FLOGI) {
92d7f7b0 2566 lpfc_initial_flogi(vport);
0ff10d46 2567 return;
92d7f7b0 2568 }
dea3101e
JB
2569 break;
2570
92d7f7b0 2571 case LPFC_FDISC:
dea3101e 2572 case LPFC_FLOGI:
2e0fef85 2573 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
dea3101e 2574 /* Initial FLOGI timeout */
e8b62011
JS
2575 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2576 "0222 Initial %s timeout\n",
87af33fe 2577 vport->vpi ? "FDISC" : "FLOGI");
dea3101e
JB
2578
2579 /* Assume no Fabric and go on with discovery.
2580 * Check for outstanding ELS FLOGI to abort.
2581 */
2582
2583 /* FLOGI failed, so just use loop map to make discovery list */
2e0fef85 2584 lpfc_disc_list_loopmap(vport);
dea3101e
JB
2585
2586 /* Start discovery */
2e0fef85 2587 lpfc_disc_start(vport);
dea3101e
JB
2588 break;
2589
2590 case LPFC_FABRIC_CFG_LINK:
2591		/* port_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2592 NameServer login */
e8b62011
JS
2593 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2594 "0223 Timeout while waiting for "
2595 "NameServer login\n");
dea3101e 2596 /* Next look for NameServer ndlp */
2e0fef85 2597 ndlp = lpfc_findnode_did(vport, NameServer_DID);
dea3101e 2598 if (ndlp)
87af33fe
JS
2599 lpfc_els_abort(phba, ndlp);
2600
2601 /* ReStart discovery */
2602 goto restart_disc;
dea3101e
JB
2603
2604 case LPFC_NS_QRY:
2605 /* Check for wait for NameServer Rsp timeout */
e8b62011
JS
2606 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2607 "0224 NameServer Query timeout "
2608 "Data: x%x x%x\n",
2609 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
dea3101e 2610
92d7f7b0
JS
2611 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2612 /* Try it one more time */
2613 vport->fc_ns_retry++;
2614 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
2615 vport->fc_ns_retry, 0);
2616 if (rc == 0)
2617 break;
dea3101e 2618 }
92d7f7b0 2619 vport->fc_ns_retry = 0;
dea3101e 2620
87af33fe 2621restart_disc:
92d7f7b0
JS
2622 /*
2623 * Discovery is over.
2624 * set port_state to PORT_READY if SLI2.
2625 * cmpl_reg_vpi will set port_state to READY for SLI3.
2626 */
2627 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2628 lpfc_issue_reg_vpi(phba, vport);
2629 else { /* NPIV Not enabled */
2630 lpfc_issue_clear_la(phba, vport);
2631 vport->port_state = LPFC_VPORT_READY;
dea3101e
JB
2632 }
2633
2634 /* Setup and issue mailbox INITIALIZE LINK command */
2635 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2636 if (!initlinkmbox) {
e8b62011
JS
2637 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2638 "0206 Device Discovery "
2639 "completion error\n");
2e0fef85 2640 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
2641 break;
2642 }
2643
2644 lpfc_linkdown(phba);
2645 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2646 phba->cfg_link_speed);
2647 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
ed957684 2648 initlinkmbox->vport = vport;
92d7f7b0 2649 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0b727fea 2650 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
5b8bd0c9 2651 lpfc_set_loopback_flag(phba);
dea3101e
JB
2652 if (rc == MBX_NOT_FINISHED)
2653 mempool_free(initlinkmbox, phba->mbox_mem_pool);
2654
2655 break;
2656
2657 case LPFC_DISC_AUTH:
2658 /* Node Authentication timeout */
e8b62011
JS
2659 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2660 "0227 Node Authentication timeout\n");
2e0fef85
JS
2661 lpfc_disc_flush_list(vport);
2662
92d7f7b0
JS
2663 /*
2664 * set port_state to PORT_READY if SLI2.
2665 * cmpl_reg_vpi will set port_state to READY for SLI3.
2666 */
2667 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2668 lpfc_issue_reg_vpi(phba, vport);
2669 else { /* NPIV Not enabled */
2670 lpfc_issue_clear_la(phba, vport);
2671 vport->port_state = LPFC_VPORT_READY;
dea3101e
JB
2672 }
2673 break;
2674
2e0fef85
JS
2675 case LPFC_VPORT_READY:
2676 if (vport->fc_flag & FC_RSCN_MODE) {
e8b62011
JS
2677 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2678 "0231 RSCN timeout Data: x%x "
2679 "x%x\n",
2680 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
dea3101e
JB
2681
2682 /* Cleanup any outstanding ELS commands */
2e0fef85 2683 lpfc_els_flush_cmd(vport);
dea3101e 2684
2e0fef85
JS
2685 lpfc_els_flush_rscn(vport);
2686 lpfc_disc_flush_list(vport);
dea3101e
JB
2687 }
2688 break;
2e0fef85 2689
92d7f7b0 2690 default:
e8b62011
JS
2691 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2692 "0229 Unexpected discovery timeout, "
2693 "vport State x%x\n", vport->port_state);
2e0fef85
JS
2694 break;
2695 }
2696
2697 switch (phba->link_state) {
2698 case LPFC_CLEAR_LA:
92d7f7b0 2699 /* CLEAR LA timeout */
e8b62011
JS
2700 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2701 "0228 CLEAR LA timeout\n");
2e0fef85
JS
2702 clrlaerr = 1;
2703 break;
2704
2705 case LPFC_LINK_UNKNOWN:
2706 case LPFC_WARM_START:
2707 case LPFC_INIT_START:
2708 case LPFC_INIT_MBX_CMDS:
2709 case LPFC_LINK_DOWN:
2710 case LPFC_LINK_UP:
2711 case LPFC_HBA_ERROR:
e8b62011
JS
2712 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2713 "0230 Unexpected timeout, hba link "
2714 "state x%x\n", phba->link_state);
2e0fef85
JS
2715 clrlaerr = 1;
2716 break;
92d7f7b0
JS
2717
2718 case LPFC_HBA_READY:
2719 break;
dea3101e
JB
2720 }
2721
2722 if (clrlaerr) {
2e0fef85 2723 lpfc_disc_flush_list(vport);
a4bc3379 2724 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
dea3101e
JB
2725 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2726 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2e0fef85 2727 vport->port_state = LPFC_VPORT_READY;
dea3101e
JB
2728 }
2729
2730 return;
2731}
2732
dea3101e
JB
2733/*
2734 * This routine handles processing a NameServer REG_LOGIN mailbox
2735 * command upon completion. It is setup in the LPFC_MBOXQ
2736 * as the completion routine when the command is
2737 * handed off to the SLI layer.
2738 */
2739void
2e0fef85 2740lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 2741{
2e0fef85
JS
2742 MAILBOX_t *mb = &pmb->mb;
2743 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2744 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2745 struct lpfc_vport *vport = pmb->vport;
dea3101e
JB
2746
2747 pmb->context1 = NULL;
2748
dea3101e 2749 ndlp->nlp_rpi = mb->un.varWords[0];
dea3101e 2750 ndlp->nlp_type |= NLP_FABRIC;
2e0fef85 2751 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
dea3101e 2752
2e0fef85
JS
2753 /*
2754 * Start issuing Fabric-Device Management Interface (FDMI) command to
2755	 * 0xfffffa (FDMI well known port) or delay issuing FDMI command if
2756	 * fdmi-on=2 (supporting RPA/hostname)
dea3101e 2757 */
2e0fef85 2758
3de2a653 2759 if (vport->cfg_fdmi_on == 1)
2e0fef85
JS
2760 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
2761 else
2762 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
dea3101e 2763
329f9bc7
JS
2764 /* Mailbox took a reference to the node */
2765 lpfc_nlp_put(ndlp);
dea3101e
JB
2766 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2767 kfree(mp);
329f9bc7 2768 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e
JB
2769
2770 return;
2771}
2772
685f0bf7
JS
2773static int
2774lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
2775{
2776 uint16_t *rpi = param;
2777
2778 return ndlp->nlp_rpi == *rpi;
2779}
2780
2781static int
2782lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
2783{
2784 return memcmp(&ndlp->nlp_portname, param,
2785 sizeof(ndlp->nlp_portname)) == 0;
2786}
2787
a6ababd2 2788static struct lpfc_nodelist *
2e0fef85 2789__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
dea3101e 2790{
21568f53 2791 struct lpfc_nodelist *ndlp;
dea3101e 2792
2e0fef85 2793 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
87af33fe 2794 if (filter(ndlp, param))
685f0bf7
JS
2795 return ndlp;
2796 }
21568f53 2797 return NULL;
dea3101e
JB
2798}
2799
a6ababd2 2800#if 0
685f0bf7
JS
2801/*
2802 * Search node lists for a remote port matching filter criteria
92d7f7b0 2803 * Caller needs to hold host_lock before calling this routine.
685f0bf7
JS
2804 */
2805struct lpfc_nodelist *
2e0fef85 2806lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
685f0bf7 2807{
2e0fef85 2808 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
685f0bf7
JS
2809 struct lpfc_nodelist *ndlp;
2810
2e0fef85
JS
2811 spin_lock_irq(shost->host_lock);
2812 ndlp = __lpfc_find_node(vport, filter, param);
2813 spin_unlock_irq(shost->host_lock);
685f0bf7
JS
2814 return ndlp;
2815}
a6ababd2 2816#endif /* 0 */
685f0bf7
JS
2817
2818/*
2819 * This routine looks up the ndlp lists for the given RPI. If rpi found it
2e0fef85 2820 * returns the node list element pointer else return NULL.
685f0bf7
JS
2821 */
2822struct lpfc_nodelist *
2e0fef85 2823__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
685f0bf7 2824{
2e0fef85 2825 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
685f0bf7
JS
2826}
2827
a6ababd2 2828#if 0
2534ba75 2829struct lpfc_nodelist *
2e0fef85 2830lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2534ba75 2831{
2e0fef85 2832 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2534ba75
JS
2833 struct lpfc_nodelist *ndlp;
2834
2e0fef85
JS
2835 spin_lock_irq(shost->host_lock);
2836 ndlp = __lpfc_findnode_rpi(vport, rpi);
2837 spin_unlock_irq(shost->host_lock);
2534ba75
JS
2838 return ndlp;
2839}
a6ababd2 2840#endif /* 0 */
2534ba75 2841
488d1469 2842/*
685f0bf7 2843 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
2e0fef85 2844 * returns the node element list pointer else return NULL.
488d1469
JS
2845 */
2846struct lpfc_nodelist *
2e0fef85 2847lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
488d1469 2848{
2e0fef85 2849 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
488d1469 2850 struct lpfc_nodelist *ndlp;
488d1469 2851
2e0fef85
JS
2852 spin_lock_irq(shost->host_lock);
2853 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
2854 spin_unlock_irq(shost->host_lock);
858c9f6c 2855 return ndlp;
488d1469
JS
2856}
2857
dea3101e 2858void
2e0fef85
JS
2859lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2860 uint32_t did)
dea3101e
JB
2861{
2862 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
dea3101e 2863 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
92d7f7b0 2864 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
dea3101e
JB
2865 init_timer(&ndlp->nlp_delayfunc);
2866 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2867 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2868 ndlp->nlp_DID = did;
2e0fef85 2869 ndlp->vport = vport;
dea3101e 2870 ndlp->nlp_sid = NLP_NO_SID;
685f0bf7 2871 INIT_LIST_HEAD(&ndlp->nlp_listp);
329f9bc7 2872 kref_init(&ndlp->kref);
858c9f6c
JS
2873
2874 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2875 "node init: did:x%x",
2876 ndlp->nlp_DID, 0, 0);
2877
dea3101e
JB
2878 return;
2879}
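#if 0
/*
 * Illustrative sketch only (not part of the driver): how callers such as
 * lpfc_setup_disc_node() above allocate and initialize a node before
 * handing it to the discovery state machine.  The helper name
 * example_alloc_node is hypothetical; the calls are the driver's own.
 */
static struct lpfc_nodelist *
example_alloc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *)
		mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
	if (!ndlp)
		return NULL;

	/* Zero the node, arm its delay timer and take the initial kref. */
	lpfc_nlp_init(vport, ndlp, did);

	/* Put it on the vport list in NPR state, ready for (re)discovery. */
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return ndlp;
}
#endif	/* 0 - illustrative sketch */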
329f9bc7 2880
98c9ea5c
JS
2881/* This routine releases all resources associated with a specific NPort's ndlp
2882 * and mempool_free's the nodelist.
2883 */
311464ec 2884static void
329f9bc7
JS
2885lpfc_nlp_release(struct kref *kref)
2886{
2887 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
2888 kref);
858c9f6c
JS
2889
2890 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2891 "node release: did:x%x flg:x%x type:x%x",
2892 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2893
2e0fef85
JS
2894 lpfc_nlp_remove(ndlp->vport, ndlp);
2895 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
329f9bc7
JS
2896}
2897
98c9ea5c
JS
2898/* This routine bumps the reference count for a ndlp structure to ensure
2899 * that one discovery thread won't free a ndlp while another discovery thread
2900 * is using it.
2901 */
329f9bc7
JS
2902struct lpfc_nodelist *
2903lpfc_nlp_get(struct lpfc_nodelist *ndlp)
2904{
98c9ea5c
JS
2905 if (ndlp) {
2906 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2907 "node get: did:x%x flg:x%x refcnt:x%x",
2908 ndlp->nlp_DID, ndlp->nlp_flag,
2909 atomic_read(&ndlp->kref.refcount));
329f9bc7 2910 kref_get(&ndlp->kref);
98c9ea5c 2911 }
329f9bc7
JS
2912 return ndlp;
2913}
2914
98c9ea5c
JS
2915
2916/* This routine decrements the reference count for a ndlp structure. If the
2917 * count goes to 0, this indicates that the associated nodelist should be freed.
2918 */
329f9bc7
JS
2919int
2920lpfc_nlp_put(struct lpfc_nodelist *ndlp)
2921{
98c9ea5c
JS
2922 if (ndlp) {
2923 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2924 "node put: did:x%x flg:x%x refcnt:x%x",
2925 ndlp->nlp_DID, ndlp->nlp_flag,
2926 atomic_read(&ndlp->kref.refcount));
2927 }
329f9bc7
JS
2928 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
2929}
98c9ea5c
JS
2930
2931/* This routine frees the specified nodelist if it is not in use
2932 * by any other discovery thread. This routine returns 1 if the ndlp
2933 * is not being used by anyone and has been freed. A return value of
2934 * 0 indicates it is being used by another discovery thread and the
2935 * refcount is left unchanged.
2936 */
2937int
2938lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
2939{
2940 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2941 "node not used: did:x%x flg:x%x refcnt:x%x",
2942 ndlp->nlp_DID, ndlp->nlp_flag,
2943 atomic_read(&ndlp->kref.refcount));
2944
2945 if (atomic_read(&ndlp->kref.refcount) == 1) {
2946 lpfc_nlp_put(ndlp);
2947 return 1;
2948 }
2949 return 0;
2950}
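#if 0
/*
 * Illustrative sketch only (not part of the driver): the get/put pairing
 * the helpers above provide.  A thread that stashes an ndlp pointer for
 * later use (here in a hypothetical async context, example_async_ctx with
 * a pnode field) takes a reference first and releases it in its
 * completion path, just as the mailbox completions earlier in this file
 * do.
 */
struct example_async_ctx {
	struct lpfc_nodelist *pnode;	/* hypothetical context field */
};

static void example_start(struct example_async_ctx *ctx,
			  struct lpfc_nodelist *ndlp)
{
	/* Keep the node alive for as long as the context references it. */
	ctx->pnode = lpfc_nlp_get(ndlp);
}

static void example_complete(struct example_async_ctx *ctx)
{
	/* Drop the reference; frees the node if this was the last user. */
	lpfc_nlp_put(ctx->pnode);
	ctx->pnode = NULL;
}
#endif	/* 0 - illustrative sketch */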
2951