drivers/scsi/lpfc/lpfc_vport.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
#include "lpfc_vport.h"

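/**
 * lpfc_vport_set_state - Set the transport and driver state of a vport
 * @vport: The virtual port whose state is being changed.
 * @new_state: The new fc_vport_state to record.
 *
 * This routine mirrors @new_state into the FC transport's fc_vport object,
 * tracking the previous state except for the INITIALIZING transition, and
 * maps the fabric error states onto the driver's internal LPFC_VPORT_FAILED
 * and LPFC_VPORT_UNKNOWN port states.
 **/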
inline void lpfc_vport_set_state(struct lpfc_vport *vport,
				 enum fc_vport_state new_state)
{
	struct fc_vport *fc_vport = vport->fc_vport;

	if (fc_vport) {
		/*
		 * When the transport defines fc_vport_set_state we will
		 * replace this code with the following line
		 */
		/* fc_vport_set_state(fc_vport, new_state); */
		if (new_state != FC_VPORT_INITIALIZING)
			fc_vport->vport_last_state = fc_vport->vport_state;
		fc_vport->vport_state = new_state;
	}

	/* For all the error states we will set the internal state to FAILED */
	switch (new_state) {
	case FC_VPORT_NO_FABRIC_SUPP:
	case FC_VPORT_NO_FABRIC_RSCS:
	case FC_VPORT_FABRIC_LOGOUT:
	case FC_VPORT_FABRIC_REJ_WWN:
	case FC_VPORT_FAILED:
		vport->port_state = LPFC_VPORT_FAILED;
		break;
	case FC_VPORT_LINKDOWN:
		vport->port_state = LPFC_VPORT_UNKNOWN;
		break;
	default:
		/* do nothing */
		break;
	}
}

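/**
 * lpfc_alloc_vpi - Allocate a free VPI from the HBA's VPI bitmask
 * @phba: Pointer to the HBA context object.
 *
 * Scans the VPI bitmask starting at bit 1 (VPI zero is reserved for the
 * physical port) and marks the first free VPI as in use.  On SLI4 HBAs the
 * vpi_used count is incremented as well.
 *
 * Returns the allocated VPI, or 0 if no VPI is available.
 **/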
int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
	unsigned long vpi;

	spin_lock_irq(&phba->hbalock);
	/* Start at bit 1 because vpi zero is reserved for the physical port */
	vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
	if (vpi > phba->max_vpi)
		vpi = 0;
	else
		set_bit(vpi, phba->vpi_bmask);
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->sli4_hba.max_cfg_param.vpi_used++;
	spin_unlock_irq(&phba->hbalock);
	return vpi;
}

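/**
 * lpfc_free_vpi - Return a VPI to the HBA's VPI bitmask
 * @phba: Pointer to the HBA context object.
 * @vpi: The VPI to release; VPI zero (the physical port) is ignored.
 *
 * Clears the VPI's bit in the bitmask and, on SLI4 HBAs, decrements the
 * vpi_used count.
 **/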
static void
lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
{
	if (vpi == 0)
		return;
	spin_lock_irq(&phba->hbalock);
	clear_bit(vpi, phba->vpi_bmask);
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->sli4_hba.max_cfg_param.vpi_used--;
	spin_unlock_irq(&phba->hbalock);
}

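/**
 * lpfc_vport_sparm - Read the service parameters for a vport
 * @phba: Pointer to the HBA context object.
 * @vport: The virtual port whose service parameters are requested.
 *
 * Issues a synchronous READ_SPARAM mailbox command and copies the returned
 * service parameters, node name and port name into @vport.
 *
 * Returns 0 on success, -ENOMEM on allocation or mailbox setup failure,
 * -EINTR if the wait was interrupted by a signal, or -EIO on a mailbox
 * error.
 **/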
static int
lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	rc = lpfc_read_sparam(phba, pmb, vport->vpi);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	/*
	 * Grab buffer pointer and clear context1 so we can use
	 * lpfc_sli_issue_mbox_wait
	 */
	mp = (struct lpfc_dmabuf *) pmb->context1;
	pmb->context1 = NULL;

	pmb->vport = vport;
	rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
	if (rc != MBX_SUCCESS) {
		if (signal_pending(current)) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
					 "1830 Signal aborted mbxCmd x%x\n",
					 mb->mbxCommand);
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			if (rc != MBX_TIMEOUT)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EINTR;
		} else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
					 "1818 VPort failed init, mbxCmd x%x "
					 "READ_SPARM mbxStatus x%x, rc = x%x\n",
					 mb->mbxCommand, mb->mbxStatus, rc);
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			if (rc != MBX_TIMEOUT)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

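/**
 * lpfc_valid_wwn_format - Validate the format of a world wide name
 * @phba: Pointer to the HBA context object.
 * @wwn: The world wide name to check.
 * @name_type: String ("WWNN" or "WWPN") used in the error log message.
 *
 * Rejects an IEEE format 1 name whose reserved bits are not zero, as
 * checked below; any other name format is accepted.
 *
 * Returns 1 if the name format is acceptable, 0 otherwise (and logs the
 * offending name).
 **/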
static int
lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
		      const char *name_type)
{
	/* ensure that IEEE format 1 addresses
	 * contain zeros in bits 59-48
	 */
	if (!((wwn->u.wwn[0] >> 4) == 1 &&
	      ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
		return 1;

	lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
			"1822 Invalid %s: %02x:%02x:%02x:%02x:"
			"%02x:%02x:%02x:%02x\n",
			name_type,
			wwn->u.wwn[0], wwn->u.wwn[1],
			wwn->u.wwn[2], wwn->u.wwn[3],
			wwn->u.wwn[4], wwn->u.wwn[5],
			wwn->u.wwn[6], wwn->u.wwn[7]);
	return 0;
}

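/**
 * lpfc_unique_wwpn - Check that a new vport's WWPN is unique on the HBA
 * @phba: Pointer to the HBA context object.
 * @new_vport: The vport whose port name is being checked.
 *
 * Compares the new vport's port name against every other port on the HBA.
 *
 * Returns 1 if the WWPN is unique, 0 if another port already uses it.
 **/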
static int
lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
{
	struct lpfc_vport *vport;
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport == new_vport)
			continue;
		/* If they match, return not unique */
		if (memcmp(&vport->fc_sparam.portName,
			   &new_vport->fc_sparam.portName,
			   sizeof(struct lpfc_name)) == 0) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return 0;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return 1;
}

/**
 * lpfc_discovery_wait - Wait for driver discovery to quiesce
 * @vport: The virtual port for which this call is being executed.
 *
 * This driver calls this routine specifically from lpfc_vport_delete
 * to enforce a synchronous execution of vport delete relative to
 * discovery activities.  The lpfc_vport_delete routine should not
 * return until it can reasonably guarantee that discovery has
 * quiesced.  Post FDISC LOGO, the driver must wait until its SAN
 * teardown is complete and all resources recovered before allowing
 * cleanup.
 *
 * This routine does not require any locks held.
 **/
static void lpfc_discovery_wait(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t wait_flags = 0;
	unsigned long wait_time_max;
	unsigned long start_time;

	wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
		     FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;

	/*
	 * The time constraint on this loop is a balance between the
	 * fabric RA_TOV value and dev_loss tmo.  The driver's
	 * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
	 */
	wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
	wait_time_max += jiffies;
	start_time = jiffies;
	while (time_before(jiffies, wait_time_max)) {
		if ((vport->num_disc_nodes > 0) ||
		    (vport->fc_flag & wait_flags) ||
		    ((vport->port_state > LPFC_VPORT_FAILED) &&
		     (vport->port_state < LPFC_VPORT_READY))) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
					 "1833 Vport discovery quiesce Wait:"
					 " state x%x fc_flags x%x"
					 " num_nodes x%x, waiting 1000 msecs"
					 " total wait msecs x%x\n",
					 vport->port_state, vport->fc_flag,
					 vport->num_disc_nodes,
					 jiffies_to_msecs(jiffies - start_time));
			msleep(1000);
		} else {
			/* Base case.  Wait variants satisfied.  Break out */
			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
					 "1834 Vport discovery quiesced:"
					 " state x%x fc_flags x%x"
					 " wait msecs x%x\n",
					 vport->port_state, vport->fc_flag,
					 jiffies_to_msecs(jiffies
						- start_time));
			break;
		}
	}

	if (time_after(jiffies, wait_time_max))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1835 Vport discovery quiesce failed:"
				 " state x%x fc_flags x%x wait msecs x%x\n",
				 vport->port_state, vport->fc_flag,
				 jiffies_to_msecs(jiffies - start_time));
}

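/**
 * lpfc_vport_create - Create an NPIV virtual port
 * @fc_vport: The fc_vport object created by the FC transport.
 * @disable: If true, leave the new vport in the disabled state.
 *
 * Allocates a VPI and a driver vport with its SCSI host, reads and
 * validates the service parameters and WWNs and, when the link is up and
 * the fabric supports NPIV, starts the initial FDISC; otherwise the vport
 * is left in a linkdown, disabled or failed state.
 *
 * Returns VPORT_OK on success, otherwise VPORT_INVAL, VPORT_NORESOURCES or
 * VPORT_ERROR.
 **/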
int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = fc_vport->shost;
	struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = pport->phba;
	struct lpfc_vport *vport = NULL;
	int instance;
	int vpi;
	int rc = VPORT_ERROR;
	int status;

	if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1808 Create VPORT failed: "
				"NPIV is not enabled: SLImode:%d\n",
				phba->sli_rev);
		rc = VPORT_INVAL;
		goto error_out;
	}

	vpi = lpfc_alloc_vpi(phba);
	if (vpi == 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1809 Create VPORT failed: "
				"Max VPORTs (%d) exceeded\n",
				phba->max_vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	/* Assign an unused board number */
	if ((instance = lpfc_get_instance()) < 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1810 Create VPORT failed: Cannot get "
				"instance number\n");
		lpfc_free_vpi(phba, vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	vport = lpfc_create_port(phba, instance, &fc_vport->dev);
	if (!vport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1811 Create VPORT failed: vpi x%x\n", vpi);
		lpfc_free_vpi(phba, vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	vport->vpi = vpi;
	lpfc_debugfs_initialize(vport);

	if ((status = lpfc_vport_sparm(phba, vport))) {
		if (status == -EINTR) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
					 "1831 Create VPORT Interrupted.\n");
			rc = VPORT_ERROR;
		} else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
					 "1813 Create VPORT failed. "
					 "Cannot get sparam\n");
			rc = VPORT_NORESOURCES;
		}
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		goto error_out;
	}

	u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
	u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);

	memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
	memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);

	if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
	    !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1821 Create VPORT failed. "
				 "Invalid WWN format\n");
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		rc = VPORT_INVAL;
		goto error_out;
	}

	if (!lpfc_unique_wwpn(phba, vport)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1823 Create VPORT failed. "
				 "Duplicate WWN on HBA\n");
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		rc = VPORT_INVAL;
		goto error_out;
	}

	/* Create binary sysfs attribute for vport */
	lpfc_alloc_sysfs_attr(vport);

	/* Set the DFT_LUN_Q_DEPTH accordingly */
	vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth;

	*(struct lpfc_vport **)fc_vport->dd_data = vport;
	vport->fc_vport = fc_vport;

	/* At this point we are fully registered with SCSI Layer.  */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask;
		vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
	}

	/*
	 * In SLI4, the vpi must be activated before it can be used
	 * by the port.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (pport->fc_flag & FC_VFI_REGISTERED)) {
		rc = lpfc_sli4_init_vpi(vport);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
					"1838 Failed to INIT_VPI on vpi %d "
					"status %d\n", vpi, rc);
			rc = VPORT_NORESOURCES;
			lpfc_free_vpi(phba, vpi);
			goto error_out;
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/*
		 * Driver cannot INIT_VPI now.  Set the flag to issue
		 * init_vpi when reg_vfi completes.
		 */
		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		rc = VPORT_OK;
		goto out;
	}

	if ((phba->link_state < LPFC_LINK_UP) ||
	    (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		rc = VPORT_OK;
		goto out;
	}

	if (disable) {
		lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
		rc = VPORT_OK;
		goto out;
	}

	/* Use the physical node's Fabric NDLP to determine if the link is
	 * up and ready to FDISC.
	 */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
			lpfc_set_disctmo(vport);
			lpfc_initial_fdisc(vport);
		} else {
			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "0262 No NPIV Fabric support\n");
		}
	} else {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	}
	rc = VPORT_OK;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1825 Vport Created.\n");
	lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
	return rc;
}

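/**
 * disable_vport - Take an existing vport offline
 * @fc_vport: The fc_vport object for the vport to disable.
 *
 * Logs the vport out of the fabric with an NPIV LOGO when the link is up,
 * recovers and cleans up its remote port nodes and RPIs, unregisters its
 * VPI and marks the vport FC_VPORT_DISABLED.
 *
 * Always returns VPORT_OK.
 **/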
static int
disable_vport(struct fc_vport *fc_vport)
{
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
	long timeout;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
	    && phba->link_state >= LPFC_LINK_UP) {
		vport->unreg_vpi_cmpl = VPORT_INVAL;
		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
				timeout = schedule_timeout(timeout);
	}

	lpfc_sli_host_down(vport);

	/* Mark all nodes for discovery so we can remove them by
	 * calling lpfc_cleanup_rpis(vport, 1)
	 */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);
	}
	lpfc_cleanup_rpis(vport, 1);

	lpfc_stop_vport_timers(vport);
	lpfc_unreg_all_rpis(vport);
	lpfc_unreg_default_rpis(vport);
	/*
	 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
	 * scsi_host_put() to release the vport.
	 */
	lpfc_mbx_unreg_vpi(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1826 Vport Disabled.\n");
	return VPORT_OK;
}

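/**
 * enable_vport - Bring a disabled vport back online
 * @fc_vport: The fc_vport object for the vport to enable.
 *
 * If the link is down or the topology is loop, the vport is simply marked
 * linkdown.  Otherwise the VPI is re-initialized or re-registered as needed
 * and, when the fabric supports NPIV, discovery is restarted with an FDISC.
 *
 * Always returns VPORT_OK.
 **/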
static int
enable_vport(struct fc_vport *fc_vport)
{
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((phba->link_state < LPFC_LINK_UP) ||
	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		return VPORT_OK;
	}

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_LOADING;
	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
		spin_unlock_irq(shost->host_lock);
		lpfc_issue_init_vpi(vport);
		goto out;
	}

	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);

	/* Use the physical node's Fabric NDLP to determine if the link is
	 * up and ready to FDISC.
	 */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
			lpfc_set_disctmo(vport);
			lpfc_initial_fdisc(vport);
		} else {
			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "0264 No NPIV Fabric support\n");
		}
	} else {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	}

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1827 Vport Enabled.\n");
	return VPORT_OK;
}

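/**
 * lpfc_vport_disable - FC transport entry point to enable or disable a vport
 * @fc_vport: The fc_vport object from the FC transport.
 * @disable: True to disable the vport, false to enable it.
 *
 * Dispatches to disable_vport() or enable_vport() and returns its result.
 **/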
int
lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	if (disable)
		return disable_vport(fc_vport);
	else
		return enable_vport(fc_vport);
}


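/**
 * lpfc_vport_delete - Delete an NPIV virtual port
 * @fc_vport: The fc_vport object for the vport to delete.
 *
 * Refuses to delete the physical port or a static vport, waits for
 * discovery to quiesce, removes the FC and SCSI hosts, performs DA_ID
 * cleanup and a fabric LOGO when appropriate, releases the vport's nodes,
 * RPIs and VPI, and drops the SCSI host references taken for the delete.
 *
 * Returns VPORT_OK on success, VPORT_ERROR or VPORT_INVAL on failure, or
 * -EAGAIN if discovery does not quiesce in time.
 **/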
int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	long timeout;
	bool ns_ndlp_referenced = false;

	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1812 vport_delete failed: Cannot delete "
				 "physical host\n");
		return VPORT_ERROR;
	}

	/* If the vport is a static vport fail the deletion. */
	if ((vport->vport_flag & STATIC_VPORT) &&
	    !(phba->pport->load_flag & FC_UNLOADING)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1837 vport_delete failed: Cannot delete "
				 "static vport.\n");
		return VPORT_ERROR;
	}
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);
	/*
	 * If we are not unloading the driver then prevent the vport_delete
	 * from happening until after this vport's discovery is finished.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING)) {
		int check_count = 0;
		while (check_count < ((phba->fc_ratov * 3) + 3) &&
		       vport->port_state > LPFC_VPORT_FAILED &&
		       vport->port_state < LPFC_VPORT_READY) {
			check_count++;
			msleep(1000);
		}
		if (vport->port_state > LPFC_VPORT_FAILED &&
		    vport->port_state < LPFC_VPORT_READY)
			return -EAGAIN;
	}
	/*
	 * This is a bit of a mess.  We want to ensure the shost doesn't get
	 * torn down until we're done with the embedded lpfc_vport structure.
	 *
	 * Beyond holding a reference for this function, we also need a
	 * reference for outstanding I/O requests we schedule during delete
	 * processing.  But once we scsi_remove_host() we can no longer obtain
	 * a reference through scsi_host_get().
	 *
	 * So we take two references here.  We release one reference at the
	 * bottom of the function -- after delinking the vport.  And we
	 * release the other at the completion of the unreg_vpi that gets
	 * initiated after we've disposed of all other resources associated
	 * with the port.
	 */
	if (!scsi_host_get(shost))
		return VPORT_INVAL;
	if (!scsi_host_get(shost)) {
		scsi_host_put(shost);
		return VPORT_INVAL;
	}
	lpfc_free_sysfs_attr(vport);

	lpfc_debugfs_terminate(vport);

	/*
	 * The call to fc_remove_host might release the NameServer ndlp.  Since
	 * we might need to use the ndlp to send the DA_ID CT command,
	 * increment the reference for the NameServer ndlp to prevent it from
	 * being released.
	 */
	ndlp = lpfc_findnode_did(vport, NameServer_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_nlp_get(ndlp);
		ns_ndlp_referenced = true;
	}

	/* Remove FC host and then SCSI host with the vport */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);

	/* In case of driver unload, we shall not perform fabric logo as the
	 * worker thread is already stopped at this stage, so we can safely
	 * skip it.
	 */
	if (phba->pport->load_flag & FC_UNLOADING) {
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
		    phba->link_state >= LPFC_LINK_UP) {
			/* First look for the Fabric ndlp */
			ndlp = lpfc_findnode_did(vport, Fabric_DID);
			if (!ndlp)
				goto skip_logo;
			else if (!NLP_CHK_NODE_ACT(ndlp)) {
				ndlp = lpfc_enable_node(vport, ndlp,
							NLP_STE_UNUSED_NODE);
				if (!ndlp)
					goto skip_logo;
			}
			/* Remove ndlp from vport node list */
			lpfc_dequeue_node(vport, ndlp);

			/* Indicate free memory when released */
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Kick off release ndlp when it can be safely done */
			lpfc_nlp_put(ndlp);
		}
		goto skip_logo;
	}

	/* Otherwise, we will perform fabric logo as needed */
	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
	    phba->link_state >= LPFC_LINK_UP &&
	    phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		if (vport->cfg_enable_da_id) {
			timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
			if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
				while (vport->ct_flags && timeout)
					timeout = schedule_timeout(timeout);
			else
				lpfc_printf_log(vport->phba, KERN_WARNING,
						LOG_VPORT,
						"1829 CT command failed to "
						"delete objects on fabric\n");
		}
		/* First look for the Fabric ndlp */
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			/* Cannot find existing Fabric ndlp, allocate one */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp)
				goto skip_logo;
			lpfc_nlp_init(vport, ndlp, Fabric_DID);
			/* Indicate free memory when released */
			NLP_SET_FREE_REQ(ndlp);
		} else {
			if (!NLP_CHK_NODE_ACT(ndlp)) {
				ndlp = lpfc_enable_node(vport, ndlp,
							NLP_STE_UNUSED_NODE);
				if (!ndlp)
					goto skip_logo;
			}

			/* Remove ndlp from vport list */
			lpfc_dequeue_node(vport, ndlp);
			spin_lock_irq(&phba->ndlp_lock);
			if (!NLP_CHK_FREE_REQ(ndlp))
				/* Indicate free memory when released */
				NLP_SET_FREE_REQ(ndlp);
			else {
				/* Skip this if ndlp is already in free mode */
				spin_unlock_irq(&phba->ndlp_lock);
				goto skip_logo;
			}
			spin_unlock_irq(&phba->ndlp_lock);
		}

		/*
		 * If the vpi is not registered, then a valid FDISC doesn't
		 * exist and there is no need for an ELS LOGO.  Just clean up
		 * the ndlp.
		 */
		if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_nlp_put(ndlp);
			goto skip_logo;
		}

		vport->unreg_vpi_cmpl = VPORT_INVAL;
		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
				timeout = schedule_timeout(timeout);
	}

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_discovery_wait(vport);

skip_logo:

	/*
	 * If the NameServer ndlp has been incremented to allow the DA_ID CT
	 * command to be sent, decrement the ndlp now.
	 */
	if (ns_ndlp_referenced) {
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		lpfc_nlp_put(ndlp);
	}

	lpfc_cleanup(vport);
	lpfc_sli_host_down(vport);

	lpfc_stop_vport_timers(vport);

	if (!(phba->pport->load_flag & FC_UNLOADING)) {
		lpfc_unreg_all_rpis(vport);
		lpfc_unreg_default_rpis(vport);
		/*
		 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
		 * does the scsi_host_put() to release the vport.
		 */
		if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
		    lpfc_mbx_unreg_vpi(vport))
			scsi_host_put(shost);
	} else
		scsi_host_put(shost);

	lpfc_free_vpi(phba, vport->vpi);
	vport->work_port_events = 0;
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1828 Vport Deleted.\n");
	scsi_host_put(shost);
	return VPORT_OK;
}

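/**
 * lpfc_create_vport_work_array - Build a referenced array of active ports
 * @phba: Pointer to the HBA context object.
 *
 * Allocates an array of vport pointers and fills it with every port on the
 * HBA that is not unloading, taking a SCSI host reference on each entry.
 * The caller must release the array with lpfc_destroy_vport_work_array().
 *
 * Returns the array, or NULL if the allocation fails.
 **/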
struct lpfc_vport **
lpfc_create_vport_work_array(struct lpfc_hba *phba)
{
	struct lpfc_vport *port_iterator;
	struct lpfc_vport **vports;
	int index = 0;
	vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
			 GFP_KERNEL);
	if (vports == NULL)
		return NULL;
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(port_iterator, &phba->port_list, listentry) {
		if (port_iterator->load_flag & FC_UNLOADING)
			continue;
		if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
			lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
					 "1801 Create vport work array FAILED: "
					 "cannot do scsi_host_get\n");
			continue;
		}
		vports[index++] = port_iterator;
	}
	spin_unlock_irq(&phba->hbalock);
	return vports;
}

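/**
 * lpfc_destroy_vport_work_array - Release a vport work array
 * @phba: Pointer to the HBA context object.
 * @vports: The array returned by lpfc_create_vport_work_array().
 *
 * Drops the SCSI host reference held on each entry of @vports and frees
 * the array.
 **/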
void
lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
{
	int i;
	if (vports == NULL)
		return;
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
		scsi_host_put(lpfc_shost_from_vport(vports[i]));
	kfree(vports);
}


/**
 * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
 * @vport: Pointer to vport object.
 *
 * This function resets the statistical data for the vport.  This function
 * is called with the host_lock held.
 **/
void
lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->lat_data)
			memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
			       sizeof(struct lpfc_scsicmd_bkt));
	}
}


/**
 * lpfc_alloc_bucket - Allocate data buffer required for statistical data
 * @vport: Pointer to vport object.
 *
 * This function allocates the data buffers required for all the FC
 * nodes of the vport to collect statistical data.
 **/
void
lpfc_alloc_bucket(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;

		kfree(ndlp->lat_data);
		ndlp->lat_data = NULL;

		if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
			ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
						 sizeof(struct lpfc_scsicmd_bkt),
						 GFP_ATOMIC);

			if (!ndlp->lat_data)
				lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
					"0287 lpfc_alloc_bucket failed to "
					"allocate statistical data buffer DID "
					"0x%x\n", ndlp->nlp_DID);
		}
	}
}

/**
 * lpfc_free_bucket - Free data buffer required for statistical data
 * @vport: Pointer to vport object.
 *
 * This function frees the statistical data buffers of all the FC
 * nodes of the vport.
 **/
void
lpfc_free_bucket(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;

		kfree(ndlp->lat_data);
		ndlp->lat_data = NULL;
	}
}