/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * www.emulex.com *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
#include "lpfc_vport.h"

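/*
 * Record a new state for a virtual port, both in the transport's
 * fc_vport object (when one is attached) and in the driver's own
 * port_state: the fabric error states map to LPFC_VPORT_FAILED and
 * link-down maps to LPFC_VPORT_UNKNOWN.
 */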
inline void lpfc_vport_set_state(struct lpfc_vport *vport,
                                 enum fc_vport_state new_state)
{
        struct fc_vport *fc_vport = vport->fc_vport;

        if (fc_vport) {
                /*
                 * When the transport defines fc_vport_set_state we will
                 * replace this code with the following line
                 */
                /* fc_vport_set_state(fc_vport, new_state); */
                if (new_state != FC_VPORT_INITIALIZING)
                        fc_vport->vport_last_state = fc_vport->vport_state;
                fc_vport->vport_state = new_state;
        }

        /* For all the error states we will set the internal state to FAILED */
        switch (new_state) {
        case FC_VPORT_NO_FABRIC_SUPP:
        case FC_VPORT_NO_FABRIC_RSCS:
        case FC_VPORT_FABRIC_LOGOUT:
        case FC_VPORT_FABRIC_REJ_WWN:
        case FC_VPORT_FAILED:
                vport->port_state = LPFC_VPORT_FAILED;
                break;
        case FC_VPORT_LINKDOWN:
                vport->port_state = LPFC_VPORT_UNKNOWN;
                break;
        default:
                /* do nothing */
                break;
        }
}

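/*
 * Allocate the next free VPI from the HBA's bitmask.  VPI 0 is the
 * physical port, so the search starts at 1 and a return value of 0
 * means no virtual port index was available.
 */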
static int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
        int vpi;

        spin_lock_irq(&phba->hbalock);
        vpi = find_next_zero_bit(phba->vpi_bmask, phba->max_vpi, 1);
        if (vpi > phba->max_vpi)
                vpi = 0;
        else
                set_bit(vpi, phba->vpi_bmask);
        spin_unlock_irq(&phba->hbalock);
        return vpi;
}

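/* Return a VPI to the HBA's bitmask so that it can be reused. */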
static void
lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
{
        spin_lock_irq(&phba->hbalock);
        clear_bit(vpi, phba->vpi_bmask);
        spin_unlock_irq(&phba->hbalock);
}

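/*
 * Issue a READ_SPARM mailbox command for this vport and copy the
 * returned service parameters, node name and port name into the
 * vport.  Returns 0 on success, -ENOMEM or -EIO on failure.
 */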
static int
lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                return -ENOMEM;
        }
        mb = &pmb->mb;

        lpfc_read_sparam(phba, pmb, vport->vpi);
        /*
         * Grab buffer pointer and clear context1 so we can use
         * lpfc_sli_issue_mbox_wait
         */
        mp = (struct lpfc_dmabuf *) pmb->context1;
        pmb->context1 = NULL;

        pmb->vport = vport;
        rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
        if (rc != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
                                "%d (%d):1818 VPort failed init, mbxCmd x%x "
                                "READ_SPARM mbxStatus x%x, rc = x%x\n",
                                phba->brd_no, vport->vpi,
                                mb->mbxCommand, mb->mbxStatus, rc);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }

        memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
        memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
               sizeof (struct lpfc_name));
        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof (struct lpfc_name));

        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

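/*
 * Validate a caller-supplied WWN.  An IEEE format 1 (NAA 1) name with
 * non-zero reserved bits is logged and rejected.  Returns 1 when the
 * name is acceptable, 0 when it is not.
 */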
static int
lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
                      const char *name_type)
{
        /* ensure that IEEE format 1 addresses
         * contain zeros in bits 59-48
         */
        if (!((wwn->u.wwn[0] >> 4) == 1 &&
              ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
                return 1;

        lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                        "%d:1822 Invalid %s: %02x:%02x:%02x:%02x:"
                        "%02x:%02x:%02x:%02x\n",
                        phba->brd_no, name_type,
                        wwn->u.wwn[0], wwn->u.wwn[1],
                        wwn->u.wwn[2], wwn->u.wwn[3],
                        wwn->u.wwn[4], wwn->u.wwn[5],
                        wwn->u.wwn[6], wwn->u.wwn[7]);
        return 0;
}

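/*
 * Check that no other port on this HBA already uses the new vport's
 * WWPN.  Returns 1 if the WWPN is unique, 0 if a duplicate exists.
 */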
static int
lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
{
        struct lpfc_vport *vport;

        list_for_each_entry(vport, &phba->port_list, listentry) {
                if (vport == new_vport)
                        continue;
                /* If they match, return not unique */
                if (memcmp(&vport->fc_sparam.portName,
                           &new_vport->fc_sparam.portName,
                           sizeof(struct lpfc_name)) == 0)
                        return 0;
        }
        return 1;
}

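/*
 * fc_vport create entry point.  Allocates a VPI and an lpfc_vport,
 * reads and validates the service parameters and WWNs, and, when the
 * link is up on a fabric topology, starts discovery with an FDISC.
 * Returns one of the VPORT_* codes expected by the FC transport.
 */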
int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
        struct lpfc_nodelist *ndlp;
        struct lpfc_vport *pport =
                (struct lpfc_vport *) fc_vport->shost->hostdata;
        struct lpfc_hba *phba = pport->phba;
        struct lpfc_vport *vport = NULL;
        int instance;
        int vpi;
        int rc = VPORT_ERROR;

        if ((phba->sli_rev < 3) ||
            !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                "%d:1808 Create VPORT failed: "
                                "NPIV is not enabled: SLImode:%d\n",
                                phba->brd_no, phba->sli_rev);
                rc = VPORT_INVAL;
                goto error_out;
        }

        vpi = lpfc_alloc_vpi(phba);
        if (vpi == 0) {
                lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                "%d:1809 Create VPORT failed: "
                                "Max VPORTs (%d) exceeded\n",
                                phba->brd_no, phba->max_vpi);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        /* Assign an unused board number */
        if ((instance = lpfc_get_instance()) < 0) {
                lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                "%d:1810 Create VPORT failed: Cannot get "
                                "instance number\n", phba->brd_no);
                lpfc_free_vpi(phba, vpi);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        vport = lpfc_create_port(phba, instance, fc_vport);
        if (!vport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                "%d:1811 Create VPORT failed: vpi x%x\n",
                                phba->brd_no, vpi);
                lpfc_free_vpi(phba, vpi);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        vport->vpi = vpi;
        if (lpfc_vport_sparm(phba, vport)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                "%d:1813 Create VPORT failed: vpi:%d "
                                "Cannot get sparam\n",
                                phba->brd_no, vpi);
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
        memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);

        if (fc_vport->node_name != 0)
                u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
        if (fc_vport->port_name != 0)
                u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);

        memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
        memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);

        if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
            !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
                lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                "%d:1821 Create VPORT failed: vpi:%d "
                                "Invalid WWN format\n",
                                phba->brd_no, vpi);
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);
                rc = VPORT_INVAL;
                goto error_out;
        }

        if (!lpfc_unique_wwpn(phba, vport)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                "%d:1823 Create VPORT failed: vpi:%d "
                                "Duplicate WWN on HBA\n",
                                phba->brd_no, vpi);
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);
                rc = VPORT_INVAL;
                goto error_out;
        }

        *(struct lpfc_vport **)fc_vport->dd_data = vport;
        vport->fc_vport = fc_vport;

        if ((phba->link_state < LPFC_LINK_UP) ||
            (phba->fc_topology == TOPOLOGY_LOOP)) {
                lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
                rc = VPORT_OK;
                goto out;
        }

        if (disable) {
                rc = VPORT_OK;
                goto out;
        }

        /* Use the physical port's Fabric NDLP to determine if the link is
         * up and ready to FDISC.
         */
        ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
        if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
                lpfc_set_disctmo(vport);
                lpfc_initial_fdisc(vport);
        } else {
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        }
        rc = VPORT_OK;

out:
        lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
        return rc;
}

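/*
 * Quiesce an existing vport: log out of the fabric, recover and clean
 * up its nodes and RPIs, stop its timers and unregister the VPI, then
 * mark the port FC_VPORT_DISABLED.
 */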
int
disable_vport(struct fc_vport *fc_vport)
{
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
        long timeout;

        ndlp = lpfc_findnode_did(vport, Fabric_DID);
        if (ndlp && phba->link_state >= LPFC_LINK_UP) {
                vport->unreg_vpi_cmpl = VPORT_INVAL;
                timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
                if (!lpfc_issue_els_npiv_logo(vport, ndlp))
                        while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
                                timeout = schedule_timeout(timeout);
        }

        lpfc_sli_host_down(vport);

        /* Mark all nodes for discovery so we can remove them by
         * calling lpfc_cleanup_rpis(vport, 1)
         */
        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
                lpfc_disc_state_machine(vport, ndlp, NULL,
                                        NLP_EVT_DEVICE_RECOVERY);
        }
        lpfc_cleanup_rpis(vport, 1);

        lpfc_stop_vport_timers(vport);
        lpfc_unreg_all_rpis(vport);
        lpfc_unreg_default_rpis(vport);
        /*
         * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
         * scsi_host_put() to release the vport.
         */
        lpfc_mbx_unreg_vpi(vport);

        lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
        return VPORT_OK;
}

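/*
 * Bring a disabled vport back online.  If the link is down or the
 * topology is loop the port is simply marked linkdown; otherwise the
 * VPI is flagged for re-registration and discovery restarts with an
 * FDISC.
 */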
int
enable_vport(struct fc_vport *fc_vport)
{
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL;

        if ((phba->link_state < LPFC_LINK_UP) ||
            (phba->fc_topology == TOPOLOGY_LOOP)) {
                lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
                return VPORT_OK;
        }

        vport->load_flag |= FC_LOADING;
        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;

        /* Use the physical port's Fabric NDLP to determine if the link is
         * up and ready to FDISC.
         */
        ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
        if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
                lpfc_set_disctmo(vport);
                lpfc_initial_fdisc(vport);
        } else {
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        }

        return VPORT_OK;
}

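/* fc_vport disable entry point: dispatch to disable_vport() or enable_vport(). */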
int
lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
{
        if (disable)
                return disable_vport(fc_vport);
        else
                return enable_vport(fc_vport);
}

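/*
 * fc_vport delete entry point.  Takes two extra shost references so
 * the embedded lpfc_vport survives until the final unreg_vpi
 * completes, logs out of the fabric, removes all nodes, and finally
 * frees the VPI and delinks the vport from the HBA's port list.
 */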
int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
        struct lpfc_nodelist *ndlp = NULL;
        struct lpfc_nodelist *next_ndlp;
        struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba *phba = vport->phba;
        long timeout;
        int rc = VPORT_ERROR;

        /*
         * This is a bit of a mess.  We want to ensure the shost doesn't get
         * torn down until we're done with the embedded lpfc_vport structure.
         *
         * Beyond holding a reference for this function, we also need a
         * reference for outstanding I/O requests we schedule during delete
         * processing.  But once we scsi_remove_host() we can no longer obtain
         * a reference through scsi_host_get().
         *
         * So we take two references here.  We release one reference at the
         * bottom of the function -- after delinking the vport.  And we
         * release the other at the completion of the unreg_vpi that gets
         * initiated after we've disposed of all other resources associated
         * with the port.
         */
        if (!scsi_host_get(shost) || !scsi_host_get(shost))
                return VPORT_INVAL;

        if (vport->port_type == LPFC_PHYSICAL_PORT) {
                lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                "%d:1812 vport_delete failed: Cannot delete "
                                "physical host\n", phba->brd_no);
                goto out;
        }

        vport->load_flag |= FC_UNLOADING;

        kfree(vport->vname);
        fc_remove_host(lpfc_shost_from_vport(vport));
        scsi_remove_host(lpfc_shost_from_vport(vport));

        ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
        if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
            phba->link_state >= LPFC_LINK_UP) {

                /* First look for the Fabric ndlp */
                ndlp = lpfc_findnode_did(vport, Fabric_DID);
                if (!ndlp) {
                        /* Cannot find existing Fabric ndlp, allocate one */
                        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
                        if (!ndlp)
                                goto skip_logo;
                        lpfc_nlp_init(vport, ndlp, Fabric_DID);
                } else {
                        lpfc_dequeue_node(vport, ndlp);
                }
                vport->unreg_vpi_cmpl = VPORT_INVAL;
                timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
                if (!lpfc_issue_els_npiv_logo(vport, ndlp))
                        while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
                                timeout = schedule_timeout(timeout);
        }

skip_logo:
        lpfc_sli_host_down(vport);

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                lpfc_disc_state_machine(vport, ndlp, NULL,
                                        NLP_EVT_DEVICE_RECOVERY);
                lpfc_disc_state_machine(vport, ndlp, NULL,
                                        NLP_EVT_DEVICE_RM);
        }

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                /* free any ndlp's in unused state */
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        lpfc_drop_node(vport, ndlp);
        }

        lpfc_stop_vport_timers(vport);
        lpfc_unreg_all_rpis(vport);
        lpfc_unreg_default_rpis(vport);
        /*
         * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
         * scsi_host_put() to release the vport.
         */
        lpfc_mbx_unreg_vpi(vport);

        lpfc_free_vpi(phba, vport->vpi);
        vport->work_port_events = 0;
        spin_lock_irq(&phba->hbalock);
        list_del_init(&vport->listentry);
        spin_unlock_irq(&phba->hbalock);

        rc = VPORT_OK;
out:
        scsi_host_put(shost);
        return rc;
}

EXPORT_SYMBOL(lpfc_vport_create);
EXPORT_SYMBOL(lpfc_vport_delete);
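
/*
 * The exported entry points above are meant to be wired into the FC
 * transport's vport handlers.  A minimal sketch, assuming the usual
 * fc_function_template fields from <scsi/scsi_transport_fc.h> (the
 * driver's actual template is defined elsewhere in the lpfc sources):
 *
 *	static struct fc_function_template lpfc_transport_functions = {
 *		...
 *		.vport_create  = lpfc_vport_create,
 *		.vport_delete  = lpfc_vport_delete,
 *		.vport_disable = lpfc_vport_disable,
 *	};
 */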