[mirror_ubuntu-bionic-kernel.git] drivers/scsi/lpfc/lpfc_init.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23
24 #include <linux/blkdev.h>
25 #include <linux/delay.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/idr.h>
28 #include <linux/interrupt.h>
29 #include <linux/module.h>
30 #include <linux/kthread.h>
31 #include <linux/pci.h>
32 #include <linux/spinlock.h>
33 #include <linux/ctype.h>
34 #include <linux/aer.h>
35 #include <linux/slab.h>
36 #include <linux/firmware.h>
37 #include <linux/miscdevice.h>
38 #include <linux/percpu.h>
39 #include <linux/msi.h>
40
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_device.h>
43 #include <scsi/scsi_host.h>
44 #include <scsi/scsi_transport_fc.h>
45 #include <scsi/scsi_tcq.h>
46 #include <scsi/fc/fc_fs.h>
47
48 #include <linux/nvme-fc-driver.h>
49
50 #include "lpfc_hw4.h"
51 #include "lpfc_hw.h"
52 #include "lpfc_sli.h"
53 #include "lpfc_sli4.h"
54 #include "lpfc_nl.h"
55 #include "lpfc_disc.h"
56 #include "lpfc.h"
57 #include "lpfc_scsi.h"
58 #include "lpfc_nvme.h"
59 #include "lpfc_nvmet.h"
60 #include "lpfc_logmsg.h"
61 #include "lpfc_crtn.h"
62 #include "lpfc_vport.h"
63 #include "lpfc_version.h"
64 #include "lpfc_ids.h"
65
66 char *_dump_buf_data;
67 unsigned long _dump_buf_data_order;
68 char *_dump_buf_dif;
69 unsigned long _dump_buf_dif_order;
70 spinlock_t _dump_buf_lock;
71
72 /* Used when mapping IRQ vectors in a driver centric manner */
73 uint16_t *lpfc_used_cpu;
74 uint32_t lpfc_present_cpu;
75
76 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
77 static int lpfc_post_rcv_buf(struct lpfc_hba *);
78 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
79 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
80 static int lpfc_setup_endian_order(struct lpfc_hba *);
81 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
82 static void lpfc_free_els_sgl_list(struct lpfc_hba *);
83 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
84 static void lpfc_init_sgl_list(struct lpfc_hba *);
85 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
86 static void lpfc_free_active_sgl(struct lpfc_hba *);
87 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
88 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
89 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
90 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
91 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
92 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
93 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
94 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
95
96 static struct scsi_transport_template *lpfc_transport_template = NULL;
97 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
98 static DEFINE_IDR(lpfc_hba_index);
99 #define LPFC_NVMET_BUF_POST 254
100
101 /**
102 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
103 * @phba: pointer to lpfc hba data structure.
104 *
105 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
106 * mailbox command. It retrieves the revision information from the HBA and
107 * collects the Vital Product Data (VPD) about the HBA for preparing the
108 * configuration of the HBA.
109 *
110 * Return codes:
111 * 0 - success.
112 * -ERESTART - requests the SLI layer to reset the HBA and try again.
113 * Any other value - indicates an error.
114 **/
115 int
116 lpfc_config_port_prep(struct lpfc_hba *phba)
117 {
118 lpfc_vpd_t *vp = &phba->vpd;
119 int i = 0, rc;
120 LPFC_MBOXQ_t *pmb;
121 MAILBOX_t *mb;
122 char *lpfc_vpd_data = NULL;
123 uint16_t offset = 0;
124 static char licensed[56] =
125 "key unlock for use with gnu public licensed code only\0";
126 static int init_key = 1;
127
128 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
129 if (!pmb) {
130 phba->link_state = LPFC_HBA_ERROR;
131 return -ENOMEM;
132 }
133
134 mb = &pmb->u.mb;
135 phba->link_state = LPFC_INIT_MBX_CMDS;
136
137 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
138 if (init_key) {
139 uint32_t *ptext = (uint32_t *) licensed;
140
141 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
142 *ptext = cpu_to_be32(*ptext);
143 init_key = 0;
144 }
145
146 lpfc_read_nv(phba, pmb);
147 memset((char*)mb->un.varRDnvp.rsvd3, 0,
148 sizeof (mb->un.varRDnvp.rsvd3));
149 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
150 sizeof (licensed));
151
152 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
153
154 if (rc != MBX_SUCCESS) {
155 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
156 "0324 Config Port initialization "
157 "error, mbxCmd x%x READ_NVPARM, "
158 "mbxStatus x%x\n",
159 mb->mbxCommand, mb->mbxStatus);
160 mempool_free(pmb, phba->mbox_mem_pool);
161 return -ERESTART;
162 }
163 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
164 sizeof(phba->wwnn));
165 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
166 sizeof(phba->wwpn));
167 }
168
169 phba->sli3_options = 0x0;
170
171 /* Setup and issue mailbox READ REV command */
172 lpfc_read_rev(phba, pmb);
173 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
174 if (rc != MBX_SUCCESS) {
175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
176 "0439 Adapter failed to init, mbxCmd x%x "
177 "READ_REV, mbxStatus x%x\n",
178 mb->mbxCommand, mb->mbxStatus);
179 mempool_free( pmb, phba->mbox_mem_pool);
180 return -ERESTART;
181 }
182
183
184 /*
185 * The value of rr must be 1 since the driver sets the cv field to 1.
186 * This setting requires the FW to set all revision fields.
187 */
188 if (mb->un.varRdRev.rr == 0) {
189 vp->rev.rBit = 0;
190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
191 "0440 Adapter failed to init, READ_REV has "
192 "missing revision information.\n");
193 mempool_free(pmb, phba->mbox_mem_pool);
194 return -ERESTART;
195 }
196
197 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
198 mempool_free(pmb, phba->mbox_mem_pool);
199 return -EINVAL;
200 }
201
202 /* Save information as VPD data */
203 vp->rev.rBit = 1;
204 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
205 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
206 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
207 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
208 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
209 vp->rev.biuRev = mb->un.varRdRev.biuRev;
210 vp->rev.smRev = mb->un.varRdRev.smRev;
211 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
212 vp->rev.endecRev = mb->un.varRdRev.endecRev;
213 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
214 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
215 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
216 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
217 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
218 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
219
220 /* If the sli feature level is less than 9, we must
221 * tear down all RPIs and VPIs on link down if NPIV
222 * is enabled.
223 */
224 if (vp->rev.feaLevelHigh < 9)
225 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
226
227 if (lpfc_is_LC_HBA(phba->pcidev->device))
228 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
229 sizeof (phba->RandomData));
230
231 /* Get adapter VPD information */
232 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
233 if (!lpfc_vpd_data)
234 goto out_free_mbox;
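/*
 * Pull the VPD region from the adapter in successive DUMP_MEMORY chunks,
 * copying each returned piece into lpfc_vpd_data until the adapter
 * reports no more data or the local buffer is full.
 */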
235 do {
236 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
237 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
238
239 if (rc != MBX_SUCCESS) {
240 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
241 "0441 VPD not present on adapter, "
242 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
243 mb->mbxCommand, mb->mbxStatus);
244 mb->un.varDmp.word_cnt = 0;
245 }
246 /* dump mem may return a count of zero when finished or when we got a
247 * mailbox error; either way we are done.
248 */
249 if (mb->un.varDmp.word_cnt == 0)
250 break;
251 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
252 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
253 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
254 lpfc_vpd_data + offset,
255 mb->un.varDmp.word_cnt);
256 offset += mb->un.varDmp.word_cnt;
257 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
258 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
259
260 kfree(lpfc_vpd_data);
261 out_free_mbox:
262 mempool_free(pmb, phba->mbox_mem_pool);
263 return 0;
264 }
265
266 /**
267 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
268 * @phba: pointer to lpfc hba data structure.
269 * @pmboxq: pointer to the driver internal queue element for mailbox command.
270 *
271 * This is the completion handler for the driver's asynchronous event
272 * configuration mailbox command to the device. If the mailbox command returns
273 * successfully, it sets the internal temperature sensor support flag to 1;
274 * otherwise, it sets the flag to 0.
275 **/
276 static void
277 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
278 {
279 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
280 phba->temp_sensor_support = 1;
281 else
282 phba->temp_sensor_support = 0;
283 mempool_free(pmboxq, phba->mbox_mem_pool);
284 return;
285 }
286
287 /**
288 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
289 * @phba: pointer to lpfc hba data structure.
290 * @pmboxq: pointer to the driver internal queue element for mailbox command.
291 *
292 * This is the completion handler for the dump mailbox command used to get
293 * wake up parameters. When this command completes, the response contains the
294 * Option ROM version of the HBA. This function translates the version number
295 * into a human readable string and stores it in OptionROMVersion.
296 **/
297 static void
298 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
299 {
300 struct prog_id *prg;
301 uint32_t prog_id_word;
302 char dist = ' ';
303 /* character array used for decoding dist type. */
304 char dist_char[] = "nabx";
305
306 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
307 mempool_free(pmboxq, phba->mbox_mem_pool);
308 return;
309 }
310
311 prg = (struct prog_id *) &prog_id_word;
312
313 /* word 7 contains the option rom version */
314 prog_id_word = pmboxq->u.mb.un.varWords[7];
315
316 /* Decode the Option rom version word to a readable string */
317 if (prg->dist < 4)
318 dist = dist_char[prg->dist];
319
320 if ((prg->dist == 3) && (prg->num == 0))
321 snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
322 prg->ver, prg->rev, prg->lev);
323 else
324 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
325 prg->ver, prg->rev, prg->lev,
326 dist, prg->num);
327 mempool_free(pmboxq, phba->mbox_mem_pool);
328 return;
329 }
330
331 /**
332 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
333 * cfg_soft_wwnn, cfg_soft_wwpn
334 * @vport: pointer to lpfc vport data structure.
335 *
336 *
337 * Return codes
338 * None.
339 **/
340 void
341 lpfc_update_vport_wwn(struct lpfc_vport *vport)
342 {
343 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
344 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
345
346 /* If the soft name exists then update it using the service params */
347 if (vport->phba->cfg_soft_wwnn)
348 u64_to_wwn(vport->phba->cfg_soft_wwnn,
349 vport->fc_sparam.nodeName.u.wwn);
350 if (vport->phba->cfg_soft_wwpn)
351 u64_to_wwn(vport->phba->cfg_soft_wwpn,
352 vport->fc_sparam.portName.u.wwn);
353
354 /*
355 * If the name is empty or there exists a soft name
356 * then copy the service params name, otherwise use the fc name
357 */
358 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
359 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
360 sizeof(struct lpfc_name));
361 else
362 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
363 sizeof(struct lpfc_name));
364
365 /*
366 * If the port name has changed, then set the Param changes flag
367 * to unreg the login
368 */
369 if (vport->fc_portname.u.wwn[0] != 0 &&
370 memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
371 sizeof(struct lpfc_name)))
372 vport->vport_flag |= FAWWPN_PARAM_CHG;
373
374 if (vport->fc_portname.u.wwn[0] == 0 ||
375 vport->phba->cfg_soft_wwpn ||
376 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
377 vport->vport_flag & FAWWPN_SET) {
378 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
379 sizeof(struct lpfc_name));
380 vport->vport_flag &= ~FAWWPN_SET;
381 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
382 vport->vport_flag |= FAWWPN_SET;
383 }
384 else
385 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
386 sizeof(struct lpfc_name));
387 }
388
389 /**
390 * lpfc_config_port_post - Perform lpfc initialization after config port
391 * @phba: pointer to lpfc hba data structure.
392 *
393 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
394 * command call. It performs all internal resource and state setups on the
395 * port: post IOCB buffers, enable appropriate host interrupt attentions,
396 * ELS ring timers, etc.
397 *
398 * Return codes
399 * 0 - success.
400 * Any other value - error.
401 **/
402 int
403 lpfc_config_port_post(struct lpfc_hba *phba)
404 {
405 struct lpfc_vport *vport = phba->pport;
406 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
407 LPFC_MBOXQ_t *pmb;
408 MAILBOX_t *mb;
409 struct lpfc_dmabuf *mp;
410 struct lpfc_sli *psli = &phba->sli;
411 uint32_t status, timeout;
412 int i, j;
413 int rc;
414
415 spin_lock_irq(&phba->hbalock);
416 /*
417 * If the Config port command completed correctly the HBA is no
418 * longer overheated.
419 */
420 if (phba->over_temp_state == HBA_OVER_TEMP)
421 phba->over_temp_state = HBA_NORMAL_TEMP;
422 spin_unlock_irq(&phba->hbalock);
423
424 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
425 if (!pmb) {
426 phba->link_state = LPFC_HBA_ERROR;
427 return -ENOMEM;
428 }
429 mb = &pmb->u.mb;
430
431 /* Get login parameters for NID. */
432 rc = lpfc_read_sparam(phba, pmb, 0);
433 if (rc) {
434 mempool_free(pmb, phba->mbox_mem_pool);
435 return -ENOMEM;
436 }
437
438 pmb->vport = vport;
439 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
440 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
441 "0448 Adapter failed init, mbxCmd x%x "
442 "READ_SPARM mbxStatus x%x\n",
443 mb->mbxCommand, mb->mbxStatus);
444 phba->link_state = LPFC_HBA_ERROR;
445 mp = (struct lpfc_dmabuf *) pmb->context1;
446 mempool_free(pmb, phba->mbox_mem_pool);
447 lpfc_mbuf_free(phba, mp->virt, mp->phys);
448 kfree(mp);
449 return -EIO;
450 }
451
452 mp = (struct lpfc_dmabuf *) pmb->context1;
453
454 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
455 lpfc_mbuf_free(phba, mp->virt, mp->phys);
456 kfree(mp);
457 pmb->context1 = NULL;
458 lpfc_update_vport_wwn(vport);
459
460 /* Update the fc_host data structures with new wwn. */
461 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
462 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
463 fc_host_max_npiv_vports(shost) = phba->max_vpi;
464
465 /* If no serial number in VPD data, use low 6 bytes of WWNN */
466 /* This should be consolidated into parse_vpd ? - mr */
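/*
 * Each of the low six WWNN bytes is split into two nibbles; a nibble
 * maps to '0'-'9' (0x30 + n) or 'a'-'f' (0x61 + n - 10), producing a
 * 12-character serial number.
 */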
467 if (phba->SerialNumber[0] == 0) {
468 uint8_t *outptr;
469
470 outptr = &vport->fc_nodename.u.s.IEEE[0];
471 for (i = 0; i < 12; i++) {
472 status = *outptr++;
473 j = ((status & 0xf0) >> 4);
474 if (j <= 9)
475 phba->SerialNumber[i] =
476 (char)((uint8_t) 0x30 + (uint8_t) j);
477 else
478 phba->SerialNumber[i] =
479 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
480 i++;
481 j = (status & 0xf);
482 if (j <= 9)
483 phba->SerialNumber[i] =
484 (char)((uint8_t) 0x30 + (uint8_t) j);
485 else
486 phba->SerialNumber[i] =
487 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
488 }
489 }
490
491 lpfc_read_config(phba, pmb);
492 pmb->vport = vport;
493 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
495 "0453 Adapter failed to init, mbxCmd x%x "
496 "READ_CONFIG, mbxStatus x%x\n",
497 mb->mbxCommand, mb->mbxStatus);
498 phba->link_state = LPFC_HBA_ERROR;
499 mempool_free( pmb, phba->mbox_mem_pool);
500 return -EIO;
501 }
502
503 /* Check if the port is disabled */
504 lpfc_sli_read_link_ste(phba);
505
506 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
507 i = (mb->un.varRdConfig.max_xri + 1);
508 if (phba->cfg_hba_queue_depth > i) {
509 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
510 "3359 HBA queue depth changed from %d to %d\n",
511 phba->cfg_hba_queue_depth, i);
512 phba->cfg_hba_queue_depth = i;
513 }
514
515 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
516 i = (mb->un.varRdConfig.max_xri >> 3);
517 if (phba->pport->cfg_lun_queue_depth > i) {
518 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
519 "3360 LUN queue depth changed from %d to %d\n",
520 phba->pport->cfg_lun_queue_depth, i);
521 phba->pport->cfg_lun_queue_depth = i;
522 }
523
524 phba->lmt = mb->un.varRdConfig.lmt;
525
526 /* Get the default values for Model Name and Description */
527 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
528
529 phba->link_state = LPFC_LINK_DOWN;
530
531 /* Only process IOCBs on ELS ring till hba_state is READY */
532 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
533 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
534 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
535 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
536
537 /* Post receive buffers for desired rings */
538 if (phba->sli_rev != 3)
539 lpfc_post_rcv_buf(phba);
540
541 /*
542 * Configure HBA attention conditions to use MSI-X messages if in MSI-X mode
543 */
544 if (phba->intr_type == MSIX) {
545 rc = lpfc_config_msi(phba, pmb);
546 if (rc) {
547 mempool_free(pmb, phba->mbox_mem_pool);
548 return -EIO;
549 }
550 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
551 if (rc != MBX_SUCCESS) {
552 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
553 "0352 Config MSI mailbox command "
554 "failed, mbxCmd x%x, mbxStatus x%x\n",
555 pmb->u.mb.mbxCommand,
556 pmb->u.mb.mbxStatus);
557 mempool_free(pmb, phba->mbox_mem_pool);
558 return -EIO;
559 }
560 }
561
562 spin_lock_irq(&phba->hbalock);
563 /* Initialize ERATT handling flag */
564 phba->hba_flag &= ~HBA_ERATT_HANDLED;
565
566 /* Enable appropriate host interrupts */
567 if (lpfc_readl(phba->HCregaddr, &status)) {
568 spin_unlock_irq(&phba->hbalock);
569 return -EIO;
570 }
571 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
572 if (psli->num_rings > 0)
573 status |= HC_R0INT_ENA;
574 if (psli->num_rings > 1)
575 status |= HC_R1INT_ENA;
576 if (psli->num_rings > 2)
577 status |= HC_R2INT_ENA;
578 if (psli->num_rings > 3)
579 status |= HC_R3INT_ENA;
580
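/*
 * If FCP ring polling is enabled and its interrupt has been disabled
 * via cfg_poll, mask off the ring-0 attention interrupt; that ring is
 * serviced by polling instead.
 */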
581 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
582 (phba->cfg_poll & DISABLE_FCP_RING_INT))
583 status &= ~(HC_R0INT_ENA);
584
585 writel(status, phba->HCregaddr);
586 readl(phba->HCregaddr); /* flush */
587 spin_unlock_irq(&phba->hbalock);
588
589 /* Set up ring-0 (ELS) timer */
590 timeout = phba->fc_ratov * 2;
591 mod_timer(&vport->els_tmofunc,
592 jiffies + msecs_to_jiffies(1000 * timeout));
593 /* Set up heart beat (HB) timer */
594 mod_timer(&phba->hb_tmofunc,
595 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
596 phba->hb_outstanding = 0;
597 phba->last_completion_time = jiffies;
598 /* Set up error attention (ERATT) polling timer */
599 mod_timer(&phba->eratt_poll,
600 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
601
602 if (phba->hba_flag & LINK_DISABLED) {
603 lpfc_printf_log(phba,
604 KERN_ERR, LOG_INIT,
605 "2598 Adapter Link is disabled.\n");
606 lpfc_down_link(phba, pmb);
607 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
608 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
609 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
610 lpfc_printf_log(phba,
611 KERN_ERR, LOG_INIT,
612 "2599 Adapter failed to issue DOWN_LINK"
613 " mbox command rc 0x%x\n", rc);
614
615 mempool_free(pmb, phba->mbox_mem_pool);
616 return -EIO;
617 }
618 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
619 mempool_free(pmb, phba->mbox_mem_pool);
620 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
621 if (rc)
622 return rc;
623 }
624 /* MBOX buffer will be freed in mbox compl */
625 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
626 if (!pmb) {
627 phba->link_state = LPFC_HBA_ERROR;
628 return -ENOMEM;
629 }
630
631 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
632 pmb->mbox_cmpl = lpfc_config_async_cmpl;
633 pmb->vport = phba->pport;
634 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
635
636 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
637 lpfc_printf_log(phba,
638 KERN_ERR,
639 LOG_INIT,
640 "0456 Adapter failed to issue "
641 "ASYNCEVT_ENABLE mbox status x%x\n",
642 rc);
643 mempool_free(pmb, phba->mbox_mem_pool);
644 }
645
646 /* Get Option rom version */
647 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
648 if (!pmb) {
649 phba->link_state = LPFC_HBA_ERROR;
650 return -ENOMEM;
651 }
652
653 lpfc_dump_wakeup_param(phba, pmb);
654 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
655 pmb->vport = phba->pport;
656 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
657
658 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
659 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
660 "to get Option ROM version status x%x\n", rc);
661 mempool_free(pmb, phba->mbox_mem_pool);
662 }
663
664 return 0;
665 }
666
667 /**
668 * lpfc_hba_init_link - Initialize the FC link
669 * @phba: pointer to lpfc hba data structure.
670 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
671 *
672 * This routine will issue the INIT_LINK mailbox command call.
673 * It is available to other drivers through the lpfc_hba data
674 * structure for use as a delayed link up mechanism with the
675 * module parameter lpfc_suppress_link_up.
676 *
677 * Return code
678 * 0 - success
679 * Any other value - error
680 **/
681 static int
682 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
683 {
684 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
685 }
686
687 /**
688 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
689 * @phba: pointer to lpfc hba data structure.
690 * @fc_topology: desired fc topology.
691 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
692 *
693 * This routine will issue the INIT_LINK mailbox command call.
694 * It is available to other drivers through the lpfc_hba data
695 * structure for use as a delayed link up mechanism with the
696 * module parameter lpfc_suppress_link_up.
697 *
698 * Return code
699 * 0 - success
700 * Any other value - error
701 **/
702 int
703 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
704 uint32_t flag)
705 {
706 struct lpfc_vport *vport = phba->pport;
707 LPFC_MBOXQ_t *pmb;
708 MAILBOX_t *mb;
709 int rc;
710
711 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
712 if (!pmb) {
713 phba->link_state = LPFC_HBA_ERROR;
714 return -ENOMEM;
715 }
716 mb = &pmb->u.mb;
717 pmb->vport = vport;
718
719 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
720 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
721 !(phba->lmt & LMT_1Gb)) ||
722 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
723 !(phba->lmt & LMT_2Gb)) ||
724 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
725 !(phba->lmt & LMT_4Gb)) ||
726 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
727 !(phba->lmt & LMT_8Gb)) ||
728 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
729 !(phba->lmt & LMT_10Gb)) ||
730 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
731 !(phba->lmt & LMT_16Gb)) ||
732 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
733 !(phba->lmt & LMT_32Gb))) {
734 /* Reset link speed to auto */
735 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
736 "1302 Invalid speed for this board:%d "
737 "Reset link speed to auto.\n",
738 phba->cfg_link_speed);
739 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
740 }
741 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
742 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
743 if (phba->sli_rev < LPFC_SLI_REV4)
744 lpfc_set_loopback_flag(phba);
745 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
746 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
748 "0498 Adapter failed to init, mbxCmd x%x "
749 "INIT_LINK, mbxStatus x%x\n",
750 mb->mbxCommand, mb->mbxStatus);
751 if (phba->sli_rev <= LPFC_SLI_REV3) {
752 /* Clear all interrupt enable conditions */
753 writel(0, phba->HCregaddr);
754 readl(phba->HCregaddr); /* flush */
755 /* Clear all pending interrupts */
756 writel(0xffffffff, phba->HAregaddr);
757 readl(phba->HAregaddr); /* flush */
758 }
759 phba->link_state = LPFC_HBA_ERROR;
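/*
 * The mailbox is freed here unless it is still queued (MBX_BUSY) in
 * no-wait mode; a queued mailbox is released later by its completion
 * handler.
 */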
760 if (rc != MBX_BUSY || flag == MBX_POLL)
761 mempool_free(pmb, phba->mbox_mem_pool);
762 return -EIO;
763 }
764 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
765 if (flag == MBX_POLL)
766 mempool_free(pmb, phba->mbox_mem_pool);
767
768 return 0;
769 }
770
771 /**
772 * lpfc_hba_down_link - this routine downs the FC link
773 * @phba: pointer to lpfc hba data structure.
774 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
775 *
776 * This routine will issue the DOWN_LINK mailbox command call.
777 * It is available to other drivers through the lpfc_hba data
778 * structure for use to stop the link.
779 *
780 * Return code
781 * 0 - success
782 * Any other value - error
783 **/
784 static int
785 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
786 {
787 LPFC_MBOXQ_t *pmb;
788 int rc;
789
790 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
791 if (!pmb) {
792 phba->link_state = LPFC_HBA_ERROR;
793 return -ENOMEM;
794 }
795
796 lpfc_printf_log(phba,
797 KERN_ERR, LOG_INIT,
798 "0491 Adapter Link is disabled.\n");
799 lpfc_down_link(phba, pmb);
800 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
801 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
802 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
803 lpfc_printf_log(phba,
804 KERN_ERR, LOG_INIT,
805 "2522 Adapter failed to issue DOWN_LINK"
806 " mbox command rc 0x%x\n", rc);
807
808 mempool_free(pmb, phba->mbox_mem_pool);
809 return -EIO;
810 }
811 if (flag == MBX_POLL)
812 mempool_free(pmb, phba->mbox_mem_pool);
813
814 return 0;
815 }
816
817 /**
818 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
819 * @phba: pointer to lpfc HBA data structure.
820 *
821 * This routine will do LPFC uninitialization before the HBA is reset when
822 * bringing down the SLI Layer.
823 *
824 * Return codes
825 * 0 - success.
826 * Any other value - error.
827 **/
828 int
829 lpfc_hba_down_prep(struct lpfc_hba *phba)
830 {
831 struct lpfc_vport **vports;
832 int i;
833
834 if (phba->sli_rev <= LPFC_SLI_REV3) {
835 /* Disable interrupts */
836 writel(0, phba->HCregaddr);
837 readl(phba->HCregaddr); /* flush */
838 }
839
840 if (phba->pport->load_flag & FC_UNLOADING)
841 lpfc_cleanup_discovery_resources(phba->pport);
842 else {
843 vports = lpfc_create_vport_work_array(phba);
844 if (vports != NULL)
845 for (i = 0; i <= phba->max_vports &&
846 vports[i] != NULL; i++)
847 lpfc_cleanup_discovery_resources(vports[i]);
848 lpfc_destroy_vport_work_array(phba, vports);
849 }
850 return 0;
851 }
852
853 /**
854 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
855 * rspiocb which got deferred
856 *
857 * @phba: pointer to lpfc HBA data structure.
858 *
859 * This routine will cleanup completed slow path events after HBA is reset
860 * when bringing down the SLI Layer.
861 *
862 *
863 * Return codes
864 * void.
865 **/
866 static void
867 lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
868 {
869 struct lpfc_iocbq *rspiocbq;
870 struct hbq_dmabuf *dmabuf;
871 struct lpfc_cq_event *cq_event;
872
873 spin_lock_irq(&phba->hbalock);
874 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
875 spin_unlock_irq(&phba->hbalock);
876
877 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
878 /* Get the response iocb from the head of work queue */
879 spin_lock_irq(&phba->hbalock);
880 list_remove_head(&phba->sli4_hba.sp_queue_event,
881 cq_event, struct lpfc_cq_event, list);
882 spin_unlock_irq(&phba->hbalock);
883
884 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
885 case CQE_CODE_COMPL_WQE:
886 rspiocbq = container_of(cq_event, struct lpfc_iocbq,
887 cq_event);
888 lpfc_sli_release_iocbq(phba, rspiocbq);
889 break;
890 case CQE_CODE_RECEIVE:
891 case CQE_CODE_RECEIVE_V1:
892 dmabuf = container_of(cq_event, struct hbq_dmabuf,
893 cq_event);
894 lpfc_in_buf_free(phba, &dmabuf->dbuf);
895 }
896 }
897 }
898
899 /**
900 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
901 * @phba: pointer to lpfc HBA data structure.
902 *
903 * This routine will cleanup posted ELS buffers after the HBA is reset
904 * when bringing down the SLI Layer.
905 *
906 *
907 * Return codes
908 * void.
909 **/
910 static void
911 lpfc_hba_free_post_buf(struct lpfc_hba *phba)
912 {
913 struct lpfc_sli *psli = &phba->sli;
914 struct lpfc_sli_ring *pring;
915 struct lpfc_dmabuf *mp, *next_mp;
916 LIST_HEAD(buflist);
917 int count;
918
919 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
920 lpfc_sli_hbqbuf_free_all(phba);
921 else {
922 /* Cleanup preposted buffers on the ELS ring */
923 pring = &psli->sli3_ring[LPFC_ELS_RING];
924 spin_lock_irq(&phba->hbalock);
925 list_splice_init(&pring->postbufq, &buflist);
926 spin_unlock_irq(&phba->hbalock);
927
928 count = 0;
929 list_for_each_entry_safe(mp, next_mp, &buflist, list) {
930 list_del(&mp->list);
931 count++;
932 lpfc_mbuf_free(phba, mp->virt, mp->phys);
933 kfree(mp);
934 }
935
936 spin_lock_irq(&phba->hbalock);
937 pring->postbufq_cnt -= count;
938 spin_unlock_irq(&phba->hbalock);
939 }
940 }
941
942 /**
943 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
944 * @phba: pointer to lpfc HBA data structure.
945 *
946 * This routine will cleanup the txcmplq after the HBA is reset when bringing
947 * down the SLI Layer.
948 *
949 * Return codes
950 * void
951 **/
952 static void
953 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
954 {
955 struct lpfc_sli *psli = &phba->sli;
956 struct lpfc_queue *qp = NULL;
957 struct lpfc_sli_ring *pring;
958 LIST_HEAD(completions);
959 int i;
960
961 if (phba->sli_rev != LPFC_SLI_REV4) {
962 for (i = 0; i < psli->num_rings; i++) {
963 pring = &psli->sli3_ring[i];
964 spin_lock_irq(&phba->hbalock);
965 /* At this point in time the HBA is either reset or DOA
966 * Nothing should be on txcmplq as it will
967 * NEVER complete.
968 */
969 list_splice_init(&pring->txcmplq, &completions);
970 pring->txcmplq_cnt = 0;
971 spin_unlock_irq(&phba->hbalock);
972
973 lpfc_sli_abort_iocb_ring(phba, pring);
974 }
975 /* Cancel all the IOCBs from the completions list */
976 lpfc_sli_cancel_iocbs(phba, &completions,
977 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
978 return;
979 }
980 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
981 pring = qp->pring;
982 if (!pring)
983 continue;
984 spin_lock_irq(&pring->ring_lock);
985 list_splice_init(&pring->txcmplq, &completions);
986 pring->txcmplq_cnt = 0;
987 spin_unlock_irq(&pring->ring_lock);
988 lpfc_sli_abort_iocb_ring(phba, pring);
989 }
990 /* Cancel all the IOCBs from the completions list */
991 lpfc_sli_cancel_iocbs(phba, &completions,
992 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
993 }
994
995 /**
996 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
997 *
998 * @phba: pointer to lpfc HBA data structure.
999 *
1000 * This routine will do uninitialization after the HBA is reset when bringing
1001 * down the SLI Layer.
1002 *
1003 * Return codes
1004 * 0 - success.
1005 * Any other value - error.
1006 **/
1007 static int
1008 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
1009 {
1010 lpfc_hba_free_post_buf(phba);
1011 lpfc_hba_clean_txcmplq(phba);
1012 return 0;
1013 }
1014
1015 /**
1016 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
1017 * @phba: pointer to lpfc HBA data structure.
1018 *
1019 * This routine will do uninitialization after the HBA is reset when bringing
1020 * down the SLI Layer.
1021 *
1022 * Return codes
1023 * 0 - success.
1024 * Any other value - error.
1025 **/
1026 static int
1027 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1028 {
1029 struct lpfc_scsi_buf *psb, *psb_next;
1030 struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
1031 LIST_HEAD(aborts);
1032 LIST_HEAD(nvme_aborts);
1033 LIST_HEAD(nvmet_aborts);
1034 unsigned long iflag = 0;
1035 struct lpfc_sglq *sglq_entry = NULL;
1036
1037
1038 lpfc_sli_hbqbuf_free_all(phba);
1039 lpfc_hba_clean_txcmplq(phba);
1040
1041 /* At this point in time the HBA is either reset or DOA. Either
1042 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
1043 * on the lpfc_els_sgl_list so that it can either be freed if the
1044 * driver is unloading or reposted if the driver is restarting
1045 * the port.
1046 */
1047 spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */
1048 /* scsi_buf_list */
1049 /* sgl_list_lock required because worker thread uses this
1050 * list.
1051 */
1052 spin_lock(&phba->sli4_hba.sgl_list_lock);
1053 list_for_each_entry(sglq_entry,
1054 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
1055 sglq_entry->state = SGL_FREED;
1056
1057 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
1058 &phba->sli4_hba.lpfc_els_sgl_list);
1059
1060
1061 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1062 /* abts_scsi_buf_list_lock required because worker thread uses this
1063 * list.
1064 */
1065 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
1066 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
1067 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
1068 &aborts);
1069 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
1070 }
1071
1072 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1073 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1074 list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
1075 &nvme_aborts);
1076 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1077 &nvmet_aborts);
1078 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1079 }
1080
1081 spin_unlock_irq(&phba->hbalock);
1082
1083 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
1084 psb->pCmd = NULL;
1085 psb->status = IOSTAT_SUCCESS;
1086 }
1087 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1088 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
1089 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1090
1091 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1092 list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
1093 psb->pCmd = NULL;
1094 psb->status = IOSTAT_SUCCESS;
1095 }
1096 spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
1097 list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
1098 spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
1099
1100 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
1101 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
1102 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
1103 }
1104 }
1105
1106 lpfc_sli4_free_sp_events(phba);
1107 return 0;
1108 }
1109
1110 /**
1111 * lpfc_hba_down_post - Wrapper func for hba down post routine
1112 * @phba: pointer to lpfc HBA data structure.
1113 *
1114 * This routine wraps the actual SLI3 or SLI4 routine for performing
1115 * uninitialization after the HBA is reset when bringing down the SLI Layer.
1116 *
1117 * Return codes
1118 * 0 - success.
1119 * Any other value - error.
1120 **/
1121 int
1122 lpfc_hba_down_post(struct lpfc_hba *phba)
1123 {
1124 return (*phba->lpfc_hba_down_post)(phba);
1125 }
1126
1127 /**
1128 * lpfc_hb_timeout - The HBA-timer timeout handler
1129 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
1130 *
1131 * This is the HBA-timer timeout handler registered to the lpfc driver. When
1132 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
1133 * work-port-events bitmap and the worker thread is notified. This timeout
1134 * event will be used by the worker thread to invoke the actual timeout
1135 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
1136 * be performed in the timeout handler and the HBA timeout event bit shall
1137 * be cleared by the worker thread after it has taken the event bitmap out.
1138 **/
1139 static void
1140 lpfc_hb_timeout(unsigned long ptr)
1141 {
1142 struct lpfc_hba *phba;
1143 uint32_t tmo_posted;
1144 unsigned long iflag;
1145
1146 phba = (struct lpfc_hba *)ptr;
1147
1148 /* Check for heart beat timeout conditions */
1149 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1150 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1151 if (!tmo_posted)
1152 phba->pport->work_port_events |= WORKER_HB_TMO;
1153 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1154
1155 /* Tell the worker thread there is work to do */
1156 if (!tmo_posted)
1157 lpfc_worker_wake_up(phba);
1158 return;
1159 }
1160
1161 /**
1162 * lpfc_rrq_timeout - The RRQ-timer timeout handler
1163 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
1164 *
1165 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
1166 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
1167 * work-port-events bitmap and the worker thread is notified. This timeout
1168 * event will be used by the worker thread to invoke the actual timeout
1169 * handler routine, lpfc_rrq_handler. Any periodical operations will
1170 * be performed in the timeout handler and the RRQ timeout event bit shall
1171 * be cleared by the worker thread after it has taken the event bitmap out.
1172 **/
1173 static void
1174 lpfc_rrq_timeout(unsigned long ptr)
1175 {
1176 struct lpfc_hba *phba;
1177 unsigned long iflag;
1178
1179 phba = (struct lpfc_hba *)ptr;
1180 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1181 if (!(phba->pport->load_flag & FC_UNLOADING))
1182 phba->hba_flag |= HBA_RRQ_ACTIVE;
1183 else
1184 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1185 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1186
1187 if (!(phba->pport->load_flag & FC_UNLOADING))
1188 lpfc_worker_wake_up(phba);
1189 }
1190
1191 /**
1192 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
1193 * @phba: pointer to lpfc hba data structure.
1194 * @pmboxq: pointer to the driver internal queue element for mailbox command.
1195 *
1196 * This is the callback function to the lpfc heart-beat mailbox command.
1197 * If configured, the lpfc driver issues the heart-beat mailbox command to
1198 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
1199 * heart-beat mailbox command is issued, the driver shall set up heart-beat
1200 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
1201 * heart-beat outstanding state. Once the mailbox command comes back and
1202 * no error conditions detected, the heart-beat mailbox command timer is
1203 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1204 * state is cleared for the next heart-beat. If the timer expired with the
1205 * heart-beat outstanding state set, the driver will put the HBA offline.
1206 **/
1207 static void
1208 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1209 {
1210 unsigned long drvr_flag;
1211
1212 spin_lock_irqsave(&phba->hbalock, drvr_flag);
1213 phba->hb_outstanding = 0;
1214 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1215
1216 /* Check and reset heart-beat timer if necessary */
1217 mempool_free(pmboxq, phba->mbox_mem_pool);
1218 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1219 !(phba->link_state == LPFC_HBA_ERROR) &&
1220 !(phba->pport->load_flag & FC_UNLOADING))
1221 mod_timer(&phba->hb_tmofunc,
1222 jiffies +
1223 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1224 return;
1225 }
1226
1227 /**
1228 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1229 * @phba: pointer to lpfc hba data structure.
1230 *
1231 * This is the actual HBA-timer timeout handler to be invoked by the worker
1232 * thread whenever the HBA timer fired and HBA-timeout event posted. This
1233 * handler performs any periodic operations needed for the device. If such
1234 * periodic event has already been attended to either in the interrupt handler
1235 * or by processing slow-ring or fast-ring events within the HBA-timer
1236 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1237 * the timer for the next timeout period. If lpfc heart-beat mailbox command
1238 * is configured and there is no heart-beat mailbox command outstanding, a
1239 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1240 * has been a heart-beat mailbox command outstanding, the HBA shall be put
1241 * to offline.
1242 **/
1243 void
1244 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1245 {
1246 struct lpfc_vport **vports;
1247 LPFC_MBOXQ_t *pmboxq;
1248 struct lpfc_dmabuf *buf_ptr;
1249 int retval, i;
1250 struct lpfc_sli *psli = &phba->sli;
1251 LIST_HEAD(completions);
1252
1253 vports = lpfc_create_vport_work_array(phba);
1254 if (vports != NULL)
1255 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1256 lpfc_rcv_seq_check_edtov(vports[i]);
1257 lpfc_fdmi_num_disc_check(vports[i]);
1258 }
1259 lpfc_destroy_vport_work_array(phba, vports);
1260
1261 if ((phba->link_state == LPFC_HBA_ERROR) ||
1262 (phba->pport->load_flag & FC_UNLOADING) ||
1263 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1264 return;
1265
1266 spin_lock_irq(&phba->pport->work_port_lock);
1267
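/*
 * If an I/O completed within the last heartbeat interval the adapter is
 * clearly responsive, so skip the heartbeat mailbox and simply rearm the
 * heartbeat timer.
 */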
1268 if (time_after(phba->last_completion_time +
1269 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1270 jiffies)) {
1271 spin_unlock_irq(&phba->pport->work_port_lock);
1272 if (!phba->hb_outstanding)
1273 mod_timer(&phba->hb_tmofunc,
1274 jiffies +
1275 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1276 else
1277 mod_timer(&phba->hb_tmofunc,
1278 jiffies +
1279 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1280 return;
1281 }
1282 spin_unlock_irq(&phba->pport->work_port_lock);
1283
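/*
 * If the number of buffers on the deferred ELS buffer list is unchanged
 * since the previous heartbeat pass, release the accumulated buffers now.
 */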
1284 if (phba->elsbuf_cnt &&
1285 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1286 spin_lock_irq(&phba->hbalock);
1287 list_splice_init(&phba->elsbuf, &completions);
1288 phba->elsbuf_cnt = 0;
1289 phba->elsbuf_prev_cnt = 0;
1290 spin_unlock_irq(&phba->hbalock);
1291
1292 while (!list_empty(&completions)) {
1293 list_remove_head(&completions, buf_ptr,
1294 struct lpfc_dmabuf, list);
1295 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1296 kfree(buf_ptr);
1297 }
1298 }
1299 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1300
1301 /* If there is no heart beat outstanding, issue a heartbeat command */
1302 if (phba->cfg_enable_hba_heartbeat) {
1303 if (!phba->hb_outstanding) {
1304 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1305 (list_empty(&psli->mboxq))) {
1306 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1307 GFP_KERNEL);
1308 if (!pmboxq) {
1309 mod_timer(&phba->hb_tmofunc,
1310 jiffies +
1311 msecs_to_jiffies(1000 *
1312 LPFC_HB_MBOX_INTERVAL));
1313 return;
1314 }
1315
1316 lpfc_heart_beat(phba, pmboxq);
1317 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1318 pmboxq->vport = phba->pport;
1319 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1320 MBX_NOWAIT);
1321
1322 if (retval != MBX_BUSY &&
1323 retval != MBX_SUCCESS) {
1324 mempool_free(pmboxq,
1325 phba->mbox_mem_pool);
1326 mod_timer(&phba->hb_tmofunc,
1327 jiffies +
1328 msecs_to_jiffies(1000 *
1329 LPFC_HB_MBOX_INTERVAL));
1330 return;
1331 }
1332 phba->skipped_hb = 0;
1333 phba->hb_outstanding = 1;
1334 } else if (time_before_eq(phba->last_completion_time,
1335 phba->skipped_hb)) {
1336 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1337 "2857 Last completion time not "
1338 " updated in %d ms\n",
1339 jiffies_to_msecs(jiffies
1340 - phba->last_completion_time));
1341 } else
1342 phba->skipped_hb = jiffies;
1343
1344 mod_timer(&phba->hb_tmofunc,
1345 jiffies +
1346 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1347 return;
1348 } else {
1349 /*
1350 * If heart beat timeout called with hb_outstanding set
1351 * we need to give the hb mailbox cmd a chance to
1352 * complete or TMO.
1353 */
1354 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1355 "0459 Adapter heartbeat still out"
1356 "standing:last compl time was %d ms.\n",
1357 jiffies_to_msecs(jiffies
1358 - phba->last_completion_time));
1359 mod_timer(&phba->hb_tmofunc,
1360 jiffies +
1361 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1362 }
1363 } else {
1364 mod_timer(&phba->hb_tmofunc,
1365 jiffies +
1366 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1367 }
1368 }
1369
1370 /**
1371 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1372 * @phba: pointer to lpfc hba data structure.
1373 *
1374 * This routine is called to bring the HBA offline when HBA hardware error
1375 * other than Port Error 6 has been detected.
1376 **/
1377 static void
1378 lpfc_offline_eratt(struct lpfc_hba *phba)
1379 {
1380 struct lpfc_sli *psli = &phba->sli;
1381
1382 spin_lock_irq(&phba->hbalock);
1383 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1384 spin_unlock_irq(&phba->hbalock);
1385 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1386
1387 lpfc_offline(phba);
1388 lpfc_reset_barrier(phba);
1389 spin_lock_irq(&phba->hbalock);
1390 lpfc_sli_brdreset(phba);
1391 spin_unlock_irq(&phba->hbalock);
1392 lpfc_hba_down_post(phba);
1393 lpfc_sli_brdready(phba, HS_MBRDY);
1394 lpfc_unblock_mgmt_io(phba);
1395 phba->link_state = LPFC_HBA_ERROR;
1396 return;
1397 }
1398
1399 /**
1400 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1401 * @phba: pointer to lpfc hba data structure.
1402 *
1403 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1404 * other than Port Error 6 has been detected.
1405 **/
1406 void
1407 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1408 {
1409 spin_lock_irq(&phba->hbalock);
1410 phba->link_state = LPFC_HBA_ERROR;
1411 spin_unlock_irq(&phba->hbalock);
1412
1413 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1414 lpfc_offline(phba);
1415 lpfc_hba_down_post(phba);
1416 lpfc_unblock_mgmt_io(phba);
1417 }
1418
1419 /**
1420 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1421 * @phba: pointer to lpfc hba data structure.
1422 *
1423 * This routine is invoked to handle the deferred HBA hardware error
1424 * conditions. This type of error is indicated by the HBA setting ER1
1425 * and another ER bit in the host status register. The driver will
1426 * wait until the ER1 bit clears before handling the error condition.
1427 **/
1428 static void
1429 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1430 {
1431 uint32_t old_host_status = phba->work_hs;
1432 struct lpfc_sli *psli = &phba->sli;
1433
1434 /* If the pci channel is offline, ignore possible errors,
1435 * since we cannot communicate with the pci card anyway.
1436 */
1437 if (pci_channel_offline(phba->pcidev)) {
1438 spin_lock_irq(&phba->hbalock);
1439 phba->hba_flag &= ~DEFER_ERATT;
1440 spin_unlock_irq(&phba->hbalock);
1441 return;
1442 }
1443
1444 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1445 "0479 Deferred Adapter Hardware Error "
1446 "Data: x%x x%x x%x\n",
1447 phba->work_hs,
1448 phba->work_status[0], phba->work_status[1]);
1449
1450 spin_lock_irq(&phba->hbalock);
1451 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1452 spin_unlock_irq(&phba->hbalock);
1453
1454
1455 /*
1456 * Firmware stops when it triggers an error attention. That could cause
1457 * I/Os to be dropped by the firmware. Error out the iocbs on the txcmplq
1458 * and let the SCSI layer retry them after the link is re-established.
1459 */
1460 lpfc_sli_abort_fcp_rings(phba);
1461
1462 /*
1463 * There was a firmware error. Take the hba offline and then
1464 * attempt to restart it.
1465 */
1466 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1467 lpfc_offline(phba);
1468
1469 /* Wait for the ER1 bit to clear.*/
1470 while (phba->work_hs & HS_FFER1) {
1471 msleep(100);
1472 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1473 phba->work_hs = UNPLUG_ERR ;
1474 break;
1475 }
1476 /* If driver is unloading let the worker thread continue */
1477 if (phba->pport->load_flag & FC_UNLOADING) {
1478 phba->work_hs = 0;
1479 break;
1480 }
1481 }
1482
1483 /*
1484 * This is to protect against a race condition in which the
1485 * first write to the host attention register clears the
1486 * host status register.
1487 */
1488 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1489 phba->work_hs = old_host_status & ~HS_FFER1;
1490
1491 spin_lock_irq(&phba->hbalock);
1492 phba->hba_flag &= ~DEFER_ERATT;
1493 spin_unlock_irq(&phba->hbalock);
1494 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1495 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1496 }
1497
1498 static void
1499 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1500 {
1501 struct lpfc_board_event_header board_event;
1502 struct Scsi_Host *shost;
1503
1504 board_event.event_type = FC_REG_BOARD_EVENT;
1505 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1506 shost = lpfc_shost_from_vport(phba->pport);
1507 fc_host_post_vendor_event(shost, fc_get_event_number(),
1508 sizeof(board_event),
1509 (char *) &board_event,
1510 LPFC_NL_VENDOR_ID);
1511 }
1512
1513 /**
1514 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1515 * @phba: pointer to lpfc hba data structure.
1516 *
1517 * This routine is invoked to handle the following HBA hardware error
1518 * conditions:
1519 * 1 - HBA error attention interrupt
1520 * 2 - DMA ring index out of range
1521 * 3 - Mailbox command came back as unknown
1522 **/
1523 static void
1524 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1525 {
1526 struct lpfc_vport *vport = phba->pport;
1527 struct lpfc_sli *psli = &phba->sli;
1528 uint32_t event_data;
1529 unsigned long temperature;
1530 struct temp_event temp_event_data;
1531 struct Scsi_Host *shost;
1532
1533 /* If the pci channel is offline, ignore possible errors,
1534 * since we cannot communicate with the pci card anyway.
1535 */
1536 if (pci_channel_offline(phba->pcidev)) {
1537 spin_lock_irq(&phba->hbalock);
1538 phba->hba_flag &= ~DEFER_ERATT;
1539 spin_unlock_irq(&phba->hbalock);
1540 return;
1541 }
1542
1543 /* If resets are disabled then leave the HBA alone and return */
1544 if (!phba->cfg_enable_hba_reset)
1545 return;
1546
1547 /* Send an internal error event to mgmt application */
1548 lpfc_board_errevt_to_mgmt(phba);
1549
1550 if (phba->hba_flag & DEFER_ERATT)
1551 lpfc_handle_deferred_eratt(phba);
1552
1553 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1554 if (phba->work_hs & HS_FFER6)
1555 /* Re-establishing Link */
1556 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1557 "1301 Re-establishing Link "
1558 "Data: x%x x%x x%x\n",
1559 phba->work_hs, phba->work_status[0],
1560 phba->work_status[1]);
1561 if (phba->work_hs & HS_FFER8)
1562 /* Device Zeroization */
1563 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1564 "2861 Host Authentication device "
1565 "zeroization Data:x%x x%x x%x\n",
1566 phba->work_hs, phba->work_status[0],
1567 phba->work_status[1]);
1568
1569 spin_lock_irq(&phba->hbalock);
1570 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1571 spin_unlock_irq(&phba->hbalock);
1572
1573 /*
1574 * Firmware stops when it triggers an error attention with HS_FFER6.
1575 * That could cause I/Os to be dropped by the firmware.
1576 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
1577 * retry them after the link is re-established.
1578 */
1579 lpfc_sli_abort_fcp_rings(phba);
1580
1581 /*
1582 * There was a firmware error. Take the hba offline and then
1583 * attempt to restart it.
1584 */
1585 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1586 lpfc_offline(phba);
1587 lpfc_sli_brdrestart(phba);
1588 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
1589 lpfc_unblock_mgmt_io(phba);
1590 return;
1591 }
1592 lpfc_unblock_mgmt_io(phba);
1593 } else if (phba->work_hs & HS_CRIT_TEMP) {
1594 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1595 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1596 temp_event_data.event_code = LPFC_CRIT_TEMP;
1597 temp_event_data.data = (uint32_t)temperature;
1598
1599 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1600 "0406 Adapter maximum temperature exceeded "
1601 "(%ld), taking this port offline "
1602 "Data: x%x x%x x%x\n",
1603 temperature, phba->work_hs,
1604 phba->work_status[0], phba->work_status[1]);
1605
1606 shost = lpfc_shost_from_vport(phba->pport);
1607 fc_host_post_vendor_event(shost, fc_get_event_number(),
1608 sizeof(temp_event_data),
1609 (char *) &temp_event_data,
1610 SCSI_NL_VID_TYPE_PCI
1611 | PCI_VENDOR_ID_EMULEX);
1612
1613 spin_lock_irq(&phba->hbalock);
1614 phba->over_temp_state = HBA_OVER_TEMP;
1615 spin_unlock_irq(&phba->hbalock);
1616 lpfc_offline_eratt(phba);
1617
1618 } else {
1619 /* The if clause above forces this code path when the status
1620 * failure is a value other than FFER6. Do not call the offline
1621 * twice. This is the adapter hardware error path.
1622 */
1623 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1624 "0457 Adapter Hardware Error "
1625 "Data: x%x x%x x%x\n",
1626 phba->work_hs,
1627 phba->work_status[0], phba->work_status[1]);
1628
1629 event_data = FC_REG_DUMP_EVENT;
1630 shost = lpfc_shost_from_vport(vport);
1631 fc_host_post_vendor_event(shost, fc_get_event_number(),
1632 sizeof(event_data), (char *) &event_data,
1633 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1634
1635 lpfc_offline_eratt(phba);
1636 }
1637 return;
1638 }
1639
1640 /**
1641 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1642 * @phba: pointer to lpfc hba data structure.
1643 * @mbx_action: flag for mailbox shutdown action.
1644 *
1645 * This routine is invoked to perform an SLI4 port PCI function reset in
1646 * response to port status register polling attention. It waits for port
1647 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1648 * During this process, interrupt vectors are freed and later requested
1649 * for handling possible port resource change.
1650 **/
1651 static int
1652 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1653 bool en_rn_msg)
1654 {
1655 int rc;
1656 uint32_t intr_mode;
1657
1658 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1659 LPFC_SLI_INTF_IF_TYPE_2) {
1660 /*
1661 * On an error status condition, the driver needs to wait for the
1662 * port to be ready before performing the reset.
1663 */
1664 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1665 if (rc)
1666 return rc;
1667 }
1668
1669 /* need reset: attempt for port recovery */
1670 if (en_rn_msg)
1671 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1672 "2887 Reset Needed: Attempting Port "
1673 "Recovery...\n");
1674 lpfc_offline_prep(phba, mbx_action);
1675 lpfc_offline(phba);
1676 /* release interrupt for possible resource change */
1677 lpfc_sli4_disable_intr(phba);
1678 lpfc_sli_brdrestart(phba);
1679 /* request and enable interrupt */
1680 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1681 if (intr_mode == LPFC_INTR_ERROR) {
1682 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1683 "3175 Failed to enable interrupt\n");
1684 return -EIO;
1685 }
1686 phba->intr_mode = intr_mode;
1687 rc = lpfc_online(phba);
1688 if (rc == 0)
1689 lpfc_unblock_mgmt_io(phba);
1690
1691 return rc;
1692 }
1693
1694 /**
1695 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1696 * @phba: pointer to lpfc hba data structure.
1697 *
1698 * This routine is invoked to handle the SLI4 HBA hardware error attention
1699 * conditions.
1700 **/
1701 static void
1702 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1703 {
1704 struct lpfc_vport *vport = phba->pport;
1705 uint32_t event_data;
1706 struct Scsi_Host *shost;
1707 uint32_t if_type;
1708 struct lpfc_register portstat_reg = {0};
1709 uint32_t reg_err1, reg_err2;
1710 uint32_t uerrlo_reg, uemasklo_reg;
1711 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1712 bool en_rn_msg = true;
1713 struct temp_event temp_event_data;
1714 struct lpfc_register portsmphr_reg;
1715 int rc, i;
1716
1717 /* If the pci channel is offline, ignore possible errors, since
1718 * we cannot communicate with the pci card anyway.
1719 */
1720 if (pci_channel_offline(phba->pcidev))
1721 return;
1722
1723 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
1724 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1725 switch (if_type) {
1726 case LPFC_SLI_INTF_IF_TYPE_0:
1727 pci_rd_rc1 = lpfc_readl(
1728 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1729 &uerrlo_reg);
1730 pci_rd_rc2 = lpfc_readl(
1731 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1732 &uemasklo_reg);
1733 /* consider PCI bus read error as pci_channel_offline */
1734 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1735 return;
1736 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1737 lpfc_sli4_offline_eratt(phba);
1738 return;
1739 }
1740 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1741 "7623 Checking UE recoverable");
1742
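/* Poll the port semaphore, one second per pass for up to ue_to_sr
 * milliseconds, waiting for the port to report a recoverable UE state.
 */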
1743 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1744 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1745 &portsmphr_reg.word0))
1746 continue;
1747
1748 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1749 &portsmphr_reg);
1750 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1751 LPFC_PORT_SEM_UE_RECOVERABLE)
1752 break;
1753 /* Sleep for 1 second before checking the semaphore again */
1754 msleep(1000);
1755 }
1756
1757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1758 "4827 smphr_port_status x%x : Waited %dSec",
1759 smphr_port_status, i);
1760
1761 /* Recoverable UE, reset the HBA device */
1762 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1763 LPFC_PORT_SEM_UE_RECOVERABLE) {
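/* Wait up to 20 seconds for the port to report READY, then attempt
 * the PCI function reset to recover it.
 */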
1764 for (i = 0; i < 20; i++) {
1765 msleep(1000);
1766 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1767 &portsmphr_reg.word0) &&
1768 (LPFC_POST_STAGE_PORT_READY ==
1769 bf_get(lpfc_port_smphr_port_status,
1770 &portsmphr_reg))) {
1771 rc = lpfc_sli4_port_sta_fn_reset(phba,
1772 LPFC_MBX_NO_WAIT, en_rn_msg);
1773 if (rc == 0)
1774 return;
1775 lpfc_printf_log(phba,
1776 KERN_ERR, LOG_INIT,
1777 "4215 Failed to recover UE");
1778 break;
1779 }
1780 }
1781 }
1782 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1783 "7624 Firmware not ready: Failing UE recovery,"
1784 " waited %dSec", i);
1785 lpfc_sli4_offline_eratt(phba);
1786 break;
1787
1788 case LPFC_SLI_INTF_IF_TYPE_2:
1789 pci_rd_rc1 = lpfc_readl(
1790 phba->sli4_hba.u.if_type2.STATUSregaddr,
1791 &portstat_reg.word0);
1792 /* consider PCI bus read error as pci_channel_offline */
1793 if (pci_rd_rc1 == -EIO) {
1794 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1795 "3151 PCI bus read access failure: x%x\n",
1796 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
1797 return;
1798 }
1799 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1800 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1801 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1802 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1803 "2889 Port Overtemperature event, "
1804 "taking port offline Data: x%x x%x\n",
1805 reg_err1, reg_err2);
1806
1807 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
1808 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1809 temp_event_data.event_code = LPFC_CRIT_TEMP;
1810 temp_event_data.data = 0xFFFFFFFF;
1811
1812 shost = lpfc_shost_from_vport(phba->pport);
1813 fc_host_post_vendor_event(shost, fc_get_event_number(),
1814 sizeof(temp_event_data),
1815 (char *)&temp_event_data,
1816 SCSI_NL_VID_TYPE_PCI
1817 | PCI_VENDOR_ID_EMULEX);
1818
1819 spin_lock_irq(&phba->hbalock);
1820 phba->over_temp_state = HBA_OVER_TEMP;
1821 spin_unlock_irq(&phba->hbalock);
1822 lpfc_sli4_offline_eratt(phba);
1823 return;
1824 }
1825 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1826 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
1827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1828 "3143 Port Down: Firmware Update "
1829 "Detected\n");
1830 en_rn_msg = false;
1831 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1832 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1833 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1834 "3144 Port Down: Debug Dump\n");
1835 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1836 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1837 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1838 "3145 Port Down: Provisioning\n");
1839
1840 /* If resets are disabled then leave the HBA alone and return */
1841 if (!phba->cfg_enable_hba_reset)
1842 return;
1843
1844 /* Check port status register for function reset */
1845 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1846 en_rn_msg);
1847 if (rc == 0) {
1848 /* don't report event on forced debug dump */
1849 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1850 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1851 return;
1852 else
1853 break;
1854 }
1855 /* fall through if unable to recover */
1856 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1857 "3152 Unrecoverable error, bring the port "
1858 "offline\n");
1859 lpfc_sli4_offline_eratt(phba);
1860 break;
1861 case LPFC_SLI_INTF_IF_TYPE_1:
1862 default:
1863 break;
1864 }
1865 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1866 "3123 Report dump event to upper layer\n");
1867 /* Send an internal error event to mgmt application */
1868 lpfc_board_errevt_to_mgmt(phba);
1869
1870 event_data = FC_REG_DUMP_EVENT;
1871 shost = lpfc_shost_from_vport(vport);
1872 fc_host_post_vendor_event(shost, fc_get_event_number(),
1873 sizeof(event_data), (char *) &event_data,
1874 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1875 }
1876
1877 /**
1878 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1879 * @phba: pointer to lpfc HBA data structure.
1880 *
1881 * This routine wraps the actual SLI3 or SLI4 HBA error attention handling
1882 * routine, invoked through the API jump table function pointer in the
1883 * lpfc_hba struct.
1884 *
1885 * This wrapper itself does not return a value.
1887 **/
1888 void
1889 lpfc_handle_eratt(struct lpfc_hba *phba)
1890 {
1891 (*phba->lpfc_handle_eratt)(phba);
1892 }
1893
1894 /**
1895 * lpfc_handle_latt - The HBA link event handler
1896 * @phba: pointer to lpfc hba data structure.
1897 *
1898 * This routine is invoked from the worker thread to handle a HBA host
1899 * attention link event. SLI3 only.
1900 **/
1901 void
1902 lpfc_handle_latt(struct lpfc_hba *phba)
1903 {
1904 struct lpfc_vport *vport = phba->pport;
1905 struct lpfc_sli *psli = &phba->sli;
1906 LPFC_MBOXQ_t *pmb;
1907 volatile uint32_t control;
1908 struct lpfc_dmabuf *mp;
1909 int rc = 0;
1910
1911 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1912 if (!pmb) {
1913 rc = 1;
1914 goto lpfc_handle_latt_err_exit;
1915 }
1916
1917 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1918 if (!mp) {
1919 rc = 2;
1920 goto lpfc_handle_latt_free_pmb;
1921 }
1922
1923 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1924 if (!mp->virt) {
1925 rc = 3;
1926 goto lpfc_handle_latt_free_mp;
1927 }
1928
1929 /* Cleanup any outstanding ELS commands */
1930 lpfc_els_flush_all_cmd(phba);
1931
1932 psli->slistat.link_event++;
1933 lpfc_read_topology(phba, pmb, mp);
1934 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
1935 pmb->vport = vport;
1936 /* Block ELS IOCBs until we have processed this mbox command */
1937 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1938 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1939 if (rc == MBX_NOT_FINISHED) {
1940 rc = 4;
1941 goto lpfc_handle_latt_free_mbuf;
1942 }
1943
1944 /* Clear Link Attention in HA REG */
1945 spin_lock_irq(&phba->hbalock);
1946 writel(HA_LATT, phba->HAregaddr);
1947 readl(phba->HAregaddr); /* flush */
1948 spin_unlock_irq(&phba->hbalock);
1949
1950 return;
1951
1952 lpfc_handle_latt_free_mbuf:
1953 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1954 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1955 lpfc_handle_latt_free_mp:
1956 kfree(mp);
1957 lpfc_handle_latt_free_pmb:
1958 mempool_free(pmb, phba->mbox_mem_pool);
1959 lpfc_handle_latt_err_exit:
1960 /* Enable Link attention interrupts */
1961 spin_lock_irq(&phba->hbalock);
1962 psli->sli_flag |= LPFC_PROCESS_LA;
1963 control = readl(phba->HCregaddr);
1964 control |= HC_LAINT_ENA;
1965 writel(control, phba->HCregaddr);
1966 readl(phba->HCregaddr); /* flush */
1967
1968 /* Clear Link Attention in HA REG */
1969 writel(HA_LATT, phba->HAregaddr);
1970 readl(phba->HAregaddr); /* flush */
1971 spin_unlock_irq(&phba->hbalock);
1972 lpfc_linkdown(phba);
1973 phba->link_state = LPFC_HBA_ERROR;
1974
1975 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1976 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1977
1978 return;
1979 }
1980
1981 /**
1982 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1983 * @phba: pointer to lpfc hba data structure.
1984 * @vpd: pointer to the vital product data.
1985 * @len: length of the vital product data in bytes.
1986 *
1987 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1988 * an array of characters. In this routine, the ModelName, ProgramType, and
1989 * ModelDesc, etc. fields of the phba data structure will be populated.
1990 *
1991 * Return codes
1992 * 0 - pointer to the VPD passed in is NULL
1993 * 1 - success
1994 **/
1995 int
1996 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1997 {
1998 uint8_t lenlo, lenhi;
1999 int Length;
2000 int i, j;
2001 int finished = 0;
2002 int index = 0;
2003
2004 if (!vpd)
2005 return 0;
2006
2007 /* Vital Product */
2008 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2009 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2010 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2011 (uint32_t) vpd[3]);
2012 while (!finished && (index < (len - 4))) {
2013 switch (vpd[index]) {
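/* PCI VPD resource tags: 0x82/0x91 identifier and read/write data
 * (skipped over), 0x90 read-only keyword data (parsed below),
 * 0x78 end tag.
 */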
2014 case 0x82:
2015 case 0x91:
2016 index += 1;
2017 lenlo = vpd[index];
2018 index += 1;
2019 lenhi = vpd[index];
2020 index += 1;
2021 i = ((((unsigned short)lenhi) << 8) + lenlo);
2022 index += i;
2023 break;
2024 case 0x90:
2025 index += 1;
2026 lenlo = vpd[index];
2027 index += 1;
2028 lenhi = vpd[index];
2029 index += 1;
2030 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2031 if (Length > len - index)
2032 Length = len - index;
2033 while (Length > 0) {
2034 /* Look for Serial Number */
2035 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2036 index += 2;
2037 i = vpd[index];
2038 index += 1;
2039 j = 0;
2040 Length -= (3+i);
2041 while(i--) {
2042 phba->SerialNumber[j++] = vpd[index++];
2043 if (j == 31)
2044 break;
2045 }
2046 phba->SerialNumber[j] = 0;
2047 continue;
2048 }
2049 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2050 phba->vpd_flag |= VPD_MODEL_DESC;
2051 index += 2;
2052 i = vpd[index];
2053 index += 1;
2054 j = 0;
2055 Length -= (3+i);
2056 while(i--) {
2057 phba->ModelDesc[j++] = vpd[index++];
2058 if (j == 255)
2059 break;
2060 }
2061 phba->ModelDesc[j] = 0;
2062 continue;
2063 }
2064 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2065 phba->vpd_flag |= VPD_MODEL_NAME;
2066 index += 2;
2067 i = vpd[index];
2068 index += 1;
2069 j = 0;
2070 Length -= (3+i);
2071 while(i--) {
2072 phba->ModelName[j++] = vpd[index++];
2073 if (j == 79)
2074 break;
2075 }
2076 phba->ModelName[j] = 0;
2077 continue;
2078 }
2079 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2080 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2081 index += 2;
2082 i = vpd[index];
2083 index += 1;
2084 j = 0;
2085 Length -= (3+i);
2086 while(i--) {
2087 phba->ProgramType[j++] = vpd[index++];
2088 if (j == 255)
2089 break;
2090 }
2091 phba->ProgramType[j] = 0;
2092 continue;
2093 }
2094 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2095 phba->vpd_flag |= VPD_PORT;
2096 index += 2;
2097 i = vpd[index];
2098 index += 1;
2099 j = 0;
2100 Length -= (3+i);
2101 while(i--) {
2102 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2103 (phba->sli4_hba.pport_name_sta ==
2104 LPFC_SLI4_PPNAME_GET)) {
2105 j++;
2106 index++;
2107 } else
2108 phba->Port[j++] = vpd[index++];
2109 if (j == 19)
2110 break;
2111 }
2112 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2113 (phba->sli4_hba.pport_name_sta ==
2114 LPFC_SLI4_PPNAME_NON))
2115 phba->Port[j] = 0;
2116 continue;
2117 }
2118 else {
2119 index += 2;
2120 i = vpd[index];
2121 index += 1;
2122 index += i;
2123 Length -= (3 + i);
2124 }
2125 }
2126 finished = 0;
2127 break;
2128 case 0x78:
2129 finished = 1;
2130 break;
2131 default:
2132 index ++;
2133 break;
2134 }
2135 }
2136
2137 return(1);
2138 }
2139
2140 /**
2141 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2142 * @phba: pointer to lpfc hba data structure.
2143 * @mdp: pointer to the data structure to hold the derived model name.
2144 * @descp: pointer to the data structure to hold the derived description.
2145 *
2146 * This routine retrieves the HBA's description based on its registered PCI
2147 * device ID. The @descp passed into this function points to an array of 256
2148 * chars; on return it holds the model name, maximum speed, and host bus type.
2149 * The @mdp passed into this function points to an array of 80 chars. When the
2150 * function returns, the @mdp will be filled with the model name.
2151 **/
2152 static void
2153 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2154 {
2155 lpfc_vpd_t *vp;
2156 uint16_t dev_id = phba->pcidev->device;
2157 int max_speed;
2158 int GE = 0;
2159 int oneConnect = 0; /* default is not a oneConnect */
2160 struct {
2161 char *name;
2162 char *bus;
2163 char *function;
2164 } m = {"<Unknown>", "", ""};
2165
2166 if (mdp && mdp[0] != '\0'
2167 && descp && descp[0] != '\0')
2168 return;
2169
2170 if (phba->lmt & LMT_32Gb)
2171 max_speed = 32;
2172 else if (phba->lmt & LMT_16Gb)
2173 max_speed = 16;
2174 else if (phba->lmt & LMT_10Gb)
2175 max_speed = 10;
2176 else if (phba->lmt & LMT_8Gb)
2177 max_speed = 8;
2178 else if (phba->lmt & LMT_4Gb)
2179 max_speed = 4;
2180 else if (phba->lmt & LMT_2Gb)
2181 max_speed = 2;
2182 else if (phba->lmt & LMT_1Gb)
2183 max_speed = 1;
2184 else
2185 max_speed = 0;
2186
2187 vp = &phba->vpd;
2188
2189 switch (dev_id) {
2190 case PCI_DEVICE_ID_FIREFLY:
2191 m = (typeof(m)){"LP6000", "PCI",
2192 "Obsolete, Unsupported Fibre Channel Adapter"};
2193 break;
2194 case PCI_DEVICE_ID_SUPERFLY:
2195 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2196 m = (typeof(m)){"LP7000", "PCI", ""};
2197 else
2198 m = (typeof(m)){"LP7000E", "PCI", ""};
2199 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2200 break;
2201 case PCI_DEVICE_ID_DRAGONFLY:
2202 m = (typeof(m)){"LP8000", "PCI",
2203 "Obsolete, Unsupported Fibre Channel Adapter"};
2204 break;
2205 case PCI_DEVICE_ID_CENTAUR:
2206 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2207 m = (typeof(m)){"LP9002", "PCI", ""};
2208 else
2209 m = (typeof(m)){"LP9000", "PCI", ""};
2210 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2211 break;
2212 case PCI_DEVICE_ID_RFLY:
2213 m = (typeof(m)){"LP952", "PCI",
2214 "Obsolete, Unsupported Fibre Channel Adapter"};
2215 break;
2216 case PCI_DEVICE_ID_PEGASUS:
2217 m = (typeof(m)){"LP9802", "PCI-X",
2218 "Obsolete, Unsupported Fibre Channel Adapter"};
2219 break;
2220 case PCI_DEVICE_ID_THOR:
2221 m = (typeof(m)){"LP10000", "PCI-X",
2222 "Obsolete, Unsupported Fibre Channel Adapter"};
2223 break;
2224 case PCI_DEVICE_ID_VIPER:
2225 m = (typeof(m)){"LPX1000", "PCI-X",
2226 "Obsolete, Unsupported Fibre Channel Adapter"};
2227 break;
2228 case PCI_DEVICE_ID_PFLY:
2229 m = (typeof(m)){"LP982", "PCI-X",
2230 "Obsolete, Unsupported Fibre Channel Adapter"};
2231 break;
2232 case PCI_DEVICE_ID_TFLY:
2233 m = (typeof(m)){"LP1050", "PCI-X",
2234 "Obsolete, Unsupported Fibre Channel Adapter"};
2235 break;
2236 case PCI_DEVICE_ID_HELIOS:
2237 m = (typeof(m)){"LP11000", "PCI-X2",
2238 "Obsolete, Unsupported Fibre Channel Adapter"};
2239 break;
2240 case PCI_DEVICE_ID_HELIOS_SCSP:
2241 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2242 "Obsolete, Unsupported Fibre Channel Adapter"};
2243 break;
2244 case PCI_DEVICE_ID_HELIOS_DCSP:
2245 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2246 "Obsolete, Unsupported Fibre Channel Adapter"};
2247 break;
2248 case PCI_DEVICE_ID_NEPTUNE:
2249 m = (typeof(m)){"LPe1000", "PCIe",
2250 "Obsolete, Unsupported Fibre Channel Adapter"};
2251 break;
2252 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2253 m = (typeof(m)){"LPe1000-SP", "PCIe",
2254 "Obsolete, Unsupported Fibre Channel Adapter"};
2255 break;
2256 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2257 m = (typeof(m)){"LPe1002-SP", "PCIe",
2258 "Obsolete, Unsupported Fibre Channel Adapter"};
2259 break;
2260 case PCI_DEVICE_ID_BMID:
2261 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2262 break;
2263 case PCI_DEVICE_ID_BSMB:
2264 m = (typeof(m)){"LP111", "PCI-X2",
2265 "Obsolete, Unsupported Fibre Channel Adapter"};
2266 break;
2267 case PCI_DEVICE_ID_ZEPHYR:
2268 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2269 break;
2270 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2271 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2272 break;
2273 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2274 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2275 GE = 1;
2276 break;
2277 case PCI_DEVICE_ID_ZMID:
2278 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2279 break;
2280 case PCI_DEVICE_ID_ZSMB:
2281 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2282 break;
2283 case PCI_DEVICE_ID_LP101:
2284 m = (typeof(m)){"LP101", "PCI-X",
2285 "Obsolete, Unsupported Fibre Channel Adapter"};
2286 break;
2287 case PCI_DEVICE_ID_LP10000S:
2288 m = (typeof(m)){"LP10000-S", "PCI",
2289 "Obsolete, Unsupported Fibre Channel Adapter"};
2290 break;
2291 case PCI_DEVICE_ID_LP11000S:
2292 m = (typeof(m)){"LP11000-S", "PCI-X2",
2293 "Obsolete, Unsupported Fibre Channel Adapter"};
2294 break;
2295 case PCI_DEVICE_ID_LPE11000S:
2296 m = (typeof(m)){"LPe11000-S", "PCIe",
2297 "Obsolete, Unsupported Fibre Channel Adapter"};
2298 break;
2299 case PCI_DEVICE_ID_SAT:
2300 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2301 break;
2302 case PCI_DEVICE_ID_SAT_MID:
2303 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2304 break;
2305 case PCI_DEVICE_ID_SAT_SMB:
2306 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2307 break;
2308 case PCI_DEVICE_ID_SAT_DCSP:
2309 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2310 break;
2311 case PCI_DEVICE_ID_SAT_SCSP:
2312 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2313 break;
2314 case PCI_DEVICE_ID_SAT_S:
2315 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2316 break;
2317 case PCI_DEVICE_ID_HORNET:
2318 m = (typeof(m)){"LP21000", "PCIe",
2319 "Obsolete, Unsupported FCoE Adapter"};
2320 GE = 1;
2321 break;
2322 case PCI_DEVICE_ID_PROTEUS_VF:
2323 m = (typeof(m)){"LPev12000", "PCIe IOV",
2324 "Obsolete, Unsupported Fibre Channel Adapter"};
2325 break;
2326 case PCI_DEVICE_ID_PROTEUS_PF:
2327 m = (typeof(m)){"LPev12000", "PCIe IOV",
2328 "Obsolete, Unsupported Fibre Channel Adapter"};
2329 break;
2330 case PCI_DEVICE_ID_PROTEUS_S:
2331 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2332 "Obsolete, Unsupported Fibre Channel Adapter"};
2333 break;
2334 case PCI_DEVICE_ID_TIGERSHARK:
2335 oneConnect = 1;
2336 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2337 break;
2338 case PCI_DEVICE_ID_TOMCAT:
2339 oneConnect = 1;
2340 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2341 break;
2342 case PCI_DEVICE_ID_FALCON:
2343 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2344 "EmulexSecure Fibre"};
2345 break;
2346 case PCI_DEVICE_ID_BALIUS:
2347 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2348 "Obsolete, Unsupported Fibre Channel Adapter"};
2349 break;
2350 case PCI_DEVICE_ID_LANCER_FC:
2351 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2352 break;
2353 case PCI_DEVICE_ID_LANCER_FC_VF:
2354 m = (typeof(m)){"LPe16000", "PCIe",
2355 "Obsolete, Unsupported Fibre Channel Adapter"};
2356 break;
2357 case PCI_DEVICE_ID_LANCER_FCOE:
2358 oneConnect = 1;
2359 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2360 break;
2361 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2362 oneConnect = 1;
2363 m = (typeof(m)){"OCe15100", "PCIe",
2364 "Obsolete, Unsupported FCoE"};
2365 break;
2366 case PCI_DEVICE_ID_LANCER_G6_FC:
2367 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2368 break;
2369 case PCI_DEVICE_ID_SKYHAWK:
2370 case PCI_DEVICE_ID_SKYHAWK_VF:
2371 oneConnect = 1;
2372 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2373 break;
2374 default:
2375 m = (typeof(m)){"Unknown", "", ""};
2376 break;
2377 }
2378
2379 if (mdp && mdp[0] == '\0')
2380 snprintf(mdp, 79,"%s", m.name);
2381 /*
2382 * OneConnect HBAs require special processing; they are all initiators
2383 * and the port number is appended to the description.
2384 */
2385 if (descp && descp[0] == '\0') {
2386 if (oneConnect)
2387 snprintf(descp, 255,
2388 "Emulex OneConnect %s, %s Initiator %s",
2389 m.name, m.function,
2390 phba->Port);
2391 else if (max_speed == 0)
2392 snprintf(descp, 255,
2393 "Emulex %s %s %s",
2394 m.name, m.bus, m.function);
2395 else
2396 snprintf(descp, 255,
2397 "Emulex %s %d%s %s %s",
2398 m.name, max_speed, (GE) ? "GE" : "Gb",
2399 m.bus, m.function);
2400 }
2401 }
2402
2403 /**
2404 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2405 * @phba: pointer to lpfc hba data structure.
2406 * @pring: pointer to an IOCB ring.
2407 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2408 *
2409 * This routine posts a given number of IOCBs with the associated DMA buffer
2410 * descriptors specified by the cnt argument to the given IOCB ring.
2411 *
2412 * Return codes
2413 * The number of IOCBs NOT able to be posted to the IOCB ring.
2414 **/
2415 int
2416 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2417 {
2418 IOCB_t *icmd;
2419 struct lpfc_iocbq *iocb;
2420 struct lpfc_dmabuf *mp1, *mp2;
2421
2422 cnt += pring->missbufcnt;
2423
2424 /* While there are buffers to post */
2425 while (cnt > 0) {
2426 /* Allocate buffer for command iocb */
2427 iocb = lpfc_sli_get_iocbq(phba);
2428 if (iocb == NULL) {
2429 pring->missbufcnt = cnt;
2430 return cnt;
2431 }
2432 icmd = &iocb->iocb;
2433
2434 /* 2 buffers can be posted per command */
2435 /* Allocate buffer to post */
2436 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2437 if (mp1)
2438 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2439 if (!mp1 || !mp1->virt) {
2440 kfree(mp1);
2441 lpfc_sli_release_iocbq(phba, iocb);
2442 pring->missbufcnt = cnt;
2443 return cnt;
2444 }
2445
2446 INIT_LIST_HEAD(&mp1->list);
2447 /* Allocate buffer to post */
2448 if (cnt > 1) {
2449 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2450 if (mp2)
2451 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2452 &mp2->phys);
2453 if (!mp2 || !mp2->virt) {
2454 kfree(mp2);
2455 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2456 kfree(mp1);
2457 lpfc_sli_release_iocbq(phba, iocb);
2458 pring->missbufcnt = cnt;
2459 return cnt;
2460 }
2461
2462 INIT_LIST_HEAD(&mp2->list);
2463 } else {
2464 mp2 = NULL;
2465 }
2466
2467 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2468 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2469 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2470 icmd->ulpBdeCount = 1;
2471 cnt--;
2472 if (mp2) {
2473 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2474 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2475 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2476 cnt--;
2477 icmd->ulpBdeCount = 2;
2478 }
2479
2480 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2481 icmd->ulpLe = 1;
2482
2483 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2484 IOCB_ERROR) {
2485 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2486 kfree(mp1);
2487 cnt++;
2488 if (mp2) {
2489 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2490 kfree(mp2);
2491 cnt++;
2492 }
2493 lpfc_sli_release_iocbq(phba, iocb);
2494 pring->missbufcnt = cnt;
2495 return cnt;
2496 }
2497 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2498 if (mp2)
2499 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2500 }
2501 pring->missbufcnt = 0;
2502 return 0;
2503 }
2504
2505 /**
2506 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2507 * @phba: pointer to lpfc hba data structure.
2508 *
2509 * This routine posts initial receive IOCB buffers to the ELS ring. The
2510 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2511 * set to 64 IOCBs. SLI3 only.
2512 *
2513 * Return codes
2514 * 0 - success (currently always success)
2515 **/
2516 static int
2517 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2518 {
2519 struct lpfc_sli *psli = &phba->sli;
2520
2521 /* Ring 0, ELS / CT buffers */
2522 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2523 /* Ring 2 - FCP no buffers needed */
2524
2525 return 0;
2526 }
2527
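/* S(N,V) rotates the 32-bit value V left by N bits; it is the rotate
 * helper used by the SHA-1 style hashing routines below.
 */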
2528 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2529
2530 /**
2531 * lpfc_sha_init - Set up initial array of hash table entries
2532 * @HashResultPointer: pointer to an array as hash table.
2533 *
2534 * This routine sets up the initial values to the array of hash table entries
2535 * for the LC HBAs.
2536 **/
2537 static void
2538 lpfc_sha_init(uint32_t * HashResultPointer)
2539 {
2540 HashResultPointer[0] = 0x67452301;
2541 HashResultPointer[1] = 0xEFCDAB89;
2542 HashResultPointer[2] = 0x98BADCFE;
2543 HashResultPointer[3] = 0x10325476;
2544 HashResultPointer[4] = 0xC3D2E1F0;
2545 }
2546
2547 /**
2548 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2549 * @HashResultPointer: pointer to an initial/result hash table.
2550 * @HashWorkingPointer: pointer to a working hash table.
2551 *
2552 * This routine iterates the initial hash table pointed to by
2553 * @HashResultPointer with the values from the working hash table pointed to
2554 * by @HashWorkingPointer. The results are put back into the initial hash
2555 * table, which is returned through @HashResultPointer as the result table.
2556 **/
2557 static void
2558 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2559 {
2560 int t;
2561 uint32_t TEMP;
2562 uint32_t A, B, C, D, E;
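/* Expand the first 16 working words into the full 80-word message
 * schedule, as in the SHA-1 compression function.
 */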
2563 t = 16;
2564 do {
2565 HashWorkingPointer[t] =
2566 S(1,
2567 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2568 8] ^
2569 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2570 } while (++t <= 79);
2571 t = 0;
2572 A = HashResultPointer[0];
2573 B = HashResultPointer[1];
2574 C = HashResultPointer[2];
2575 D = HashResultPointer[3];
2576 E = HashResultPointer[4];
2577
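/* Run the 80 SHA-1 style rounds over the A-E working variables,
 * selecting the round function and constant by round number.
 */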
2578 do {
2579 if (t < 20) {
2580 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2581 } else if (t < 40) {
2582 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2583 } else if (t < 60) {
2584 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2585 } else {
2586 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2587 }
2588 TEMP += S(5, A) + E + HashWorkingPointer[t];
2589 E = D;
2590 D = C;
2591 C = S(30, B);
2592 B = A;
2593 A = TEMP;
2594 } while (++t <= 79);
2595
2596 HashResultPointer[0] += A;
2597 HashResultPointer[1] += B;
2598 HashResultPointer[2] += C;
2599 HashResultPointer[3] += D;
2600 HashResultPointer[4] += E;
2601
2602 }
2603
2604 /**
2605 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2606 * @RandomChallenge: pointer to the entry of host challenge random number array.
2607 * @HashWorking: pointer to the entry of the working hash array.
2608 *
2609 * This routine calculates the working hash array referred by @HashWorking
2610 * from the challenge random numbers associated with the host, referred by
2611 * @RandomChallenge. The result is put into the entry of the working hash
2612 * array and returned by reference through @HashWorking.
2613 **/
2614 static void
2615 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2616 {
2617 *HashWorking = (*RandomChallenge ^ *HashWorking);
2618 }
2619
2620 /**
2621 * lpfc_hba_init - Perform special handling for LC HBA initialization
2622 * @phba: pointer to lpfc hba data structure.
2623 * @hbainit: pointer to an array of unsigned 32-bit integers.
2624 *
2625 * This routine performs the special handling for LC HBA initialization.
2626 **/
2627 void
2628 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2629 {
2630 int t;
2631 uint32_t *HashWorking;
2632 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2633
2634 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2635 if (!HashWorking)
2636 return;
2637
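/* Seed the working array with the two words of the adapter WWNN
 * before mixing in the random challenge data below.
 */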
2638 HashWorking[0] = HashWorking[78] = *pwwnn++;
2639 HashWorking[1] = HashWorking[79] = *pwwnn;
2640
2641 for (t = 0; t < 7; t++)
2642 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2643
2644 lpfc_sha_init(hbainit);
2645 lpfc_sha_iterate(hbainit, HashWorking);
2646 kfree(HashWorking);
2647 }
2648
2649 /**
2650 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2651 * @vport: pointer to a virtual N_Port data structure.
2652 *
2653 * This routine performs the necessary cleanups before deleting the @vport.
2654 * It invokes the discovery state machine to perform necessary state
2655 * transitions and to release the ndlps associated with the @vport. Note,
2656 * the physical port is treated as @vport 0.
2657 **/
2658 void
2659 lpfc_cleanup(struct lpfc_vport *vport)
2660 {
2661 struct lpfc_hba *phba = vport->phba;
2662 struct lpfc_nodelist *ndlp, *next_ndlp;
2663 int i = 0;
2664
2665 if (phba->link_state > LPFC_LINK_DOWN)
2666 lpfc_port_link_failure(vport);
2667
2668 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2669 if (!NLP_CHK_NODE_ACT(ndlp)) {
2670 ndlp = lpfc_enable_node(vport, ndlp,
2671 NLP_STE_UNUSED_NODE);
2672 if (!ndlp)
2673 continue;
2674 spin_lock_irq(&phba->ndlp_lock);
2675 NLP_SET_FREE_REQ(ndlp);
2676 spin_unlock_irq(&phba->ndlp_lock);
2677 /* Trigger the release of the ndlp memory */
2678 lpfc_nlp_put(ndlp);
2679 continue;
2680 }
2681 spin_lock_irq(&phba->ndlp_lock);
2682 if (NLP_CHK_FREE_REQ(ndlp)) {
2683 /* The ndlp should not be in memory free mode already */
2684 spin_unlock_irq(&phba->ndlp_lock);
2685 continue;
2686 } else
2687 /* Indicate request for freeing ndlp memory */
2688 NLP_SET_FREE_REQ(ndlp);
2689 spin_unlock_irq(&phba->ndlp_lock);
2690
2691 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2692 ndlp->nlp_DID == Fabric_DID) {
2693 /* Just free up ndlp with Fabric_DID for vports */
2694 lpfc_nlp_put(ndlp);
2695 continue;
2696 }
2697
2698 /* Take care of nodes in the unused state before the state
2699 * machine takes action.
2700 */
2701 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2702 lpfc_nlp_put(ndlp);
2703 continue;
2704 }
2705
2706 if (ndlp->nlp_type & NLP_FABRIC)
2707 lpfc_disc_state_machine(vport, ndlp, NULL,
2708 NLP_EVT_DEVICE_RECOVERY);
2709
2710 if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
2711 /* Remove the NVME transport reference now and
2712 * continue to remove the node.
2713 */
2714 lpfc_nlp_put(ndlp);
2715 }
2716
2717 lpfc_disc_state_machine(vport, ndlp, NULL,
2718 NLP_EVT_DEVICE_RM);
2719 }
2720
2721 /* At this point, ALL ndlp's should be gone
2722 * because of the previous NLP_EVT_DEVICE_RM.
2723 * Let's wait for this to happen, if needed.
2724 */
2725 while (!list_empty(&vport->fc_nodes)) {
2726 if (i++ > 3000) {
2727 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2728 "0233 Nodelist not empty\n");
2729 list_for_each_entry_safe(ndlp, next_ndlp,
2730 &vport->fc_nodes, nlp_listp) {
2731 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2732 LOG_NODE,
2733 "0282 did:x%x ndlp:x%p "
2734 "usgmap:x%x refcnt:%d\n",
2735 ndlp->nlp_DID, (void *)ndlp,
2736 ndlp->nlp_usg_map,
2737 kref_read(&ndlp->kref));
2738 }
2739 break;
2740 }
2741
2742 /* Wait for any activity on ndlps to settle */
2743 msleep(10);
2744 }
2745 lpfc_cleanup_vports_rrqs(vport, NULL);
2746 }
2747
2748 /**
2749 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2750 * @vport: pointer to a virtual N_Port data structure.
2751 *
2752 * This routine stops all the timers associated with a @vport. This function
2753 * is invoked before disabling or deleting a @vport. Note that the physical
2754 * port is treated as @vport 0.
2755 **/
2756 void
2757 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2758 {
2759 del_timer_sync(&vport->els_tmofunc);
2760 del_timer_sync(&vport->delayed_disc_tmo);
2761 lpfc_can_disctmo(vport);
2762 return;
2763 }
2764
2765 /**
2766 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2767 * @phba: pointer to lpfc hba data structure.
2768 *
2769 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2770 * caller of this routine should already hold the hbalock.
2771 **/
2772 void
2773 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2774 {
2775 /* Clear pending FCF rediscovery wait flag */
2776 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2777
2778 /* Now, try to stop the timer */
2779 del_timer(&phba->fcf.redisc_wait);
2780 }
2781
2782 /**
2783 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2784 * @phba: pointer to lpfc hba data structure.
2785 *
2786 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2787 * checks whether the FCF rediscovery wait timer is pending with the
2788 * hbalock held before proceeding with disabling the timer and clearing the
2789 * wait timer pending flag.
2790 **/
2791 void
2792 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2793 {
2794 spin_lock_irq(&phba->hbalock);
2795 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2796 /* FCF rediscovery timer already fired or stopped */
2797 spin_unlock_irq(&phba->hbalock);
2798 return;
2799 }
2800 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2801 /* Clear failover in progress flags */
2802 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2803 spin_unlock_irq(&phba->hbalock);
2804 }
2805
2806 /**
2807 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2808 * @phba: pointer to lpfc hba data structure.
2809 *
2810 * This routine stops all the timers associated with a HBA. This function is
2811 * invoked before either putting a HBA offline or unloading the driver.
2812 **/
2813 void
2814 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2815 {
2816 lpfc_stop_vport_timers(phba->pport);
2817 del_timer_sync(&phba->sli.mbox_tmo);
2818 del_timer_sync(&phba->fabric_block_timer);
2819 del_timer_sync(&phba->eratt_poll);
2820 del_timer_sync(&phba->hb_tmofunc);
2821 if (phba->sli_rev == LPFC_SLI_REV4) {
2822 del_timer_sync(&phba->rrq_tmr);
2823 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2824 }
2825 phba->hb_outstanding = 0;
2826
2827 switch (phba->pci_dev_grp) {
2828 case LPFC_PCI_DEV_LP:
2829 /* Stop any LightPulse device specific driver timers */
2830 del_timer_sync(&phba->fcp_poll_timer);
2831 break;
2832 case LPFC_PCI_DEV_OC:
2833 /* Stop any OneConnect device specific driver timers */
2834 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2835 break;
2836 default:
2837 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2838 "0297 Invalid device group (x%x)\n",
2839 phba->pci_dev_grp);
2840 break;
2841 }
2842 return;
2843 }
2844
2845 /**
2846 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2847 * @phba: pointer to lpfc hba data structure.
2848 * @mbx_action: flag for mailbox shutdown action.
 *
2849 * This routine marks a HBA's management interface as blocked. Once the HBA's
2850 * management interface is marked as blocked, all user space access to
2851 * the HBA, whether through the sysfs interface or the libdfc interface,
2852 * is blocked. The HBA is set to block the management interface when the
2853 * driver prepares the HBA interface for online or offline.
2854 **/
2855 static void
2856 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2857 {
2858 unsigned long iflag;
2859 uint8_t actcmd = MBX_HEARTBEAT;
2860 unsigned long timeout;
2861
2862 spin_lock_irqsave(&phba->hbalock, iflag);
2863 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2864 spin_unlock_irqrestore(&phba->hbalock, iflag);
2865 if (mbx_action == LPFC_MBX_NO_WAIT)
2866 return;
2867 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2868 spin_lock_irqsave(&phba->hbalock, iflag);
2869 if (phba->sli.mbox_active) {
2870 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2871 /* Determine how long we might wait for the active mailbox
2872 * command to be gracefully completed by firmware.
2873 */
2874 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2875 phba->sli.mbox_active) * 1000) + jiffies;
2876 }
2877 spin_unlock_irqrestore(&phba->hbalock, iflag);
2878
2879 /* Wait for the outstanding mailbox command to complete */
2880 while (phba->sli.mbox_active) {
2881 /* Check active mailbox complete status every 2ms */
2882 msleep(2);
2883 if (time_after(jiffies, timeout)) {
2884 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2885 "2813 Mgmt IO is Blocked %x "
2886 "- mbox cmd %x still active\n",
2887 phba->sli.sli_flag, actcmd);
2888 break;
2889 }
2890 }
2891 }
2892
2893 /**
2894 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2895 * @phba: pointer to lpfc hba data structure.
2896 *
2897 * Allocate RPIs for all active remote nodes. This is needed whenever
2898 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2899 * is to fix up the temporary RPI assignments.
2900 **/
2901 void
2902 lpfc_sli4_node_prep(struct lpfc_hba *phba)
2903 {
2904 struct lpfc_nodelist *ndlp, *next_ndlp;
2905 struct lpfc_vport **vports;
2906 int i, rpi;
2907 unsigned long flags;
2908
2909 if (phba->sli_rev != LPFC_SLI_REV4)
2910 return;
2911
2912 vports = lpfc_create_vport_work_array(phba);
2913 if (vports == NULL)
2914 return;
2915
2916 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2917 if (vports[i]->load_flag & FC_UNLOADING)
2918 continue;
2919
2920 list_for_each_entry_safe(ndlp, next_ndlp,
2921 &vports[i]->fc_nodes,
2922 nlp_listp) {
2923 if (!NLP_CHK_NODE_ACT(ndlp))
2924 continue;
2925 rpi = lpfc_sli4_alloc_rpi(phba);
2926 if (rpi == LPFC_RPI_ALLOC_ERROR) {
2927 spin_lock_irqsave(&phba->ndlp_lock, flags);
2928 NLP_CLR_NODE_ACT(ndlp);
2929 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
2930 continue;
2931 }
2932 ndlp->nlp_rpi = rpi;
2933 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
2934 "0009 rpi:%x DID:%x "
2935 "flg:%x map:%x %p\n", ndlp->nlp_rpi,
2936 ndlp->nlp_DID, ndlp->nlp_flag,
2937 ndlp->nlp_usg_map, ndlp);
2938 }
2939 }
2940 lpfc_destroy_vport_work_array(phba, vports);
2941 }
2942
2943 /**
2944 * lpfc_online - Initialize and bring a HBA online
2945 * @phba: pointer to lpfc hba data structure.
2946 *
2947 * This routine initializes the HBA and brings it online. During this
2948 * process, the management interface is blocked to prevent user space access
2949 * to the HBA from interfering with the driver initialization.
2950 *
2951 * Return codes
2952 * 0 - successful
2953 * 1 - failed
2954 **/
2955 int
2956 lpfc_online(struct lpfc_hba *phba)
2957 {
2958 struct lpfc_vport *vport;
2959 struct lpfc_vport **vports;
2960 int i;
2961 bool vpis_cleared = false;
2962
2963 if (!phba)
2964 return 0;
2965 vport = phba->pport;
2966
2967 if (!(vport->fc_flag & FC_OFFLINE_MODE))
2968 return 0;
2969
2970 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2971 "0458 Bring Adapter online\n");
2972
2973 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
2974
2975 if (phba->sli_rev == LPFC_SLI_REV4) {
2976 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2977 lpfc_unblock_mgmt_io(phba);
2978 return 1;
2979 }
2980 spin_lock_irq(&phba->hbalock);
2981 if (!phba->sli4_hba.max_cfg_param.vpi_used)
2982 vpis_cleared = true;
2983 spin_unlock_irq(&phba->hbalock);
2984 } else {
2985 lpfc_sli_queue_init(phba);
2986 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2987 lpfc_unblock_mgmt_io(phba);
2988 return 1;
2989 }
2990 }
2991
2992 vports = lpfc_create_vport_work_array(phba);
2993 if (vports != NULL) {
2994 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2995 struct Scsi_Host *shost;
2996 shost = lpfc_shost_from_vport(vports[i]);
2997 spin_lock_irq(shost->host_lock);
2998 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2999 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3000 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3001 if (phba->sli_rev == LPFC_SLI_REV4) {
3002 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3003 if ((vpis_cleared) &&
3004 (vports[i]->port_type !=
3005 LPFC_PHYSICAL_PORT))
3006 vports[i]->vpi = 0;
3007 }
3008 spin_unlock_irq(shost->host_lock);
3009 }
3010 }
3011 lpfc_destroy_vport_work_array(phba, vports);
3012
3013 lpfc_unblock_mgmt_io(phba);
3014 return 0;
3015 }
3016
3017 /**
3018 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3019 * @phba: pointer to lpfc hba data structure.
3020 *
3021 * This routine marks a HBA's management interface as not blocked. Once the
3022 * HBA's management interface is marked as not blocked, all user space
3023 * access to the HBA, whether through the sysfs interface or the libdfc
3024 * interface, is allowed. The HBA is set to block the management interface
3025 * when the driver prepares the HBA interface for online or offline and then
3026 * set to unblock the management interface afterwards.
3027 **/
3028 void
3029 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3030 {
3031 unsigned long iflag;
3032
3033 spin_lock_irqsave(&phba->hbalock, iflag);
3034 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3035 spin_unlock_irqrestore(&phba->hbalock, iflag);
3036 }
3037
3038 /**
3039 * lpfc_offline_prep - Prepare a HBA to be brought offline
3040 * @phba: pointer to lpfc hba data structure.
3041 * @mbx_action: flag for mailbox shutdown action.
 *
3042 * This routine is invoked to prepare a HBA to be brought offline. It issues
3043 * an unreg_login to all the nodes on all vports and flushes the mailbox
3044 * queue to make it ready to be brought offline.
3045 **/
3046 void
3047 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3048 {
3049 struct lpfc_vport *vport = phba->pport;
3050 struct lpfc_nodelist *ndlp, *next_ndlp;
3051 struct lpfc_vport **vports;
3052 struct Scsi_Host *shost;
3053 int i;
3054
3055 if (vport->fc_flag & FC_OFFLINE_MODE)
3056 return;
3057
3058 lpfc_block_mgmt_io(phba, mbx_action);
3059
3060 lpfc_linkdown(phba);
3061
3062 /* Issue an unreg_login to all nodes on all vports */
3063 vports = lpfc_create_vport_work_array(phba);
3064 if (vports != NULL) {
3065 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3066 if (vports[i]->load_flag & FC_UNLOADING)
3067 continue;
3068 shost = lpfc_shost_from_vport(vports[i]);
3069 spin_lock_irq(shost->host_lock);
3070 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3071 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3072 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3073 spin_unlock_irq(shost->host_lock);
3074
3075 shost = lpfc_shost_from_vport(vports[i]);
3076 list_for_each_entry_safe(ndlp, next_ndlp,
3077 &vports[i]->fc_nodes,
3078 nlp_listp) {
3079 if (!NLP_CHK_NODE_ACT(ndlp))
3080 continue;
3081 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3082 continue;
3083 if (ndlp->nlp_type & NLP_FABRIC) {
3084 lpfc_disc_state_machine(vports[i], ndlp,
3085 NULL, NLP_EVT_DEVICE_RECOVERY);
3086 lpfc_disc_state_machine(vports[i], ndlp,
3087 NULL, NLP_EVT_DEVICE_RM);
3088 }
3089 spin_lock_irq(shost->host_lock);
3090 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3091 spin_unlock_irq(shost->host_lock);
3092 /*
3093 * Whenever an SLI4 port goes offline, free the
3094 * RPI. Get a new RPI when the adapter port
3095 * comes back online.
3096 */
3097 if (phba->sli_rev == LPFC_SLI_REV4) {
3098 lpfc_printf_vlog(ndlp->vport,
3099 KERN_INFO, LOG_NODE,
3100 "0011 lpfc_offline: "
3101 "ndlp:x%p did %x "
3102 "usgmap:x%x rpi:%x\n",
3103 ndlp, ndlp->nlp_DID,
3104 ndlp->nlp_usg_map,
3105 ndlp->nlp_rpi);
3106
3107 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3108 }
3109 lpfc_unreg_rpi(vports[i], ndlp);
3110 }
3111 }
3112 }
3113 lpfc_destroy_vport_work_array(phba, vports);
3114
3115 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3116 }
3117
3118 /**
3119 * lpfc_offline - Bring a HBA offline
3120 * @phba: pointer to lpfc hba data structure.
3121 *
3122 * This routine actually brings a HBA offline. It stops all the timers
3123 * associated with the HBA, brings down the SLI layer, and eventually
3124 * marks the HBA as in offline state for the upper layer protocol.
3125 **/
3126 void
3127 lpfc_offline(struct lpfc_hba *phba)
3128 {
3129 struct Scsi_Host *shost;
3130 struct lpfc_vport **vports;
3131 int i;
3132
3133 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3134 return;
3135
3136 /* stop port and all timers associated with this hba */
3137 lpfc_stop_port(phba);
3138 vports = lpfc_create_vport_work_array(phba);
3139 if (vports != NULL)
3140 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3141 lpfc_stop_vport_timers(vports[i]);
3142 lpfc_destroy_vport_work_array(phba, vports);
3143 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3144 "0460 Bring Adapter offline\n");
3145 /* Bring down the SLI Layer and cleanup. The HBA is offline now. */
3147 lpfc_sli_hba_down(phba);
3148 spin_lock_irq(&phba->hbalock);
3149 phba->work_ha = 0;
3150 spin_unlock_irq(&phba->hbalock);
3151 vports = lpfc_create_vport_work_array(phba);
3152 if (vports != NULL)
3153 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3154 shost = lpfc_shost_from_vport(vports[i]);
3155 spin_lock_irq(shost->host_lock);
3156 vports[i]->work_port_events = 0;
3157 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3158 spin_unlock_irq(shost->host_lock);
3159 }
3160 lpfc_destroy_vport_work_array(phba, vports);
3161 }
3162
3163 /**
3164 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3165 * @phba: pointer to lpfc hba data structure.
3166 *
3167 * This routine is to free all the SCSI buffers and IOCBs from the driver
3168 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3169 * the internal resources before the device is removed from the system.
3170 **/
3171 static void
3172 lpfc_scsi_free(struct lpfc_hba *phba)
3173 {
3174 struct lpfc_scsi_buf *sb, *sb_next;
3175
3176 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3177 return;
3178
3179 spin_lock_irq(&phba->hbalock);
3180
3181 /* Release all the lpfc_scsi_bufs maintained by this host. */
3182
3183 spin_lock(&phba->scsi_buf_list_put_lock);
3184 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3185 list) {
3186 list_del(&sb->list);
3187 pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3188 sb->dma_handle);
3189 kfree(sb);
3190 phba->total_scsi_bufs--;
3191 }
3192 spin_unlock(&phba->scsi_buf_list_put_lock);
3193
3194 spin_lock(&phba->scsi_buf_list_get_lock);
3195 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3196 list) {
3197 list_del(&sb->list);
3198 pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3199 sb->dma_handle);
3200 kfree(sb);
3201 phba->total_scsi_bufs--;
3202 }
3203 spin_unlock(&phba->scsi_buf_list_get_lock);
3204 spin_unlock_irq(&phba->hbalock);
3205 }
3206 /**
3207 * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
3208 * @phba: pointer to lpfc hba data structure.
3209 *
3210 * This routine is to free all the NVME buffers and IOCBs from the driver
3211 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3212 * the internal resources before the device is removed from the system.
3213 **/
3214 static void
3215 lpfc_nvme_free(struct lpfc_hba *phba)
3216 {
3217 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
3218
3219 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3220 return;
3221
3222 spin_lock_irq(&phba->hbalock);
3223
3224 /* Release all the lpfc_nvme_bufs maintained by this host. */
3225 spin_lock(&phba->nvme_buf_list_put_lock);
3226 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3227 &phba->lpfc_nvme_buf_list_put, list) {
3228 list_del(&lpfc_ncmd->list);
3229 pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
3230 lpfc_ncmd->dma_handle);
3231 kfree(lpfc_ncmd);
3232 phba->total_nvme_bufs--;
3233 }
3234 spin_unlock(&phba->nvme_buf_list_put_lock);
3235
3236 spin_lock(&phba->nvme_buf_list_get_lock);
3237 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3238 &phba->lpfc_nvme_buf_list_get, list) {
3239 list_del(&lpfc_ncmd->list);
3240 pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
3241 lpfc_ncmd->dma_handle);
3242 kfree(lpfc_ncmd);
3243 phba->total_nvme_bufs--;
3244 }
3245 spin_unlock(&phba->nvme_buf_list_get_lock);
3246 spin_unlock_irq(&phba->hbalock);
3247 }
3248 /**
3249 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3250 * @phba: pointer to lpfc hba data structure.
3251 *
3252 * This routine first calculates the sizes of the current els and allocated
3253 * scsi sgl lists, and then goes through all sgls to update the physical
3254 * XRIs assigned due to port function reset. During port initialization, the
3255 * current els and allocated scsi sgl lists are 0s.
3256 *
3257 * Return codes
3258 * 0 - successful (for now, it always returns 0)
3259 **/
3260 int
3261 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3262 {
3263 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3264 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3265 LIST_HEAD(els_sgl_list);
3266 int rc;
3267
3268 /*
3269 * update on pci function's els xri-sgl list
3270 */
3271 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3272
3273 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3274 /* els xri-sgl expanded */
3275 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3276 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3277 "3157 ELS xri-sgl count increased from "
3278 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3279 els_xri_cnt);
3280 /* allocate the additional els sgls */
3281 for (i = 0; i < xri_cnt; i++) {
3282 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3283 GFP_KERNEL);
3284 if (sglq_entry == NULL) {
3285 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3286 "2562 Failure to allocate an "
3287 "ELS sgl entry:%d\n", i);
3288 rc = -ENOMEM;
3289 goto out_free_mem;
3290 }
3291 sglq_entry->buff_type = GEN_BUFF_TYPE;
3292 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3293 &sglq_entry->phys);
3294 if (sglq_entry->virt == NULL) {
3295 kfree(sglq_entry);
3296 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3297 "2563 Failure to allocate an "
3298 "ELS mbuf:%d\n", i);
3299 rc = -ENOMEM;
3300 goto out_free_mem;
3301 }
3302 sglq_entry->sgl = sglq_entry->virt;
3303 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3304 sglq_entry->state = SGL_FREED;
3305 list_add_tail(&sglq_entry->list, &els_sgl_list);
3306 }
3307 spin_lock_irq(&phba->hbalock);
3308 spin_lock(&phba->sli4_hba.sgl_list_lock);
3309 list_splice_init(&els_sgl_list,
3310 &phba->sli4_hba.lpfc_els_sgl_list);
3311 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3312 spin_unlock_irq(&phba->hbalock);
3313 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3314 /* els xri-sgl shrunk */
3315 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3316 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3317 "3158 ELS xri-sgl count decreased from "
3318 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3319 els_xri_cnt);
3320 spin_lock_irq(&phba->hbalock);
3321 spin_lock(&phba->sli4_hba.sgl_list_lock);
3322 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3323 &els_sgl_list);
3324 /* release extra els sgls from list */
3325 for (i = 0; i < xri_cnt; i++) {
3326 list_remove_head(&els_sgl_list,
3327 sglq_entry, struct lpfc_sglq, list);
3328 if (sglq_entry) {
3329 __lpfc_mbuf_free(phba, sglq_entry->virt,
3330 sglq_entry->phys);
3331 kfree(sglq_entry);
3332 }
3333 }
3334 list_splice_init(&els_sgl_list,
3335 &phba->sli4_hba.lpfc_els_sgl_list);
3336 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3337 spin_unlock_irq(&phba->hbalock);
3338 } else
3339 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3340 "3163 ELS xri-sgl count unchanged: %d\n",
3341 els_xri_cnt);
3342 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3343
3344 /* update xris to els sgls on the list */
3345 sglq_entry = NULL;
3346 sglq_entry_next = NULL;
3347 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3348 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3349 lxri = lpfc_sli4_next_xritag(phba);
3350 if (lxri == NO_XRI) {
3351 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3352 "2400 Failed to allocate xri for "
3353 "ELS sgl\n");
3354 rc = -ENOMEM;
3355 goto out_free_mem;
3356 }
3357 sglq_entry->sli4_lxritag = lxri;
3358 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3359 }
3360 return 0;
3361
3362 out_free_mem:
3363 lpfc_free_els_sgl_list(phba);
3364 return rc;
3365 }
3366
3367 /**
3368 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3369 * @phba: pointer to lpfc hba data structure.
3370 *
3371 * This routine first calculates the size of the current nvmet xri-sgl list,
3372 * and then goes through all the sgls to update the physical XRIs assigned
3373 * due to port function reset. During port initialization, the current nvmet
3374 * xri-sgl list is 0.
3375 *
3376 * Return codes
3377 * 0 - successful (for now, it always returns 0)
3378 **/
3379 int
3380 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3381 {
3382 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3383 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3384 uint16_t nvmet_xri_cnt, tot_cnt;
3385 LIST_HEAD(nvmet_sgl_list);
3386 int rc;
3387
3388 /*
3389 * update on pci function's nvmet xri-sgl list
3390 */
3391 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3392 nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
3393 tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3394 if (nvmet_xri_cnt > tot_cnt) {
3395 phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
3396 nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
3397 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3398 "6301 NVMET post-sgl count changed to %d\n",
3399 phba->cfg_nvmet_mrq_post);
3400 }
3401
3402 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3403 /* nvmet xri-sgl expanded */
3404 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3405 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3406 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3407 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3408 /* allocate the additional nvmet sgls */
3409 for (i = 0; i < xri_cnt; i++) {
3410 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3411 GFP_KERNEL);
3412 if (sglq_entry == NULL) {
3413 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3414 "6303 Failure to allocate an "
3415 "NVMET sgl entry:%d\n", i);
3416 rc = -ENOMEM;
3417 goto out_free_mem;
3418 }
3419 sglq_entry->buff_type = NVMET_BUFF_TYPE;
3420 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3421 &sglq_entry->phys);
3422 if (sglq_entry->virt == NULL) {
3423 kfree(sglq_entry);
3424 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3425 "6304 Failure to allocate an "
3426 "NVMET buf:%d\n", i);
3427 rc = -ENOMEM;
3428 goto out_free_mem;
3429 }
3430 sglq_entry->sgl = sglq_entry->virt;
3431 memset(sglq_entry->sgl, 0,
3432 phba->cfg_sg_dma_buf_size);
3433 sglq_entry->state = SGL_FREED;
3434 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3435 }
3436 spin_lock_irq(&phba->hbalock);
3437 spin_lock(&phba->sli4_hba.sgl_list_lock);
3438 list_splice_init(&nvmet_sgl_list,
3439 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3440 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3441 spin_unlock_irq(&phba->hbalock);
3442 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3443 /* nvmet xri-sgl shrunk */
3444 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3445 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3446 "6305 NVMET xri-sgl count decreased from "
3447 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3448 nvmet_xri_cnt);
3449 spin_lock_irq(&phba->hbalock);
3450 spin_lock(&phba->sli4_hba.sgl_list_lock);
3451 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3452 &nvmet_sgl_list);
3453 /* release extra nvmet sgls from list */
3454 for (i = 0; i < xri_cnt; i++) {
3455 list_remove_head(&nvmet_sgl_list,
3456 sglq_entry, struct lpfc_sglq, list);
3457 if (sglq_entry) {
3458 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3459 sglq_entry->phys);
3460 kfree(sglq_entry);
3461 }
3462 }
3463 list_splice_init(&nvmet_sgl_list,
3464 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3465 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3466 spin_unlock_irq(&phba->hbalock);
3467 } else
3468 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3469 "6306 NVMET xri-sgl count unchanged: %d\n",
3470 nvmet_xri_cnt);
3471 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3472
3473 /* update xris to nvmet sgls on the list */
3474 sglq_entry = NULL;
3475 sglq_entry_next = NULL;
3476 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3477 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3478 lxri = lpfc_sli4_next_xritag(phba);
3479 if (lxri == NO_XRI) {
3480 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3481 "6307 Failed to allocate xri for "
3482 "NVMET sgl\n");
3483 rc = -ENOMEM;
3484 goto out_free_mem;
3485 }
3486 sglq_entry->sli4_lxritag = lxri;
3487 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3488 }
3489 return 0;
3490
3491 out_free_mem:
3492 lpfc_free_nvmet_sgl_list(phba);
3493 return rc;
3494 }
3495
3496 /**
3497 * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
3498 * @phba: pointer to lpfc hba data structure.
3499 *
3500  * This routine first calculates the sizes of the current els and allocated
3501  * scsi sgl lists, and then goes through all sgls to update the physical
3502  * XRIs assigned due to port function reset. During port initialization, the
3503  * current els and allocated scsi sgl lists are empty.
3504 *
3505 * Return codes
3506 * 0 - successful (for now, it always returns 0)
3507 **/
3508 int
3509 lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
3510 {
3511 struct lpfc_scsi_buf *psb, *psb_next;
3512 uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
3513 LIST_HEAD(scsi_sgl_list);
3514 int rc;
3515
3516 /*
3517 * update on pci function's els xri-sgl list
3518 */
3519 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3520 phba->total_scsi_bufs = 0;
3521
3522 /*
3523 * update on pci function's allocated scsi xri-sgl list
3524 */
3525 /* maximum number of xris available for scsi buffers */
3526 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
3527 els_xri_cnt;
3528
3529 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3530 return 0;
3531
3532 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3533 phba->sli4_hba.scsi_xri_max = /* Split them up */
3534 (phba->sli4_hba.scsi_xri_max *
3535 phba->cfg_xri_split) / 100;
3536
3537 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3538 spin_lock(&phba->scsi_buf_list_put_lock);
3539 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3540 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
3541 spin_unlock(&phba->scsi_buf_list_put_lock);
3542 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3543
3544 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3545 "6060 Current allocated SCSI xri-sgl count:%d, "
3546 "maximum SCSI xri count:%d (split:%d)\n",
3547 phba->sli4_hba.scsi_xri_cnt,
3548 phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);
3549
3550 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
3551 		/* max scsi xri shrunk below the allocated scsi buffers */
3552 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
3553 phba->sli4_hba.scsi_xri_max;
3554 /* release the extra allocated scsi buffers */
3555 for (i = 0; i < scsi_xri_cnt; i++) {
3556 list_remove_head(&scsi_sgl_list, psb,
3557 struct lpfc_scsi_buf, list);
3558 if (psb) {
3559 pci_pool_free(phba->lpfc_sg_dma_buf_pool,
3560 psb->data, psb->dma_handle);
3561 kfree(psb);
3562 }
3563 }
3564 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3565 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
3566 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3567 }
3568
3569 	/* update xris associated with remaining allocated scsi buffers */
3570 psb = NULL;
3571 psb_next = NULL;
3572 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
3573 lxri = lpfc_sli4_next_xritag(phba);
3574 if (lxri == NO_XRI) {
3575 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3576 "2560 Failed to allocate xri for "
3577 "scsi buffer\n");
3578 rc = -ENOMEM;
3579 goto out_free_mem;
3580 }
3581 psb->cur_iocbq.sli4_lxritag = lxri;
3582 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3583 }
3584 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3585 spin_lock(&phba->scsi_buf_list_put_lock);
3586 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3587 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
3588 spin_unlock(&phba->scsi_buf_list_put_lock);
3589 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3590 return 0;
3591
3592 out_free_mem:
3593 lpfc_scsi_free(phba);
3594 return rc;
3595 }
3596
3597 static uint64_t
3598 lpfc_get_wwpn(struct lpfc_hba *phba)
3599 {
3600 uint64_t wwn;
3601 int rc;
3602 LPFC_MBOXQ_t *mboxq;
3603 MAILBOX_t *mb;
3604
3605
3606 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
3607 GFP_KERNEL);
3608 if (!mboxq)
3609 return (uint64_t)-1;
3610
3611 /* First get WWN of HBA instance */
3612 lpfc_read_nv(phba, mboxq);
3613 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3614 if (rc != MBX_SUCCESS) {
3615 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3616 				"6019 Mailbox failed, mbxCmd x%x "
3617 "READ_NV, mbxStatus x%x\n",
3618 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
3619 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
3620 mempool_free(mboxq, phba->mbox_mem_pool);
3621 return (uint64_t) -1;
3622 }
3623 mb = &mboxq->u.mb;
3624 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
3625 /* wwn is WWPN of HBA instance */
3626 mempool_free(mboxq, phba->mbox_mem_pool);
3627 if (phba->sli_rev == LPFC_SLI_REV4)
3628 return be64_to_cpu(wwn);
3629 else
3630 return (((wwn & 0xffffffff00000000) >> 32) |
3631 ((wwn & 0x00000000ffffffff) << 32));
3632
3633 }
3634
3635 /**
3636 * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
3637 * @phba: pointer to lpfc hba data structure.
3638 *
3639  * This routine first calculates the sizes of the current els and allocated
3640  * nvme sgl lists, and then goes through all sgls to update the physical
3641  * XRIs assigned due to port function reset. During port initialization, the
3642  * current els and allocated nvme sgl lists are empty.
3643 *
3644 * Return codes
3645 * 0 - successful (for now, it always returns 0)
3646 **/
3647 int
3648 lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
3649 {
3650 struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
3651 uint16_t i, lxri, els_xri_cnt;
3652 uint16_t nvme_xri_cnt, nvme_xri_max;
3653 LIST_HEAD(nvme_sgl_list);
3654 int rc;
3655
3656 phba->total_nvme_bufs = 0;
3657
3658 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3659 return 0;
3660 /*
3661 * update on pci function's allocated nvme xri-sgl list
3662 */
3663
3664 /* maximum number of xris available for nvme buffers */
3665 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3666 nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3667 phba->sli4_hba.nvme_xri_max = nvme_xri_max;
3668 phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
3669
3670 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3671 "6074 Current allocated NVME xri-sgl count:%d, "
3672 "maximum NVME xri count:%d\n",
3673 phba->sli4_hba.nvme_xri_cnt,
3674 phba->sli4_hba.nvme_xri_max);
3675
3676 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3677 spin_lock(&phba->nvme_buf_list_put_lock);
3678 list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
3679 list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
3680 spin_unlock(&phba->nvme_buf_list_put_lock);
3681 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3682
3683 if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
3684 /* max nvme xri shrunk below the allocated nvme buffers */
3685 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3686 nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
3687 phba->sli4_hba.nvme_xri_max;
3688 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3689 /* release the extra allocated nvme buffers */
3690 for (i = 0; i < nvme_xri_cnt; i++) {
3691 list_remove_head(&nvme_sgl_list, lpfc_ncmd,
3692 struct lpfc_nvme_buf, list);
3693 if (lpfc_ncmd) {
3694 pci_pool_free(phba->lpfc_sg_dma_buf_pool,
3695 lpfc_ncmd->data,
3696 lpfc_ncmd->dma_handle);
3697 kfree(lpfc_ncmd);
3698 }
3699 }
3700 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3701 phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
3702 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3703 }
3704
3705 	/* update xris associated with remaining allocated nvme buffers */
3706 lpfc_ncmd = NULL;
3707 lpfc_ncmd_next = NULL;
3708 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3709 &nvme_sgl_list, list) {
3710 lxri = lpfc_sli4_next_xritag(phba);
3711 if (lxri == NO_XRI) {
3712 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3713 "6075 Failed to allocate xri for "
3714 "nvme buffer\n");
3715 rc = -ENOMEM;
3716 goto out_free_mem;
3717 }
3718 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
3719 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3720 }
3721 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3722 spin_lock(&phba->nvme_buf_list_put_lock);
3723 list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
3724 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
3725 spin_unlock(&phba->nvme_buf_list_put_lock);
3726 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3727 return 0;
3728
3729 out_free_mem:
3730 lpfc_nvme_free(phba);
3731 return rc;
3732 }
3733
3734 /**
3735 * lpfc_create_port - Create an FC port
3736 * @phba: pointer to lpfc hba data structure.
3737 * @instance: a unique integer ID to this FC port.
3738 * @dev: pointer to the device data structure.
3739 *
3740  * This routine creates an FC port for the upper layer protocol. The FC port
3741  * can be created on top of either a physical port or a virtual port provided
3742  * by the HBA. This routine also allocates a SCSI host data structure (shost)
3743  * and associates it with the FC port before adding the shost to the SCSI
3744  * layer.
3745 *
3746 * Return codes
3747 * @vport - pointer to the virtual N_Port data structure.
3748 * NULL - port create failed.
3749 **/
3750 struct lpfc_vport *
3751 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3752 {
3753 struct lpfc_vport *vport;
3754 struct Scsi_Host *shost = NULL;
3755 int error = 0;
3756 int i;
3757 uint64_t wwn;
3758 bool use_no_reset_hba = false;
3759
3760 wwn = lpfc_get_wwpn(phba);
3761
3762 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
3763 if (wwn == lpfc_no_hba_reset[i]) {
3764 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3765 "6020 Setting use_no_reset port=%llx\n",
3766 wwn);
3767 use_no_reset_hba = true;
3768 break;
3769 }
3770 }
3771
3772 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
3773 if (dev != &phba->pcidev->dev) {
3774 shost = scsi_host_alloc(&lpfc_vport_template,
3775 sizeof(struct lpfc_vport));
3776 } else {
3777 if (!use_no_reset_hba)
3778 shost = scsi_host_alloc(&lpfc_template,
3779 sizeof(struct lpfc_vport));
3780 else
3781 shost = scsi_host_alloc(&lpfc_template_no_hr,
3782 sizeof(struct lpfc_vport));
3783 }
3784 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
3785 shost = scsi_host_alloc(&lpfc_template_nvme,
3786 sizeof(struct lpfc_vport));
3787 }
3788 if (!shost)
3789 goto out;
3790
3791 vport = (struct lpfc_vport *) shost->hostdata;
3792 vport->phba = phba;
3793 vport->load_flag |= FC_LOADING;
3794 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3795 vport->fc_rscn_flush = 0;
3796 lpfc_get_vport_cfgparam(vport);
3797
3798 shost->unique_id = instance;
3799 shost->max_id = LPFC_MAX_TARGET;
3800 shost->max_lun = vport->cfg_max_luns;
3801 shost->this_id = -1;
3802 shost->max_cmd_len = 16;
3803 shost->nr_hw_queues = phba->cfg_fcp_io_channel;
3804 if (phba->sli_rev == LPFC_SLI_REV4) {
3805 shost->dma_boundary =
3806 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
3807 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
3808 }
3809
3810 /*
3811 * Set initial can_queue value since 0 is no longer supported and
3812 * scsi_add_host will fail. This will be adjusted later based on the
3813 * max xri value determined in hba setup.
3814 */
3815 shost->can_queue = phba->cfg_hba_queue_depth - 10;
3816 if (dev != &phba->pcidev->dev) {
3817 shost->transportt = lpfc_vport_transport_template;
3818 vport->port_type = LPFC_NPIV_PORT;
3819 } else {
3820 shost->transportt = lpfc_transport_template;
3821 vport->port_type = LPFC_PHYSICAL_PORT;
3822 }
3823
3824 /* Initialize all internally managed lists. */
3825 INIT_LIST_HEAD(&vport->fc_nodes);
3826 INIT_LIST_HEAD(&vport->rcv_buffer_list);
3827 spin_lock_init(&vport->work_port_lock);
3828
3829 setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
3830 (unsigned long)vport);
3831
3832 setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
3833 (unsigned long)vport);
3834
3835 setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
3836 (unsigned long)vport);
3837
3838 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
3839 if (error)
3840 goto out_put_shost;
3841
3842 spin_lock_irq(&phba->hbalock);
3843 list_add_tail(&vport->listentry, &phba->port_list);
3844 spin_unlock_irq(&phba->hbalock);
3845 return vport;
3846
3847 out_put_shost:
3848 scsi_host_put(shost);
3849 out:
3850 return NULL;
3851 }
3852
3853 /**
3854 * destroy_port - destroy an FC port
3855 * @vport: pointer to an lpfc virtual N_Port data structure.
3856 *
3857  * This routine destroys an FC port from the upper layer protocol. All the
3858 * resources associated with the port are released.
3859 **/
3860 void
3861 destroy_port(struct lpfc_vport *vport)
3862 {
3863 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3864 struct lpfc_hba *phba = vport->phba;
3865
3866 lpfc_debugfs_terminate(vport);
3867 fc_remove_host(shost);
3868 scsi_remove_host(shost);
3869
3870 spin_lock_irq(&phba->hbalock);
3871 list_del_init(&vport->listentry);
3872 spin_unlock_irq(&phba->hbalock);
3873
3874 lpfc_cleanup(vport);
3875 return;
3876 }
3877
3878 /**
3879 * lpfc_get_instance - Get a unique integer ID
3880 *
3881  * This routine allocates a unique integer ID from the lpfc_hba_index pool. It
3882 * uses the kernel idr facility to perform the task.
3883 *
3884 * Return codes:
3885 * instance - a unique integer ID allocated as the new instance.
3886 * -1 - lpfc get instance failed.
3887 **/
3888 int
3889 lpfc_get_instance(void)
3890 {
3891 int ret;
3892
3893 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
3894 return ret < 0 ? -1 : ret;
3895 }
3896
3897 /**
3898 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
3899 * @shost: pointer to SCSI host data structure.
3900 * @time: elapsed time of the scan in jiffies.
3901 *
3902 * This routine is called by the SCSI layer with a SCSI host to determine
3903  * whether the host scan is finished.
3904 *
3905 * Note: there is no scan_start function as adapter initialization will have
3906 * asynchronously kicked off the link initialization.
3907 *
3908 * Return codes
3909 * 0 - SCSI host scan is not over yet.
3910 * 1 - SCSI host scan is over.
3911 **/
3912 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3913 {
3914 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3915 struct lpfc_hba *phba = vport->phba;
3916 int stat = 0;
3917
3918 spin_lock_irq(shost->host_lock);
3919
3920 if (vport->load_flag & FC_UNLOADING) {
3921 stat = 1;
3922 goto finished;
3923 }
3924 if (time >= msecs_to_jiffies(30 * 1000)) {
3925 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3926 "0461 Scanning longer than 30 "
3927 "seconds. Continuing initialization\n");
3928 stat = 1;
3929 goto finished;
3930 }
3931 if (time >= msecs_to_jiffies(15 * 1000) &&
3932 phba->link_state <= LPFC_LINK_DOWN) {
3933 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3934 "0465 Link down longer than 15 "
3935 "seconds. Continuing initialization\n");
3936 stat = 1;
3937 goto finished;
3938 }
3939
3940 if (vport->port_state != LPFC_VPORT_READY)
3941 goto finished;
3942 if (vport->num_disc_nodes || vport->fc_prli_sent)
3943 goto finished;
3944 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
3945 goto finished;
3946 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3947 goto finished;
3948
3949 stat = 1;
3950
3951 finished:
3952 spin_unlock_irq(shost->host_lock);
3953 return stat;
3954 }
3955
3956 /**
3957  * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
3958  * @shost: pointer to SCSI host data structure.
3959  *
3960  * This routine initializes the given SCSI host's attributes on an FC port. The
3961  * SCSI host can be on top of either a physical port or a virtual port.
3962 **/
3963 void lpfc_host_attrib_init(struct Scsi_Host *shost)
3964 {
3965 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3966 struct lpfc_hba *phba = vport->phba;
3967 /*
3968 	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
3969 */
3970
3971 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
3972 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3973 fc_host_supported_classes(shost) = FC_COS_CLASS3;
3974
3975 memset(fc_host_supported_fc4s(shost), 0,
3976 sizeof(fc_host_supported_fc4s(shost)));
3977 fc_host_supported_fc4s(shost)[2] = 1;
3978 fc_host_supported_fc4s(shost)[7] = 1;
3979
3980 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
3981 sizeof fc_host_symbolic_name(shost));
3982
3983 fc_host_supported_speeds(shost) = 0;
3984 if (phba->lmt & LMT_32Gb)
3985 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
3986 if (phba->lmt & LMT_16Gb)
3987 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
3988 if (phba->lmt & LMT_10Gb)
3989 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
3990 if (phba->lmt & LMT_8Gb)
3991 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
3992 if (phba->lmt & LMT_4Gb)
3993 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
3994 if (phba->lmt & LMT_2Gb)
3995 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
3996 if (phba->lmt & LMT_1Gb)
3997 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
3998
3999 fc_host_maxframe_size(shost) =
4000 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4001 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4002
4003 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4004
4005 /* This value is also unchanging */
4006 memset(fc_host_active_fc4s(shost), 0,
4007 sizeof(fc_host_active_fc4s(shost)));
4008 fc_host_active_fc4s(shost)[2] = 1;
4009 fc_host_active_fc4s(shost)[7] = 1;
4010
4011 fc_host_max_npiv_vports(shost) = phba->max_vpi;
4012 spin_lock_irq(shost->host_lock);
4013 vport->load_flag &= ~FC_LOADING;
4014 spin_unlock_irq(shost->host_lock);
4015 }
4016
4017 /**
4018 * lpfc_stop_port_s3 - Stop SLI3 device port
4019 * @phba: pointer to lpfc hba data structure.
4020 *
4021  * This routine is invoked to stop an SLI3 device port; it stops the device
4022 * from generating interrupts and stops the device driver's timers for the
4023 * device.
4024 **/
4025 static void
4026 lpfc_stop_port_s3(struct lpfc_hba *phba)
4027 {
4028 /* Clear all interrupt enable conditions */
4029 writel(0, phba->HCregaddr);
4030 readl(phba->HCregaddr); /* flush */
4031 /* Clear all pending interrupts */
4032 writel(0xffffffff, phba->HAregaddr);
4033 readl(phba->HAregaddr); /* flush */
4034
4035 /* Reset some HBA SLI setup states */
4036 lpfc_stop_hba_timers(phba);
4037 phba->pport->work_port_events = 0;
4038 }
4039
4040 /**
4041 * lpfc_stop_port_s4 - Stop SLI4 device port
4042 * @phba: pointer to lpfc hba data structure.
4043 *
4044  * This routine is invoked to stop an SLI4 device port; it stops the device
4045 * from generating interrupts and stops the device driver's timers for the
4046 * device.
4047 **/
4048 static void
4049 lpfc_stop_port_s4(struct lpfc_hba *phba)
4050 {
4051 /* Reset some HBA SLI4 setup states */
4052 lpfc_stop_hba_timers(phba);
4053 phba->pport->work_port_events = 0;
4054 phba->sli4_hba.intr_enable = 0;
4055 }
4056
4057 /**
4058 * lpfc_stop_port - Wrapper function for stopping hba port
4059 * @phba: Pointer to HBA context object.
4060 *
4061  * This routine wraps the actual SLI3 or SLI4 hba stop port routine, invoked
4062  * through the API jump table function pointer in the lpfc_hba struct.
4063 **/
4064 void
4065 lpfc_stop_port(struct lpfc_hba *phba)
4066 {
4067 phba->lpfc_stop_port(phba);
4068 }
4069
4070 /**
4071 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4072 * @phba: Pointer to hba for which this call is being executed.
4073 *
4074 * This routine starts the timer waiting for the FCF rediscovery to complete.
4075 **/
4076 void
4077 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4078 {
4079 unsigned long fcf_redisc_wait_tmo =
4080 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4081 /* Start fcf rediscovery wait period timer */
4082 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4083 spin_lock_irq(&phba->hbalock);
4084 /* Allow action to new fcf asynchronous event */
4085 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4086 /* Mark the FCF rediscovery pending state */
4087 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4088 spin_unlock_irq(&phba->hbalock);
4089 }
4090
4091 /**
4092 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4093  * @ptr: holds the pointer to the lpfc hba data structure.
4094 *
4095  * This routine is invoked when the wait for FCF table rediscovery has
4096  * timed out. If new FCF record(s) have been discovered during the
4097  * wait period, a new FCF event shall be added to the FCoE async event
4098  * list, and then the worker thread shall be woken up for processing from
4099  * the worker thread context.
4100 **/
4101 static void
4102 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
4103 {
4104 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
4105
4106 /* Don't send FCF rediscovery event if timer cancelled */
4107 spin_lock_irq(&phba->hbalock);
4108 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4109 spin_unlock_irq(&phba->hbalock);
4110 return;
4111 }
4112 /* Clear FCF rediscovery timer pending flag */
4113 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4114 /* FCF rediscovery event to worker thread */
4115 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4116 spin_unlock_irq(&phba->hbalock);
4117 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4118 "2776 FCF rediscover quiescent timer expired\n");
4119 /* wake up worker thread */
4120 lpfc_worker_wake_up(phba);
4121 }
4122
4123 /**
4124 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
4125 * @phba: pointer to lpfc hba data structure.
4126 * @acqe_link: pointer to the async link completion queue entry.
4127 *
4128 * This routine is to parse the SLI4 link-attention link fault code and
4129 * translate it into the base driver's read link attention mailbox command
4130 * status.
4131 *
4132 * Return: Link-attention status in terms of base driver's coding.
4133 **/
4134 static uint16_t
4135 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4136 struct lpfc_acqe_link *acqe_link)
4137 {
4138 uint16_t latt_fault;
4139
4140 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4141 case LPFC_ASYNC_LINK_FAULT_NONE:
4142 case LPFC_ASYNC_LINK_FAULT_LOCAL:
4143 case LPFC_ASYNC_LINK_FAULT_REMOTE:
4144 latt_fault = 0;
4145 break;
4146 default:
4147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4148 "0398 Invalid link fault code: x%x\n",
4149 bf_get(lpfc_acqe_link_fault, acqe_link));
4150 latt_fault = MBXERR_ERROR;
4151 break;
4152 }
4153 return latt_fault;
4154 }
4155
4156 /**
4157 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
4158 * @phba: pointer to lpfc hba data structure.
4159 * @acqe_link: pointer to the async link completion queue entry.
4160 *
4161 * This routine is to parse the SLI4 link attention type and translate it
4162 * into the base driver's link attention type coding.
4163 *
4164 * Return: Link attention type in terms of base driver's coding.
4165 **/
4166 static uint8_t
4167 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4168 struct lpfc_acqe_link *acqe_link)
4169 {
4170 uint8_t att_type;
4171
4172 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4173 case LPFC_ASYNC_LINK_STATUS_DOWN:
4174 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
4175 att_type = LPFC_ATT_LINK_DOWN;
4176 break;
4177 case LPFC_ASYNC_LINK_STATUS_UP:
4178 /* Ignore physical link up events - wait for logical link up */
4179 att_type = LPFC_ATT_RESERVED;
4180 break;
4181 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
4182 att_type = LPFC_ATT_LINK_UP;
4183 break;
4184 default:
4185 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4186 "0399 Invalid link attention type: x%x\n",
4187 bf_get(lpfc_acqe_link_status, acqe_link));
4188 att_type = LPFC_ATT_RESERVED;
4189 break;
4190 }
4191 return att_type;
4192 }
4193
4194 /**
4195  * lpfc_sli_port_speed_get - Convert sli3 link speed code to link speed
4196 * @phba: pointer to lpfc hba data structure.
4197 *
4198 * This routine is to get an SLI3 FC port's link speed in Mbps.
4199 *
4200 * Return: link speed in terms of Mbps.
4201 **/
4202 uint32_t
4203 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4204 {
4205 uint32_t link_speed;
4206
4207 if (!lpfc_is_link_up(phba))
4208 return 0;
4209
4210 if (phba->sli_rev <= LPFC_SLI_REV3) {
4211 switch (phba->fc_linkspeed) {
4212 case LPFC_LINK_SPEED_1GHZ:
4213 link_speed = 1000;
4214 break;
4215 case LPFC_LINK_SPEED_2GHZ:
4216 link_speed = 2000;
4217 break;
4218 case LPFC_LINK_SPEED_4GHZ:
4219 link_speed = 4000;
4220 break;
4221 case LPFC_LINK_SPEED_8GHZ:
4222 link_speed = 8000;
4223 break;
4224 case LPFC_LINK_SPEED_10GHZ:
4225 link_speed = 10000;
4226 break;
4227 case LPFC_LINK_SPEED_16GHZ:
4228 link_speed = 16000;
4229 break;
4230 default:
4231 link_speed = 0;
4232 }
4233 } else {
4234 if (phba->sli4_hba.link_state.logical_speed)
4235 link_speed =
4236 phba->sli4_hba.link_state.logical_speed;
4237 else
4238 link_speed = phba->sli4_hba.link_state.speed;
4239 }
4240 return link_speed;
4241 }
4242
4243 /**
4244 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
4245 * @phba: pointer to lpfc hba data structure.
4246 * @evt_code: asynchronous event code.
4247 * @speed_code: asynchronous event link speed code.
4248 *
4249  * This routine is to parse the given SLI4 async event link speed code into
4250  * a link speed value in Mbps.
4251 *
4252 * Return: link speed in terms of Mbps.
4253 **/
4254 static uint32_t
4255 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
4256 uint8_t speed_code)
4257 {
4258 uint32_t port_speed;
4259
4260 switch (evt_code) {
4261 case LPFC_TRAILER_CODE_LINK:
4262 switch (speed_code) {
4263 case LPFC_ASYNC_LINK_SPEED_ZERO:
4264 port_speed = 0;
4265 break;
4266 case LPFC_ASYNC_LINK_SPEED_10MBPS:
4267 port_speed = 10;
4268 break;
4269 case LPFC_ASYNC_LINK_SPEED_100MBPS:
4270 port_speed = 100;
4271 break;
4272 case LPFC_ASYNC_LINK_SPEED_1GBPS:
4273 port_speed = 1000;
4274 break;
4275 case LPFC_ASYNC_LINK_SPEED_10GBPS:
4276 port_speed = 10000;
4277 break;
4278 case LPFC_ASYNC_LINK_SPEED_20GBPS:
4279 port_speed = 20000;
4280 break;
4281 case LPFC_ASYNC_LINK_SPEED_25GBPS:
4282 port_speed = 25000;
4283 break;
4284 case LPFC_ASYNC_LINK_SPEED_40GBPS:
4285 port_speed = 40000;
4286 break;
4287 default:
4288 port_speed = 0;
4289 }
4290 break;
4291 case LPFC_TRAILER_CODE_FC:
4292 switch (speed_code) {
4293 case LPFC_FC_LA_SPEED_UNKNOWN:
4294 port_speed = 0;
4295 break;
4296 case LPFC_FC_LA_SPEED_1G:
4297 port_speed = 1000;
4298 break;
4299 case LPFC_FC_LA_SPEED_2G:
4300 port_speed = 2000;
4301 break;
4302 case LPFC_FC_LA_SPEED_4G:
4303 port_speed = 4000;
4304 break;
4305 case LPFC_FC_LA_SPEED_8G:
4306 port_speed = 8000;
4307 break;
4308 case LPFC_FC_LA_SPEED_10G:
4309 port_speed = 10000;
4310 break;
4311 case LPFC_FC_LA_SPEED_16G:
4312 port_speed = 16000;
4313 break;
4314 case LPFC_FC_LA_SPEED_32G:
4315 port_speed = 32000;
4316 break;
4317 default:
4318 port_speed = 0;
4319 }
4320 break;
4321 default:
4322 port_speed = 0;
4323 }
4324 return port_speed;
4325 }
4326
4327 /**
4328 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
4329 * @phba: pointer to lpfc hba data structure.
4330 * @acqe_link: pointer to the async link completion queue entry.
4331 *
4332 * This routine is to handle the SLI4 asynchronous FCoE link event.
4333 **/
4334 static void
4335 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
4336 struct lpfc_acqe_link *acqe_link)
4337 {
4338 struct lpfc_dmabuf *mp;
4339 LPFC_MBOXQ_t *pmb;
4340 MAILBOX_t *mb;
4341 struct lpfc_mbx_read_top *la;
4342 uint8_t att_type;
4343 int rc;
4344
4345 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
4346 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
4347 return;
4348 phba->fcoe_eventtag = acqe_link->event_tag;
4349 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4350 if (!pmb) {
4351 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4352 "0395 The mboxq allocation failed\n");
4353 return;
4354 }
4355 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4356 if (!mp) {
4357 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4358 "0396 The lpfc_dmabuf allocation failed\n");
4359 goto out_free_pmb;
4360 }
4361 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4362 if (!mp->virt) {
4363 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4364 "0397 The mbuf allocation failed\n");
4365 goto out_free_dmabuf;
4366 }
4367
4368 /* Cleanup any outstanding ELS commands */
4369 lpfc_els_flush_all_cmd(phba);
4370
4371 	/* Block ELS IOCBs until we have processed the link event */
4372 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
4373
4374 /* Update link event statistics */
4375 phba->sli.slistat.link_event++;
4376
4377 /* Create lpfc_handle_latt mailbox command from link ACQE */
4378 lpfc_read_topology(phba, pmb, mp);
4379 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4380 pmb->vport = phba->pport;
4381
4382 /* Keep the link status for extra SLI4 state machine reference */
4383 phba->sli4_hba.link_state.speed =
4384 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
4385 bf_get(lpfc_acqe_link_speed, acqe_link));
4386 phba->sli4_hba.link_state.duplex =
4387 bf_get(lpfc_acqe_link_duplex, acqe_link);
4388 phba->sli4_hba.link_state.status =
4389 bf_get(lpfc_acqe_link_status, acqe_link);
4390 phba->sli4_hba.link_state.type =
4391 bf_get(lpfc_acqe_link_type, acqe_link);
4392 phba->sli4_hba.link_state.number =
4393 bf_get(lpfc_acqe_link_number, acqe_link);
4394 phba->sli4_hba.link_state.fault =
4395 bf_get(lpfc_acqe_link_fault, acqe_link);
4396 phba->sli4_hba.link_state.logical_speed =
4397 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
4398
4399 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4400 "2900 Async FC/FCoE Link event - Speed:%dGBit "
4401 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
4402 "Logical speed:%dMbps Fault:%d\n",
4403 phba->sli4_hba.link_state.speed,
4404 phba->sli4_hba.link_state.topology,
4405 phba->sli4_hba.link_state.status,
4406 phba->sli4_hba.link_state.type,
4407 phba->sli4_hba.link_state.number,
4408 phba->sli4_hba.link_state.logical_speed,
4409 phba->sli4_hba.link_state.fault);
4410 /*
4411 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
4412 * topology info. Note: Optional for non FC-AL ports.
4413 */
4414 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
4415 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4416 if (rc == MBX_NOT_FINISHED)
4417 goto out_free_dmabuf;
4418 return;
4419 }
4420 /*
4421 * For FCoE Mode: fill in all the topology information we need and call
4422 * the READ_TOPOLOGY completion routine to continue without actually
4423 * sending the READ_TOPOLOGY mailbox command to the port.
4424 */
4425 /* Parse and translate status field */
4426 mb = &pmb->u.mb;
4427 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
4428
4429 /* Parse and translate link attention fields */
4430 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4431 la->eventTag = acqe_link->event_tag;
4432 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4433 bf_set(lpfc_mbx_read_top_link_spd, la,
4434 (bf_get(lpfc_acqe_link_speed, acqe_link)));
4435
4436 	/* Fake the following irrelevant fields */
4437 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4438 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4439 bf_set(lpfc_mbx_read_top_il, la, 0);
4440 bf_set(lpfc_mbx_read_top_pb, la, 0);
4441 bf_set(lpfc_mbx_read_top_fa, la, 0);
4442 bf_set(lpfc_mbx_read_top_mm, la, 0);
4443
4444 /* Invoke the lpfc_handle_latt mailbox command callback function */
4445 lpfc_mbx_cmpl_read_topology(phba, pmb);
4446
4447 return;
4448
4449 out_free_dmabuf:
4450 kfree(mp);
4451 out_free_pmb:
4452 mempool_free(pmb, phba->mbox_mem_pool);
4453 }
4454
4455 /**
4456 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
4457 * @phba: pointer to lpfc hba data structure.
4458 * @acqe_fc: pointer to the async fc completion queue entry.
4459 *
4460 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
4461 * that the event was received and then issue a read_topology mailbox command so
4462 * that the rest of the driver will treat it the same as SLI3.
4463 **/
4464 static void
4465 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
4466 {
4467 struct lpfc_dmabuf *mp;
4468 LPFC_MBOXQ_t *pmb;
4469 MAILBOX_t *mb;
4470 struct lpfc_mbx_read_top *la;
4471 int rc;
4472
4473 if (bf_get(lpfc_trailer_type, acqe_fc) !=
4474 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
4475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4476 "2895 Non FC link Event detected.(%d)\n",
4477 bf_get(lpfc_trailer_type, acqe_fc));
4478 return;
4479 }
4480 /* Keep the link status for extra SLI4 state machine reference */
4481 phba->sli4_hba.link_state.speed =
4482 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
4483 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
4484 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
4485 phba->sli4_hba.link_state.topology =
4486 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
4487 phba->sli4_hba.link_state.status =
4488 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
4489 phba->sli4_hba.link_state.type =
4490 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
4491 phba->sli4_hba.link_state.number =
4492 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
4493 phba->sli4_hba.link_state.fault =
4494 bf_get(lpfc_acqe_link_fault, acqe_fc);
4495 phba->sli4_hba.link_state.logical_speed =
4496 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
4497 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4498 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
4499 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
4500 "%dMbps Fault:%d\n",
4501 phba->sli4_hba.link_state.speed,
4502 phba->sli4_hba.link_state.topology,
4503 phba->sli4_hba.link_state.status,
4504 phba->sli4_hba.link_state.type,
4505 phba->sli4_hba.link_state.number,
4506 phba->sli4_hba.link_state.logical_speed,
4507 phba->sli4_hba.link_state.fault);
4508 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4509 if (!pmb) {
4510 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4511 "2897 The mboxq allocation failed\n");
4512 return;
4513 }
4514 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4515 if (!mp) {
4516 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4517 "2898 The lpfc_dmabuf allocation failed\n");
4518 goto out_free_pmb;
4519 }
4520 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4521 if (!mp->virt) {
4522 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4523 "2899 The mbuf allocation failed\n");
4524 goto out_free_dmabuf;
4525 }
4526
4527 /* Cleanup any outstanding ELS commands */
4528 lpfc_els_flush_all_cmd(phba);
4529
4530 	/* Block ELS IOCBs until we have processed the link event */
4531 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
4532
4533 /* Update link event statistics */
4534 phba->sli.slistat.link_event++;
4535
4536 /* Create lpfc_handle_latt mailbox command from link ACQE */
4537 lpfc_read_topology(phba, pmb, mp);
4538 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4539 pmb->vport = phba->pport;
4540
4541 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
4542 /* Parse and translate status field */
4543 mb = &pmb->u.mb;
4544 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
4545 (void *)acqe_fc);
4546
4547 /* Parse and translate link attention fields */
4548 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
4549 la->eventTag = acqe_fc->event_tag;
4550
4551 if (phba->sli4_hba.link_state.status ==
4552 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
4553 bf_set(lpfc_mbx_read_top_att_type, la,
4554 LPFC_FC_LA_TYPE_UNEXP_WWPN);
4555 } else {
4556 bf_set(lpfc_mbx_read_top_att_type, la,
4557 LPFC_FC_LA_TYPE_LINK_DOWN);
4558 }
4559 /* Invoke the mailbox command callback function */
4560 lpfc_mbx_cmpl_read_topology(phba, pmb);
4561
4562 return;
4563 }
4564
4565 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4566 if (rc == MBX_NOT_FINISHED)
4567 goto out_free_dmabuf;
4568 return;
4569
4570 out_free_dmabuf:
4571 kfree(mp);
4572 out_free_pmb:
4573 mempool_free(pmb, phba->mbox_mem_pool);
4574 }
4575
4576 /**
4577 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
4578 * @phba: pointer to lpfc hba data structure.
4579  * @acqe_sli: pointer to the async SLI completion queue entry.
4580 *
4581 * This routine is to handle the SLI4 asynchronous SLI events.
4582 **/
4583 static void
4584 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
4585 {
4586 char port_name;
4587 char message[128];
4588 uint8_t status;
4589 uint8_t evt_type;
4590 uint8_t operational = 0;
4591 struct temp_event temp_event_data;
4592 struct lpfc_acqe_misconfigured_event *misconfigured;
4593 struct Scsi_Host *shost;
4594
4595 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
4596
4597 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4598 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
4599 "x%08x SLI Event Type:%d\n",
4600 acqe_sli->event_data1, acqe_sli->event_data2,
4601 evt_type);
4602
4603 port_name = phba->Port[0];
4604 if (port_name == 0x00)
4605 		port_name = '?'; /* port name is empty */
4606
4607 switch (evt_type) {
4608 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
4609 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
4610 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
4611 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
4612
4613 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4614 "3190 Over Temperature:%d Celsius- Port Name %c\n",
4615 acqe_sli->event_data1, port_name);
4616
4617 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
4618 shost = lpfc_shost_from_vport(phba->pport);
4619 fc_host_post_vendor_event(shost, fc_get_event_number(),
4620 sizeof(temp_event_data),
4621 (char *)&temp_event_data,
4622 SCSI_NL_VID_TYPE_PCI
4623 | PCI_VENDOR_ID_EMULEX);
4624 break;
4625 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
4626 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
4627 temp_event_data.event_code = LPFC_NORMAL_TEMP;
4628 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
4629
4630 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4631 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
4632 acqe_sli->event_data1, port_name);
4633
4634 shost = lpfc_shost_from_vport(phba->pport);
4635 fc_host_post_vendor_event(shost, fc_get_event_number(),
4636 sizeof(temp_event_data),
4637 (char *)&temp_event_data,
4638 SCSI_NL_VID_TYPE_PCI
4639 | PCI_VENDOR_ID_EMULEX);
4640 break;
4641 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
4642 misconfigured = (struct lpfc_acqe_misconfigured_event *)
4643 &acqe_sli->event_data1;
4644
4645 /* fetch the status for this port */
4646 switch (phba->sli4_hba.lnk_info.lnk_no) {
4647 case LPFC_LINK_NUMBER_0:
4648 status = bf_get(lpfc_sli_misconfigured_port0_state,
4649 &misconfigured->theEvent);
4650 operational = bf_get(lpfc_sli_misconfigured_port0_op,
4651 &misconfigured->theEvent);
4652 break;
4653 case LPFC_LINK_NUMBER_1:
4654 status = bf_get(lpfc_sli_misconfigured_port1_state,
4655 &misconfigured->theEvent);
4656 operational = bf_get(lpfc_sli_misconfigured_port1_op,
4657 &misconfigured->theEvent);
4658 break;
4659 case LPFC_LINK_NUMBER_2:
4660 status = bf_get(lpfc_sli_misconfigured_port2_state,
4661 &misconfigured->theEvent);
4662 operational = bf_get(lpfc_sli_misconfigured_port2_op,
4663 &misconfigured->theEvent);
4664 break;
4665 case LPFC_LINK_NUMBER_3:
4666 status = bf_get(lpfc_sli_misconfigured_port3_state,
4667 &misconfigured->theEvent);
4668 operational = bf_get(lpfc_sli_misconfigured_port3_op,
4669 &misconfigured->theEvent);
4670 break;
4671 default:
4672 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4673 "3296 "
4674 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
4675 "event: Invalid link %d",
4676 phba->sli4_hba.lnk_info.lnk_no);
4677 return;
4678 }
4679
4680 /* Skip if optic state unchanged */
4681 if (phba->sli4_hba.lnk_info.optic_state == status)
4682 return;
4683
4684 switch (status) {
4685 case LPFC_SLI_EVENT_STATUS_VALID:
4686 sprintf(message, "Physical Link is functional");
4687 break;
4688 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
4689 sprintf(message, "Optics faulted/incorrectly "
4690 "installed/not installed - Reseat optics, "
4691 "if issue not resolved, replace.");
4692 break;
4693 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
4694 sprintf(message,
4695 "Optics of two types installed - Remove one "
4696 "optic or install matching pair of optics.");
4697 break;
4698 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
4699 sprintf(message, "Incompatible optics - Replace with "
4700 "compatible optics for card to function.");
4701 break;
4702 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
4703 sprintf(message, "Unqualified optics - Replace with "
4704 "Avago optics for Warranty and Technical "
4705 "Support - Link is%s operational",
4706 (operational) ? " not" : "");
4707 break;
4708 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
4709 sprintf(message, "Uncertified optics - Replace with "
4710 "Avago-certified optics to enable link "
4711 "operation - Link is%s operational",
4712 (operational) ? " not" : "");
4713 break;
4714 default:
4715 /* firmware is reporting a status we don't know about */
4716 sprintf(message, "Unknown event status x%02x", status);
4717 break;
4718 }
4719 phba->sli4_hba.lnk_info.optic_state = status;
4720 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4721 "3176 Port Name %c %s\n", port_name, message);
4722 break;
4723 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
4724 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4725 "3192 Remote DPort Test Initiated - "
4726 "Event Data1:x%08x Event Data2: x%08x\n",
4727 acqe_sli->event_data1, acqe_sli->event_data2);
4728 break;
4729 default:
4730 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4731 "3193 Async SLI event - Event Data1:x%08x Event Data2:"
4732 "x%08x SLI Event Type:%d\n",
4733 acqe_sli->event_data1, acqe_sli->event_data2,
4734 evt_type);
4735 break;
4736 }
4737 }
4738
4739 /**
4740 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
4741 * @vport: pointer to vport data structure.
4742 *
4743 * This routine is to perform Clear Virtual Link (CVL) on a vport in
4744 * response to a CVL event.
4745 *
4746 * Return the pointer to the ndlp with the vport if successful, otherwise
4747 * return NULL.
4748 **/
4749 static struct lpfc_nodelist *
4750 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
4751 {
4752 struct lpfc_nodelist *ndlp;
4753 struct Scsi_Host *shost;
4754 struct lpfc_hba *phba;
4755
4756 if (!vport)
4757 return NULL;
4758 phba = vport->phba;
4759 if (!phba)
4760 return NULL;
4761 ndlp = lpfc_findnode_did(vport, Fabric_DID);
4762 if (!ndlp) {
4763 /* Cannot find existing Fabric ndlp, so allocate a new one */
4764 ndlp = lpfc_nlp_init(vport, Fabric_DID);
4765 if (!ndlp)
4766 return 0;
4767 /* Set the node type */
4768 ndlp->nlp_type |= NLP_FABRIC;
4769 /* Put ndlp onto node list */
4770 lpfc_enqueue_node(vport, ndlp);
4771 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4772 /* re-setup ndlp without removing from node list */
4773 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4774 if (!ndlp)
4775 return 0;
4776 }
4777 if ((phba->pport->port_state < LPFC_FLOGI) &&
4778 (phba->pport->port_state != LPFC_VPORT_FAILED))
4779 return NULL;
4780 /* If virtual link is not yet instantiated ignore CVL */
4781 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
4782 && (vport->port_state != LPFC_VPORT_FAILED))
4783 return NULL;
4784 shost = lpfc_shost_from_vport(vport);
4785 if (!shost)
4786 return NULL;
4787 lpfc_linkdown_port(vport);
4788 lpfc_cleanup_pending_mbox(vport);
4789 spin_lock_irq(shost->host_lock);
4790 vport->fc_flag |= FC_VPORT_CVL_RCVD;
4791 spin_unlock_irq(shost->host_lock);
4792
4793 return ndlp;
4794 }
4795
4796 /**
4797 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4798  * @phba: pointer to lpfc hba data structure.
4799 *
4800 * This routine is to perform Clear Virtual Link (CVL) on all vports in
4801 * response to a FCF dead event.
4802 **/
4803 static void
4804 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4805 {
4806 struct lpfc_vport **vports;
4807 int i;
4808
4809 vports = lpfc_create_vport_work_array(phba);
4810 if (vports)
4811 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4812 lpfc_sli4_perform_vport_cvl(vports[i]);
4813 lpfc_destroy_vport_work_array(phba, vports);
4814 }
4815
4816 /**
4817 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
4818 * @phba: pointer to lpfc hba data structure.
4819  * @acqe_fip: pointer to the async fcoe completion queue entry.
4820 *
4821 * This routine is to handle the SLI4 asynchronous fcoe event.
4822 **/
4823 static void
4824 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4825 struct lpfc_acqe_fip *acqe_fip)
4826 {
4827 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
4828 int rc;
4829 struct lpfc_vport *vport;
4830 struct lpfc_nodelist *ndlp;
4831 struct Scsi_Host *shost;
4832 int active_vlink_present;
4833 struct lpfc_vport **vports;
4834 int i;
4835
4836 phba->fc_eventTag = acqe_fip->event_tag;
4837 phba->fcoe_eventtag = acqe_fip->event_tag;
4838 switch (event_type) {
4839 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
4840 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
4841 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
4842 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4843 LOG_DISCOVERY,
4844 "2546 New FCF event, evt_tag:x%x, "
4845 "index:x%x\n",
4846 acqe_fip->event_tag,
4847 acqe_fip->index);
4848 else
4849 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
4850 LOG_DISCOVERY,
4851 "2788 FCF param modified event, "
4852 "evt_tag:x%x, index:x%x\n",
4853 acqe_fip->event_tag,
4854 acqe_fip->index);
4855 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4856 /*
4857 * During period of FCF discovery, read the FCF
4858 * table record indexed by the event to update
4859 * FCF roundrobin failover eligible FCF bmask.
4860 */
4861 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4862 LOG_DISCOVERY,
4863 "2779 Read FCF (x%x) for updating "
4864 "roundrobin FCF failover bmask\n",
4865 acqe_fip->index);
4866 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
4867 }
4868
4869 /* If the FCF discovery is in progress, do nothing. */
4870 spin_lock_irq(&phba->hbalock);
4871 if (phba->hba_flag & FCF_TS_INPROG) {
4872 spin_unlock_irq(&phba->hbalock);
4873 break;
4874 }
4875 /* If fast FCF failover rescan event is pending, do nothing */
4876 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
4877 spin_unlock_irq(&phba->hbalock);
4878 break;
4879 }
4880
4881 /* If the FCF has been in discovered state, do nothing. */
4882 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
4883 spin_unlock_irq(&phba->hbalock);
4884 break;
4885 }
4886 spin_unlock_irq(&phba->hbalock);
4887
4888 /* Otherwise, scan the entire FCF table and re-discover SAN */
4889 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4890 "2770 Start FCF table scan per async FCF "
4891 "event, evt_tag:x%x, index:x%x\n",
4892 acqe_fip->event_tag, acqe_fip->index);
4893 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
4894 LPFC_FCOE_FCF_GET_FIRST);
4895 if (rc)
4896 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4897 "2547 Issue FCF scan read FCF mailbox "
4898 "command failed (x%x)\n", rc);
4899 break;
4900
4901 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
4902 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4903 "2548 FCF Table full count 0x%x tag 0x%x\n",
4904 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
4905 acqe_fip->event_tag);
4906 break;
4907
4908 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
4909 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4910 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4911 "2549 FCF (x%x) disconnected from network, "
4912 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
4913 /*
4914 * If we are in the middle of FCF failover process, clear
4915 * the corresponding FCF bit in the roundrobin bitmap.
4916 */
4917 spin_lock_irq(&phba->hbalock);
4918 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
4919 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
4920 spin_unlock_irq(&phba->hbalock);
4921 /* Update FLOGI FCF failover eligible FCF bmask */
4922 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
4923 break;
4924 }
4925 spin_unlock_irq(&phba->hbalock);
4926
4927 /* If the event is not for currently used fcf do nothing */
4928 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
4929 break;
4930
4931 /*
4932 * Otherwise, request the port to rediscover the entire FCF
4933 		 * table for a fast recovery in case the current FCF
4934 		 * is no longer valid, since we are not already in the
4935 		 * middle of the FCF failover process.
4936 */
4937 spin_lock_irq(&phba->hbalock);
4938 /* Mark the fast failover process in progress */
4939 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
4940 spin_unlock_irq(&phba->hbalock);
4941
4942 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4943 "2771 Start FCF fast failover process due to "
4944 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
4945 "\n", acqe_fip->event_tag, acqe_fip->index);
4946 rc = lpfc_sli4_redisc_fcf_table(phba);
4947 if (rc) {
4948 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4949 LOG_DISCOVERY,
4950 					"2772 Issue FCF rediscover mailbox "
4951 "command failed, fail through to FCF "
4952 "dead event\n");
4953 spin_lock_irq(&phba->hbalock);
4954 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
4955 spin_unlock_irq(&phba->hbalock);
4956 /*
4957 * Last resort will fail over by treating this
4958 * as a link down to FCF registration.
4959 */
4960 lpfc_sli4_fcf_dead_failthrough(phba);
4961 } else {
4962 /* Reset FCF roundrobin bmask for new discovery */
4963 lpfc_sli4_clear_fcf_rr_bmask(phba);
4964 /*
4965 * Handling fast FCF failover to a DEAD FCF event is
4966 			 * considered equivalent to receiving CVL to all vports.
4967 */
4968 lpfc_sli4_perform_all_vport_cvl(phba);
4969 }
4970 break;
4971 case LPFC_FIP_EVENT_TYPE_CVL:
4972 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4973 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4974 "2718 Clear Virtual Link Received for VPI 0x%x"
4975 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
4976
4977 vport = lpfc_find_vport_by_vpid(phba,
4978 acqe_fip->index);
4979 ndlp = lpfc_sli4_perform_vport_cvl(vport);
4980 if (!ndlp)
4981 break;
4982 active_vlink_present = 0;
4983
4984 vports = lpfc_create_vport_work_array(phba);
4985 if (vports) {
4986 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
4987 i++) {
4988 if ((!(vports[i]->fc_flag &
4989 FC_VPORT_CVL_RCVD)) &&
4990 (vports[i]->port_state > LPFC_FDISC)) {
4991 active_vlink_present = 1;
4992 break;
4993 }
4994 }
4995 lpfc_destroy_vport_work_array(phba, vports);
4996 }
4997
4998 /*
4999 * Don't re-instantiate if vport is marked for deletion.
5000 * If we are here first then vport_delete is going to wait
5001 * for discovery to complete.
5002 */
5003 if (!(vport->load_flag & FC_UNLOADING) &&
5004 active_vlink_present) {
5005 /*
5006 * If there are other active VLinks present,
5007 * re-instantiate the Vlink using FDISC.
5008 */
5009 mod_timer(&ndlp->nlp_delayfunc,
5010 jiffies + msecs_to_jiffies(1000));
5011 shost = lpfc_shost_from_vport(vport);
5012 spin_lock_irq(shost->host_lock);
5013 ndlp->nlp_flag |= NLP_DELAY_TMO;
5014 spin_unlock_irq(shost->host_lock);
5015 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
5016 vport->port_state = LPFC_FDISC;
5017 } else {
5018 /*
5019 			 * Otherwise, we request the port to rediscover
5020 			 * the entire FCF table for a fast recovery,
5021 			 * in case the current FCF is no longer valid,
5022 			 * if we are not already in the FCF failover
5023 			 * process.
5024 */
5025 spin_lock_irq(&phba->hbalock);
5026 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5027 spin_unlock_irq(&phba->hbalock);
5028 break;
5029 }
5030 /* Mark the fast failover process in progress */
5031 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
5032 spin_unlock_irq(&phba->hbalock);
5033 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5034 LOG_DISCOVERY,
5035 "2773 Start FCF failover per CVL, "
5036 "evt_tag:x%x\n", acqe_fip->event_tag);
5037 rc = lpfc_sli4_redisc_fcf_table(phba);
5038 if (rc) {
5039 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5040 LOG_DISCOVERY,
5041 "2774 Issue FCF rediscover "
5042 						"mailbox command failed, "
5043 "through to CVL event\n");
5044 spin_lock_irq(&phba->hbalock);
5045 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
5046 spin_unlock_irq(&phba->hbalock);
5047 /*
5048 				 * Last resort will be to re-try on the
5049 				 * currently registered FCF entry.
5050 */
5051 lpfc_retry_pport_discovery(phba);
5052 } else
5053 /*
5054 * Reset FCF roundrobin bmask for new
5055 * discovery.
5056 */
5057 lpfc_sli4_clear_fcf_rr_bmask(phba);
5058 }
5059 break;
5060 default:
5061 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5062 "0288 Unknown FCoE event type 0x%x event tag "
5063 "0x%x\n", event_type, acqe_fip->event_tag);
5064 break;
5065 }
5066 }
5067
5068 /**
5069 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
5070 * @phba: pointer to lpfc hba data structure.
5071  * @acqe_dcbx: pointer to the async dcbx completion queue entry.
5072 *
5073 * This routine is to handle the SLI4 asynchronous dcbx event.
5074 **/
5075 static void
5076 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
5077 struct lpfc_acqe_dcbx *acqe_dcbx)
5078 {
5079 phba->fc_eventTag = acqe_dcbx->event_tag;
5080 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5081 "0290 The SLI4 DCBX asynchronous event is not "
5082 "handled yet\n");
5083 }
5084
5085 /**
5086 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
5087 * @phba: pointer to lpfc hba data structure.
5088  * @acqe_grp5: pointer to the async grp5 completion queue entry.
5089 *
5090 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
5091  * is an asynchronous notification of a logical link speed change. The Port
5092 * reports the logical link speed in units of 10Mbps.
5093 **/
5094 static void
5095 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
5096 struct lpfc_acqe_grp5 *acqe_grp5)
5097 {
5098 uint16_t prev_ll_spd;
5099
5100 phba->fc_eventTag = acqe_grp5->event_tag;
5101 phba->fcoe_eventtag = acqe_grp5->event_tag;
5102 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
5103 phba->sli4_hba.link_state.logical_speed =
5104 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
5105 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5106 "2789 GRP5 Async Event: Updating logical link speed "
5107 "from %dMbps to %dMbps\n", prev_ll_spd,
5108 phba->sli4_hba.link_state.logical_speed);
5109 }
5110
5111 /**
5112 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
5113 * @phba: pointer to lpfc hba data structure.
5114 *
5115 * This routine is invoked by the worker thread to process all the pending
5116 * SLI4 asynchronous events.
5117 **/
5118 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
5119 {
5120 struct lpfc_cq_event *cq_event;
5121
5122 /* First, declare the async event has been handled */
5123 spin_lock_irq(&phba->hbalock);
5124 phba->hba_flag &= ~ASYNC_EVENT;
5125 spin_unlock_irq(&phba->hbalock);
5126 /* Now, handle all the async events */
5127 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5128 /* Get the first event from the head of the event queue */
5129 spin_lock_irq(&phba->hbalock);
5130 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
5131 cq_event, struct lpfc_cq_event, list);
5132 spin_unlock_irq(&phba->hbalock);
5133 /* Process the asynchronous event */
5134 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
5135 case LPFC_TRAILER_CODE_LINK:
5136 lpfc_sli4_async_link_evt(phba,
5137 &cq_event->cqe.acqe_link);
5138 break;
5139 case LPFC_TRAILER_CODE_FCOE:
5140 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
5141 break;
5142 case LPFC_TRAILER_CODE_DCBX:
5143 lpfc_sli4_async_dcbx_evt(phba,
5144 &cq_event->cqe.acqe_dcbx);
5145 break;
5146 case LPFC_TRAILER_CODE_GRP5:
5147 lpfc_sli4_async_grp5_evt(phba,
5148 &cq_event->cqe.acqe_grp5);
5149 break;
5150 case LPFC_TRAILER_CODE_FC:
5151 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
5152 break;
5153 case LPFC_TRAILER_CODE_SLI:
5154 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
5155 break;
5156 default:
5157 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5158 					"1804 Invalid asynchronous event code: "
5159 "x%x\n", bf_get(lpfc_trailer_code,
5160 &cq_event->cqe.mcqe_cmpl));
5161 break;
5162 }
5163 /* Free the completion event processed to the free pool */
5164 lpfc_sli4_cq_event_release(phba, cq_event);
5165 }
5166 }
5167
5168 /**
5169 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
5170 * @phba: pointer to lpfc hba data structure.
5171 *
5172 * This routine is invoked by the worker thread to process FCF table
5173 * rediscovery pending completion event.
5174 **/
5175 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
5176 {
5177 int rc;
5178
5179 spin_lock_irq(&phba->hbalock);
5180 /* Clear FCF rediscovery timeout event */
5181 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
5182 /* Clear driver fast failover FCF record flag */
5183 phba->fcf.failover_rec.flag = 0;
5184 /* Set state for FCF fast failover */
5185 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
5186 spin_unlock_irq(&phba->hbalock);
5187
5188 /* Scan FCF table from the first entry to re-discover SAN */
5189 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5190 "2777 Start post-quiescent FCF table scan\n");
5191 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
5192 if (rc)
5193 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5194 "2747 Issue FCF scan read FCF mailbox "
5195 "command failed 0x%x\n", rc);
5196 }
5197
5198 /**
5199 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
5200 * @phba: pointer to lpfc hba data structure.
5201 * @dev_grp: The HBA PCI-Device group number.
5202 *
5203 * This routine is invoked to set up the per HBA PCI-Device group function
5204 * API jump table entries.
5205 *
5206 * Return: 0 if success, otherwise -ENODEV
5207 **/
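/*
 * Typical usage (sketch): the SLI-3 probe path calls
 * lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP), while the SLI-4 probe
 * path calls lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC).
 */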
5208 int
5209 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5210 {
5211 int rc;
5212
5213 /* Set up lpfc PCI-device group */
5214 phba->pci_dev_grp = dev_grp;
5215
5216 /* The LPFC_PCI_DEV_OC uses SLI4 */
5217 if (dev_grp == LPFC_PCI_DEV_OC)
5218 phba->sli_rev = LPFC_SLI_REV4;
5219
5220 /* Set up device INIT API function jump table */
5221 rc = lpfc_init_api_table_setup(phba, dev_grp);
5222 if (rc)
5223 return -ENODEV;
5224 /* Set up SCSI API function jump table */
5225 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
5226 if (rc)
5227 return -ENODEV;
5228 /* Set up SLI API function jump table */
5229 rc = lpfc_sli_api_table_setup(phba, dev_grp);
5230 if (rc)
5231 return -ENODEV;
5232 /* Set up MBOX API function jump table */
5233 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
5234 if (rc)
5235 return -ENODEV;
5236
5237 return 0;
5238 }
5239
5240 /**
5241 * lpfc_log_intr_mode - Log the active interrupt mode
5242 * @phba: pointer to lpfc hba data structure.
5243 * @intr_mode: active interrupt mode adopted.
5244 *
5245 * This routine is invoked to log the interrupt mode currently in use by
5246 * the device.
5247 **/
5248 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
5249 {
5250 switch (intr_mode) {
5251 case 0:
5252 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5253 "0470 Enable INTx interrupt mode.\n");
5254 break;
5255 case 1:
5256 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5257 "0481 Enabled MSI interrupt mode.\n");
5258 break;
5259 case 2:
5260 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5261 "0480 Enabled MSI-X interrupt mode.\n");
5262 break;
5263 default:
5264 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5265 "0482 Illegal interrupt mode.\n");
5266 break;
5267 }
5268 return;
5269 }
5270
5271 /**
5272 * lpfc_enable_pci_dev - Enable a generic PCI device.
5273 * @phba: pointer to lpfc hba data structure.
5274 *
5275 * This routine is invoked to enable the PCI device that is common to all
5276 * PCI devices.
5277 *
5278 * Return codes
5279 * 0 - successful
5280 * other values - error
5281 **/
5282 static int
5283 lpfc_enable_pci_dev(struct lpfc_hba *phba)
5284 {
5285 struct pci_dev *pdev;
5286
5287 /* Obtain PCI device reference */
5288 if (!phba->pcidev)
5289 goto out_error;
5290 else
5291 pdev = phba->pcidev;
5292 /* Enable PCI device */
5293 if (pci_enable_device_mem(pdev))
5294 goto out_error;
5295 /* Request PCI resource for the device */
5296 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
5297 goto out_disable_device;
5298 /* Set up device as PCI master and save state for EEH */
5299 pci_set_master(pdev);
5300 pci_try_set_mwi(pdev);
5301 pci_save_state(pdev);
5302
5303 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
5304 if (pci_is_pcie(pdev))
5305 pdev->needs_freset = 1;
5306
5307 return 0;
5308
5309 out_disable_device:
5310 pci_disable_device(pdev);
5311 out_error:
5312 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5313 "1401 Failed to enable pci device\n");
5314 return -ENODEV;
5315 }
5316
5317 /**
5318 * lpfc_disable_pci_dev - Disable a generic PCI device.
5319 * @phba: pointer to lpfc hba data structure.
5320 *
5321 * This routine is invoked to disable the PCI device that is common to all
5322 * PCI devices.
5323 **/
5324 static void
5325 lpfc_disable_pci_dev(struct lpfc_hba *phba)
5326 {
5327 struct pci_dev *pdev;
5328
5329 /* Obtain PCI device reference */
5330 if (!phba->pcidev)
5331 return;
5332 else
5333 pdev = phba->pcidev;
5334 /* Release PCI resource and disable PCI device */
5335 pci_release_mem_regions(pdev);
5336 pci_disable_device(pdev);
5337
5338 return;
5339 }
5340
5341 /**
5342 * lpfc_reset_hba - Reset a hba
5343 * @phba: pointer to lpfc hba data structure.
5344 *
5345 * This routine is invoked to reset a hba device. It brings the HBA
5346 * offline, performs a board restart, and then brings the board back
5347 * online. The lpfc_offline call invokes lpfc_sli_hba_down, which cleans up
5348 * any outstanding mailbox commands.
5349 **/
5350 void
5351 lpfc_reset_hba(struct lpfc_hba *phba)
5352 {
5353 /* If resets are disabled then set error state and return. */
5354 if (!phba->cfg_enable_hba_reset) {
5355 phba->link_state = LPFC_HBA_ERROR;
5356 return;
5357 }
5358 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
5359 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5360 else
5361 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
5362 lpfc_offline(phba);
5363 lpfc_sli_brdrestart(phba);
5364 lpfc_online(phba);
5365 lpfc_unblock_mgmt_io(phba);
5366 }
5367
5368 /**
5369 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
5370 * @phba: pointer to lpfc hba data structure.
5371 *
5372 * This function reads the PCI SR-IOV Extended Capability of the physical
5373 * function to determine the maximum number of virtual functions the
5374 * device supports (the SR-IOV TotalVFs field). If the device does not
5375 * expose an SR-IOV capability, 0 is returned. Not all devices support
5376 * SR-IOV, so a return of 0 is not treated as an error.
5377 **/
5378 uint16_t
5379 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
5380 {
5381 struct pci_dev *pdev = phba->pcidev;
5382 uint16_t nr_virtfn;
5383 int pos;
5384
5385 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
5386 if (pos == 0)
5387 return 0;
5388
5389 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
5390 return nr_virtfn;
5391 }
5392
5393 /**
5394 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
5395 * @phba: pointer to lpfc hba data structure.
5396 * @nr_vfn: number of virtual functions to be enabled.
5397 *
5398 * This function enables the PCI SR-IOV virtual functions to a physical
5399 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
5400 * enable @nr_vfn virtual functions on the physical function. As not all
5401 * devices support SR-IOV, a failure return from the pci_enable_sriov()
5402 * API call is not considered an error condition for most devices.
5403 **/
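/*
 * Usage sketch: the SLI-3 and SLI-4 resource setup routines call
 * lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn)
 * when the lpfc_sriov_nr_virtfn module parameter is non-zero.
 */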
5404 int
5405 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
5406 {
5407 struct pci_dev *pdev = phba->pcidev;
5408 uint16_t max_nr_vfn;
5409 int rc;
5410
5411 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
5412 if (nr_vfn > max_nr_vfn) {
5413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5414 "3057 Requested vfs (%d) greater than "
5415 "supported vfs (%d)", nr_vfn, max_nr_vfn);
5416 return -EINVAL;
5417 }
5418
5419 rc = pci_enable_sriov(pdev, nr_vfn);
5420 if (rc) {
5421 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5422 "2806 Failed to enable sriov on this device "
5423 "with vfn number nr_vf:%d, rc:%d\n",
5424 nr_vfn, rc);
5425 } else
5426 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5427 "2807 Successful enable sriov on this device "
5428 "with vfn number nr_vf:%d\n", nr_vfn);
5429 return rc;
5430 }
5431
5432 /**
5433 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
5434 * @phba: pointer to lpfc hba data structure.
5435 *
5436 * This routine is invoked to set up the driver internal resources before the
5437 * device specific resource setup to support the HBA device it is attached to.
5438 *
5439 * Return codes
5440 * 0 - successful
5441 * other values - error
5442 **/
5443 static int
5444 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5445 {
5446 struct lpfc_sli *psli = &phba->sli;
5447
5448 /*
5449 * Driver resources common to all SLI revisions
5450 */
5451 atomic_set(&phba->fast_event_count, 0);
5452 spin_lock_init(&phba->hbalock);
5453
5454 /* Initialize ndlp management spinlock */
5455 spin_lock_init(&phba->ndlp_lock);
5456
5457 INIT_LIST_HEAD(&phba->port_list);
5458 INIT_LIST_HEAD(&phba->work_list);
5459 init_waitqueue_head(&phba->wait_4_mlo_m_q);
5460
5461 /* Initialize the wait queue head for the kernel thread */
5462 init_waitqueue_head(&phba->work_waitq);
5463
5464 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5465 "1403 Protocols supported %s %s %s\n",
5466 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
5467 "SCSI" : " "),
5468 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
5469 "NVME" : " "),
5470 (phba->nvmet_support ? "NVMET" : " "));
5471
5472 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
5473 /* Initialize the scsi buffer list used by driver for scsi IO */
5474 spin_lock_init(&phba->scsi_buf_list_get_lock);
5475 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5476 spin_lock_init(&phba->scsi_buf_list_put_lock);
5477 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5478 }
5479
5480 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
5481 (phba->nvmet_support == 0)) {
5482 /* Initialize the NVME buffer list used by driver for NVME IO */
5483 spin_lock_init(&phba->nvme_buf_list_get_lock);
5484 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
5485 spin_lock_init(&phba->nvme_buf_list_put_lock);
5486 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
5487 }
5488
5489 /* Initialize the fabric iocb list */
5490 INIT_LIST_HEAD(&phba->fabric_iocb_list);
5491
5492 /* Initialize list to save ELS buffers */
5493 INIT_LIST_HEAD(&phba->elsbuf);
5494
5495 /* Initialize FCF connection rec list */
5496 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5497
5498 /* Initialize OAS configuration list */
5499 spin_lock_init(&phba->devicelock);
5500 INIT_LIST_HEAD(&phba->luns);
5501
5502 /* MBOX heartbeat timer */
5503 setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
5504 /* Fabric block timer */
5505 setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
5506 (unsigned long)phba);
5507 /* EA polling mode timer */
5508 setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
5509 (unsigned long)phba);
5510 /* Heartbeat timer */
5511 setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
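/* Note: setup_timer() is the legacy (pre-timer_setup) kernel timer API;
 * each callback above receives the (unsigned long)phba argument, which
 * it casts back to a struct lpfc_hba pointer when the timer fires.
 */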
5512
5513 return 0;
5514 }
5515
5516 /**
5517 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
5518 * @phba: pointer to lpfc hba data structure.
5519 *
5520 * This routine is invoked to set up the driver internal resources specific to
5521 * support the SLI-3 HBA device it is attached to.
5522 *
5523 * Return codes
5524 * 0 - successful
5525 * other values - error
5526 **/
5527 static int
5528 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
5529 {
5530 int rc;
5531
5532 /*
5533 * Initialize timers used by driver
5534 */
5535
5536 /* FCP polling mode timer */
5537 setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
5538 (unsigned long)phba);
5539
5540 /* Host attention work mask setup */
5541 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
5542 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
5543
5544 /* Get all the module params for configuring this host */
5545 lpfc_get_cfgparam(phba);
5546 /* Set up phase-1 common device driver resources */
5547
5548 rc = lpfc_setup_driver_resource_phase1(phba);
5549 if (rc)
5550 return -ENODEV;
5551
5552 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
5553 phba->menlo_flag |= HBA_MENLO_SUPPORT;
5554 /* check for menlo minimum sg count */
5555 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
5556 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
5557 }
5558
5559 if (!phba->sli.sli3_ring)
5560 phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING *
5561 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
5562 if (!phba->sli.sli3_ring)
5563 return -ENOMEM;
5564
5565 /*
5566 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
5567 * used to create the sg_dma_buf_pool must be dynamically calculated.
5568 */
5569
5570 /* Initialize the host templates with the configured values. */
5571 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5572 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
5573 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5574
5575 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
5576 if (phba->cfg_enable_bg) {
5577 /*
5578 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
5579 * the FCP rsp, and a BDE for each. Since we have no control
5580 * over how many protection data segments the SCSI Layer
5581 * will hand us (ie: there could be one for every block
5582 * in the IO), we just allocate enough BDEs to accommodate
5583 * our max amount and we need to limit lpfc_sg_seg_cnt to
5584 * minimize the risk of running out.
5585 */
5586 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5587 sizeof(struct fcp_rsp) +
5588 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
5589
5590 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
5591 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
5592
5593 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
5594 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
5595 } else {
5596 /*
5597 * The scsi_buf for a regular I/O will hold the FCP cmnd,
5598 * the FCP rsp, a BDE for each, and a BDE for up to
5599 * cfg_sg_seg_cnt data segments.
5600 */
5601 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5602 sizeof(struct fcp_rsp) +
5603 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
5604
5605 /* Total BDEs in BPL for scsi_sg_list */
5606 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
5607 }
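/* Worked example (sketch): with the default lpfc_sg_seg_cnt of 64, the
 * non-BlockGuard pool element reserves 66 BDEs (64 data segments plus
 * one each for the FCP cmnd and rsp), so cfg_total_seg_cnt is 66.
 */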
5608
5609 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5610 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
5611 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5612 phba->cfg_total_seg_cnt);
5613
5614 phba->max_vpi = LPFC_MAX_VPI;
5615 /* This will be set to correct value after config_port mbox */
5616 phba->max_vports = 0;
5617
5618 /*
5619 * Initialize the SLI Layer to run with lpfc HBAs.
5620 */
5621 lpfc_sli_setup(phba);
5622 lpfc_sli_queue_init(phba);
5623
5624 /* Allocate device driver memory */
5625 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
5626 return -ENOMEM;
5627
5628 /*
5629 * Enable sr-iov virtual functions if supported and configured
5630 * through the module parameter.
5631 */
5632 if (phba->cfg_sriov_nr_virtfn > 0) {
5633 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5634 phba->cfg_sriov_nr_virtfn);
5635 if (rc) {
5636 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5637 "2808 Requested number of SR-IOV "
5638 "virtual functions (%d) is not "
5639 "supported\n",
5640 phba->cfg_sriov_nr_virtfn);
5641 phba->cfg_sriov_nr_virtfn = 0;
5642 }
5643 }
5644
5645 return 0;
5646 }
5647
5648 /**
5649 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
5650 * @phba: pointer to lpfc hba data structure.
5651 *
5652 * This routine is invoked to unset the driver internal resources set up
5653 * specific for supporting the SLI-3 HBA device it is attached to.
5654 **/
5655 static void
5656 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
5657 {
5658 /* Free device driver memory allocated */
5659 lpfc_mem_free_all(phba);
5660
5661 return;
5662 }
5663
5664 /**
5665 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
5666 * @phba: pointer to lpfc hba data structure.
5667 *
5668 * This routine is invoked to set up the driver internal resources specific to
5669 * support the SLI-4 HBA device it is attached to.
5670 *
5671 * Return codes
5672 * 0 - successful
5673 * other values - error
5674 **/
5675 static int
5676 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5677 {
5678 LPFC_MBOXQ_t *mboxq;
5679 MAILBOX_t *mb;
5680 int rc, i, max_buf_size;
5681 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
5682 struct lpfc_mqe *mqe;
5683 int longs;
5684 int fof_vectors = 0;
5685 uint64_t wwn;
5686
5687 phba->sli4_hba.num_online_cpu = num_online_cpus();
5688 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
5689 phba->sli4_hba.curr_disp_cpu = 0;
5690
5691 /* Get all the module params for configuring this host */
5692 lpfc_get_cfgparam(phba);
5693
5694 /* Set up phase-1 common device driver resources */
5695 rc = lpfc_setup_driver_resource_phase1(phba);
5696 if (rc)
5697 return -ENODEV;
5698
5699 /* Before proceed, wait for POST done and device ready */
5700 rc = lpfc_sli4_post_status_check(phba);
5701 if (rc)
5702 return -ENODEV;
5703
5704 /*
5705 * Initialize timers used by driver
5706 */
5707
5708 setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
5709
5710 /* FCF rediscover timer */
5711 setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
5712 (unsigned long)phba);
5713
5714 /*
5715 * Control structure for handling external multi-buffer mailbox
5716 * command pass-through.
5717 */
5718 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
5719 sizeof(struct lpfc_mbox_ext_buf_ctx));
5720 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
5721
5722 phba->max_vpi = LPFC_MAX_VPI;
5723
5724 /* This will be set to correct value after the read_config mbox */
5725 phba->max_vports = 0;
5726
5727 /* Program the default value of vlan_id and fc_map */
5728 phba->valid_vlan = 0;
5729 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5730 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5731 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5732
5733 /*
5734 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
5735 * we will associate a new ring, for each EQ/CQ/WQ tuple.
5736 * The WQ create will allocate the ring.
5737 */
5738
5739 /*
5740 * It doesn't matter what family our adapter is in, we are
5741 * limited to 2 Pages, 512 SGEs, for our SGL.
5742 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
5743 */
5744 max_buf_size = (2 * SLI4_PAGE_SIZE);
5745 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
5746 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
5747
5748 /*
5749 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
5750 * used to create the sg_dma_buf_pool must be calculated.
5751 */
5752 if (phba->cfg_enable_bg) {
5753 /*
5754 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
5755 * the FCP rsp, and an SGE for each. Since we have no control
5756 * over how many protection segments the SCSI Layer
5757 * will hand us (ie: there could be one for every block
5758 * in the IO), just allocate enough SGEs to accommodate
5759 * our max amount and we need to limit lpfc_sg_seg_cnt
5760 * to minimize the risk of running out.
5761 */
5762 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5763 sizeof(struct fcp_rsp) + max_buf_size;
5764
5765 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
5766 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
5767
5768 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
5769 phba->cfg_sg_seg_cnt =
5770 LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
5771 } else {
5772 /*
5773 * The scsi_buf for a regular I/O holds the FCP cmnd,
5774 * the FCP rsp, a SGE for each, and a SGE for up to
5775 * cfg_sg_seg_cnt data segments.
5776 */
5777 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5778 sizeof(struct fcp_rsp) +
5779 ((phba->cfg_sg_seg_cnt + 2) *
5780 sizeof(struct sli4_sge));
5781
5782 /* Total SGEs for scsi_sg_list */
5783 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
5784
5785 /*
5786 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
5787 * need to post 1 page for the SGL.
5788 */
5789 }
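/* Worked example (sketch): with the default lpfc_sg_seg_cnt of 64, 66
 * SGEs are reserved per buffer (64 data segments plus one each for the
 * FCP cmnd and rsp); since 66 <= 256, a single SGL page suffices.
 */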
5790
5791 /* Initialize the host templates with the updated values. */
5792 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5793 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5794 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
5795
5796 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
5797 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
5798 else
5799 phba->cfg_sg_dma_buf_size =
5800 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
5801
5802 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5803 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
5804 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5805 phba->cfg_total_seg_cnt);
5806
5807 /* Initialize buffer queue management fields */
5808 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
5809 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
5810 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
5811
5812 /*
5813 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
5814 */
5815 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
5816 /* Initialize the Abort scsi buffer list used by driver */
5817 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
5818 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
5819 }
5820
5821 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
5822 /* Initialize the Abort nvme buffer list used by driver */
5823 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
5824 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
5825 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
5826 /* Fast-path XRI aborted CQ Event work queue list */
5827 INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
5828 }
5829
5830 /* This abort list used by worker thread */
5831 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
5832 spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
5833
5834 /*
5835 * Initialize driver internal slow-path work queues
5836 */
5837
5838 /* Driver internal slow-path CQ Event pool */
5839 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
5840 /* Response IOCB work queue list */
5841 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
5842 /* Asynchronous event CQ Event work queue list */
5843 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
5844 /* Fast-path XRI aborted CQ Event work queue list */
5845 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
5846 /* Slow-path XRI aborted CQ Event work queue list */
5847 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
5848 /* Receive queue CQ Event work queue list */
5849 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
5850
5851 /* Initialize extent block lists. */
5852 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
5853 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
5854 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
5855 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
5856
5857 /* Initialize mboxq lists. If the early init routines fail
5858 * these lists need to be correctly initialized.
5859 */
5860 INIT_LIST_HEAD(&phba->sli.mboxq);
5861 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
5862
5863 /* initialize optic_state to 0xFF */
5864 phba->sli4_hba.lnk_info.optic_state = 0xff;
5865
5866 /* Allocate device driver memory */
5867 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
5868 if (rc)
5869 return -ENOMEM;
5870
5871 /* IF Type 2 ports get initialized now. */
5872 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5873 LPFC_SLI_INTF_IF_TYPE_2) {
5874 rc = lpfc_pci_function_reset(phba);
5875 if (unlikely(rc)) {
5876 rc = -ENODEV;
5877 goto out_free_mem;
5878 }
5879 phba->temp_sensor_support = 1;
5880 }
5881
5882 /* Create the bootstrap mailbox command */
5883 rc = lpfc_create_bootstrap_mbox(phba);
5884 if (unlikely(rc))
5885 goto out_free_mem;
5886
5887 /* Set up the host's endian order with the device. */
5888 rc = lpfc_setup_endian_order(phba);
5889 if (unlikely(rc))
5890 goto out_free_bsmbx;
5891
5892 /* Set up the hba's configuration parameters. */
5893 rc = lpfc_sli4_read_config(phba);
5894 if (unlikely(rc))
5895 goto out_free_bsmbx;
5896 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
5897 if (unlikely(rc))
5898 goto out_free_bsmbx;
5899
5900 /* IF Type 0 ports get initialized now. */
5901 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5902 LPFC_SLI_INTF_IF_TYPE_0) {
5903 rc = lpfc_pci_function_reset(phba);
5904 if (unlikely(rc))
5905 goto out_free_bsmbx;
5906 }
5907
5908 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
5909 GFP_KERNEL);
5910 if (!mboxq) {
5911 rc = -ENOMEM;
5912 goto out_free_bsmbx;
5913 }
5914
5915 /* Check for NVMET being configured */
5916 phba->nvmet_support = 0;
5917 if (lpfc_enable_nvmet_cnt) {
5918
5919 /* First get WWN of HBA instance */
5920 lpfc_read_nv(phba, mboxq);
5921 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5922 if (rc != MBX_SUCCESS) {
5923 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5924 "6016 Mailbox failed , mbxCmd x%x "
5925 "READ_NV, mbxStatus x%x\n",
5926 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5927 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
5928 mempool_free(mboxq, phba->mbox_mem_pool);
5929 rc = -EIO;
5930 goto out_free_bsmbx;
5931 }
5932 mb = &mboxq->u.mb;
5933 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
5934 sizeof(uint64_t));
5935 wwn = cpu_to_be64(wwn);
5936 phba->sli4_hba.wwnn.u.name = wwn;
5937 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
5938 sizeof(uint64_t));
5939 /* wwn is WWPN of HBA instance */
5940 wwn = cpu_to_be64(wwn);
5941 phba->sli4_hba.wwpn.u.name = wwn;
5942
5943 /* Check to see if it matches any module parameter */
5944 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
5945 if (wwn == lpfc_enable_nvmet[i]) {
5946 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
5947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5948 "6017 NVME Target %016llx\n",
5949 wwn);
5950 phba->nvmet_support = 1; /* a match */
5951 #else
5952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5953 "6021 Can't enable NVME Target."
5954 " NVME_TARGET_FC infrastructure"
5955 " is not in kernel\n");
5956 #endif
5957 }
5958 }
5959 }
5960
5961 lpfc_nvme_mod_param_dep(phba);
5962
5963 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
5964 lpfc_supported_pages(mboxq);
5965 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5966 if (!rc) {
5967 mqe = &mboxq->u.mqe;
5968 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
5969 LPFC_MAX_SUPPORTED_PAGES);
5970 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
5971 switch (pn_page[i]) {
5972 case LPFC_SLI4_PARAMETERS:
5973 phba->sli4_hba.pc_sli4_params.supported = 1;
5974 break;
5975 default:
5976 break;
5977 }
5978 }
5979 /* Read the port's SLI4 Parameters capabilities if supported. */
5980 if (phba->sli4_hba.pc_sli4_params.supported)
5981 rc = lpfc_pc_sli4_params_get(phba, mboxq);
5982 if (rc) {
5983 mempool_free(mboxq, phba->mbox_mem_pool);
5984 rc = -EIO;
5985 goto out_free_bsmbx;
5986 }
5987 }
5988
5989 /*
5990 * Get sli4 parameters that override parameters from Port capabilities.
5991 * If this call fails, it isn't critical unless the SLI4 parameters come
5992 * back in conflict.
5993 */
5994 rc = lpfc_get_sli4_parameters(phba, mboxq);
5995 if (rc) {
5996 if (phba->sli4_hba.extents_in_use &&
5997 phba->sli4_hba.rpi_hdrs_in_use) {
5998 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5999 "2999 Unsupported SLI4 Parameters "
6000 "Extents and RPI headers enabled.\n");
6001 }
6002 mempool_free(mboxq, phba->mbox_mem_pool);
6003 goto out_free_bsmbx;
6004 }
6005
6006 mempool_free(mboxq, phba->mbox_mem_pool);
6007
6008 /* Verify OAS is supported */
6009 lpfc_sli4_oas_verify(phba);
6010 if (phba->cfg_fof)
6011 fof_vectors = 1;
6012
6013 /* Verify all the SLI4 queues */
6014 rc = lpfc_sli4_queue_verify(phba);
6015 if (rc)
6016 goto out_free_bsmbx;
6017
6018 /* Create driver internal CQE event pool */
6019 rc = lpfc_sli4_cq_event_pool_create(phba);
6020 if (rc)
6021 goto out_free_bsmbx;
6022
6023 /* Initialize sgl lists per host */
6024 lpfc_init_sgl_list(phba);
6025
6026 /* Allocate and initialize active sgl array */
6027 rc = lpfc_init_active_sgl_array(phba);
6028 if (rc) {
6029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6030 "1430 Failed to initialize sgl list.\n");
6031 goto out_destroy_cq_event_pool;
6032 }
6033 rc = lpfc_sli4_init_rpi_hdrs(phba);
6034 if (rc) {
6035 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6036 "1432 Failed to initialize rpi headers.\n");
6037 goto out_free_active_sgl;
6038 }
6039
6040 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
6041 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
6042 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
6043 GFP_KERNEL);
6044 if (!phba->fcf.fcf_rr_bmask) {
6045 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6046 "2759 Failed allocate memory for FCF round "
6047 "robin failover bmask\n");
6048 rc = -ENOMEM;
6049 goto out_remove_rpi_hdrs;
6050 }
6051
6052 phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
6053 sizeof(struct lpfc_hba_eq_hdl),
6054 GFP_KERNEL);
6055 if (!phba->sli4_hba.hba_eq_hdl) {
6056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6057 "2572 Failed allocate memory for "
6058 "fast-path per-EQ handle array\n");
6059 rc = -ENOMEM;
6060 goto out_free_fcf_rr_bmask;
6061 }
6062
6063 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
6064 sizeof(struct lpfc_vector_map_info),
6065 GFP_KERNEL);
6066 if (!phba->sli4_hba.cpu_map) {
6067 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6068 "3327 Failed allocate memory for msi-x "
6069 "interrupt vector mapping\n");
6070 rc = -ENOMEM;
6071 goto out_free_hba_eq_hdl;
6072 }
6073 if (lpfc_used_cpu == NULL) {
6074 lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
6075 GFP_KERNEL);
6076 if (!lpfc_used_cpu) {
6077 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6078 "3335 Failed allocate memory for msi-x "
6079 "interrupt vector mapping\n");
6080 kfree(phba->sli4_hba.cpu_map);
6081 rc = -ENOMEM;
6082 goto out_free_hba_eq_hdl;
6083 }
6084 for (i = 0; i < lpfc_present_cpu; i++)
6085 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
6086 }
6087
6088 /*
6089 * Enable sr-iov virtual functions if supported and configured
6090 * through the module parameter.
6091 */
6092 if (phba->cfg_sriov_nr_virtfn > 0) {
6093 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6094 phba->cfg_sriov_nr_virtfn);
6095 if (rc) {
6096 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6097 "3020 Requested number of SR-IOV "
6098 "virtual functions (%d) is not "
6099 "supported\n",
6100 phba->cfg_sriov_nr_virtfn);
6101 phba->cfg_sriov_nr_virtfn = 0;
6102 }
6103 }
6104
6105 return 0;
6106
6107 out_free_hba_eq_hdl:
6108 kfree(phba->sli4_hba.hba_eq_hdl);
6109 out_free_fcf_rr_bmask:
6110 kfree(phba->fcf.fcf_rr_bmask);
6111 out_remove_rpi_hdrs:
6112 lpfc_sli4_remove_rpi_hdrs(phba);
6113 out_free_active_sgl:
6114 lpfc_free_active_sgl(phba);
6115 out_destroy_cq_event_pool:
6116 lpfc_sli4_cq_event_pool_destroy(phba);
6117 out_free_bsmbx:
6118 lpfc_destroy_bootstrap_mbox(phba);
6119 out_free_mem:
6120 lpfc_mem_free(phba);
6121 return rc;
6122 }
6123
6124 /**
6125 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
6126 * @phba: pointer to lpfc hba data structure.
6127 *
6128 * This routine is invoked to unset the driver internal resources set up
6129 * specific for supporting the SLI-4 HBA device it is attached to.
6130 **/
6131 static void
6132 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
6133 {
6134 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6135
6136 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
6137 kfree(phba->sli4_hba.cpu_map);
6138 phba->sli4_hba.num_present_cpu = 0;
6139 phba->sli4_hba.num_online_cpu = 0;
6140 phba->sli4_hba.curr_disp_cpu = 0;
6141
6142 /* Free memory allocated for fast-path work queue handles */
6143 kfree(phba->sli4_hba.hba_eq_hdl);
6144
6145 /* Free the allocated rpi headers. */
6146 lpfc_sli4_remove_rpi_hdrs(phba);
6147 lpfc_sli4_remove_rpis(phba);
6148
6149 /* Free eligible FCF index bmask */
6150 kfree(phba->fcf.fcf_rr_bmask);
6151
6152 /* Free the ELS sgl list */
6153 lpfc_free_active_sgl(phba);
6154 lpfc_free_els_sgl_list(phba);
6155 lpfc_free_nvmet_sgl_list(phba);
6156
6157 /* Free the completion queue EQ event pool */
6158 lpfc_sli4_cq_event_release_all(phba);
6159 lpfc_sli4_cq_event_pool_destroy(phba);
6160
6161 /* Release resource identifiers. */
6162 lpfc_sli4_dealloc_resource_identifiers(phba);
6163
6164 /* Free the bsmbx region. */
6165 lpfc_destroy_bootstrap_mbox(phba);
6166
6167 /* Free the SLI Layer memory with SLI4 HBAs */
6168 lpfc_mem_free_all(phba);
6169
6170 /* Free the current connect table */
6171 list_for_each_entry_safe(conn_entry, next_conn_entry,
6172 &phba->fcf_conn_rec_list, list) {
6173 list_del_init(&conn_entry->list);
6174 kfree(conn_entry);
6175 }
6176
6177 return;
6178 }
6179
6180 /**
6181 * lpfc_init_api_table_setup - Set up init api function jump table
6182 * @phba: The hba struct for which this call is being executed.
6183 * @dev_grp: The HBA PCI-Device group number.
6184 *
6185 * This routine sets up the device INIT interface API function jump table
6186 * in @phba struct.
6187 *
6188 * Returns: 0 - success, -ENODEV - failure.
6189 **/
6190 int
6191 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6192 {
6193 phba->lpfc_hba_init_link = lpfc_hba_init_link;
6194 phba->lpfc_hba_down_link = lpfc_hba_down_link;
6195 phba->lpfc_selective_reset = lpfc_selective_reset;
6196 switch (dev_grp) {
6197 case LPFC_PCI_DEV_LP:
6198 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
6199 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
6200 phba->lpfc_stop_port = lpfc_stop_port_s3;
6201 break;
6202 case LPFC_PCI_DEV_OC:
6203 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
6204 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
6205 phba->lpfc_stop_port = lpfc_stop_port_s4;
6206 break;
6207 default:
6208 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6209 "1431 Invalid HBA PCI-device group: 0x%x\n",
6210 dev_grp);
6211 return -ENODEV;
6212 break;
6213 }
6214 return 0;
6215 }
6216
6217 /**
6218 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
6219 * @phba: pointer to lpfc hba data structure.
6220 *
6221 * This routine is invoked to set up the driver internal resources after the
6222 * device specific resource setup to support the HBA device it is attached to.
6223 *
6224 * Return codes
6225 * 0 - successful
6226 * other values - error
6227 **/
6228 static int
6229 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
6230 {
6231 int error;
6232
6233 /* Startup the kernel thread for this host adapter. */
6234 phba->worker_thread = kthread_run(lpfc_do_work, phba,
6235 "lpfc_worker_%d", phba->brd_no);
6236 if (IS_ERR(phba->worker_thread)) {
6237 error = PTR_ERR(phba->worker_thread);
6238 return error;
6239 }
6240
6241 return 0;
6242 }
6243
6244 /**
6245 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
6246 * @phba: pointer to lpfc hba data structure.
6247 *
6248 * This routine is invoked to unset the driver internal resources set up after
6249 * the device specific resource setup for supporting the HBA device it is
6250 * attached to.
6251 **/
6252 static void
6253 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
6254 {
6255 /* Stop kernel worker thread */
6256 kthread_stop(phba->worker_thread);
6257 }
6258
6259 /**
6260 * lpfc_free_iocb_list - Free iocb list.
6261 * @phba: pointer to lpfc hba data structure.
6262 *
6263 * This routine is invoked to free the driver's IOCB list and memory.
6264 **/
6265 static void
6266 lpfc_free_iocb_list(struct lpfc_hba *phba)
6267 {
6268 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
6269
6270 spin_lock_irq(&phba->hbalock);
6271 list_for_each_entry_safe(iocbq_entry, iocbq_next,
6272 &phba->lpfc_iocb_list, list) {
6273 list_del(&iocbq_entry->list);
6274 kfree(iocbq_entry);
6275 phba->total_iocbq_bufs--;
6276 }
6277 spin_unlock_irq(&phba->hbalock);
6278
6279 return;
6280 }
6281
6282 /**
6283 * lpfc_init_iocb_list - Allocate and initialize iocb list.
6284 * @phba: pointer to lpfc hba data structure.
6285 *
6286 * This routine is invoked to allocate and initialize the driver's IOCB
6287 * list and set up the IOCB tag array accordingly.
6288 *
6289 * Return codes
6290 * 0 - successful
6291 * other values - error
6292 **/
6293 static int
6294 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
6295 {
6296 struct lpfc_iocbq *iocbq_entry = NULL;
6297 uint16_t iotag;
6298 int i;
6299
6300 /* Initialize and populate the iocb list per host. */
6301 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
6302 for (i = 0; i < iocb_count; i++) {
6303 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
6304 if (iocbq_entry == NULL) {
6305 printk(KERN_ERR "%s: only allocated %d iocbs of "
6306 "expected %d count. Unloading driver.\n",
6307 __func__, i, iocb_count);
6308 goto out_free_iocbq;
6309 }
6310
6311 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
6312 if (iotag == 0) {
6313 kfree(iocbq_entry);
6314 printk(KERN_ERR "%s: failed to allocate IOTAG. "
6315 "Unloading driver.\n", __func__);
6316 goto out_free_iocbq;
6317 }
6318 iocbq_entry->sli4_lxritag = NO_XRI;
6319 iocbq_entry->sli4_xritag = NO_XRI;
6320
6321 spin_lock_irq(&phba->hbalock);
6322 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
6323 phba->total_iocbq_bufs++;
6324 spin_unlock_irq(&phba->hbalock);
6325 }
6326
6327 return 0;
6328
6329 out_free_iocbq:
6330 lpfc_free_iocb_list(phba);
6331
6332 return -ENOMEM;
6333 }
6334
6335 /**
6336 * lpfc_free_sgl_list - Free a given sgl list.
6337 * @phba: pointer to lpfc hba data structure.
6338 * @sglq_list: pointer to the head of sgl list.
6339 *
6340 * This routine is invoked to free a given sgl list and its memory.
6341 **/
6342 void
6343 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
6344 {
6345 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6346
6347 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
6348 list_del(&sglq_entry->list);
6349 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
6350 kfree(sglq_entry);
6351 }
6352 }
6353
6354 /**
6355 * lpfc_free_els_sgl_list - Free els sgl list.
6356 * @phba: pointer to lpfc hba data structure.
6357 *
6358 * This routine is invoked to free the driver's els sgl list and memory.
6359 **/
6360 static void
6361 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
6362 {
6363 LIST_HEAD(sglq_list);
6364
6365 /* Retrieve all els sgls from driver list */
6366 spin_lock_irq(&phba->hbalock);
6367 spin_lock(&phba->sli4_hba.sgl_list_lock);
6368 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
6369 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6370 spin_unlock_irq(&phba->hbalock);
6371
6372 /* Now free the sgl list */
6373 lpfc_free_sgl_list(phba, &sglq_list);
6374 }
6375
6376 /**
6377 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
6378 * @phba: pointer to lpfc hba data structure.
6379 *
6380 * This routine is invoked to free the driver's nvmet sgl list and memory.
6381 **/
6382 static void
6383 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
6384 {
6385 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6386 LIST_HEAD(sglq_list);
6387
6388 /* Retrieve all nvmet sgls from driver list */
6389 spin_lock_irq(&phba->hbalock);
6390 spin_lock(&phba->sli4_hba.sgl_list_lock);
6391 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
6392 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6393 spin_unlock_irq(&phba->hbalock);
6394
6395 /* Now free the sgl list */
6396 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
6397 list_del(&sglq_entry->list);
6398 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
6399 kfree(sglq_entry);
6400 }
6401 }
6402
6403 /**
6404 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
6405 * @phba: pointer to lpfc hba data structure.
6406 *
6407 * This routine is invoked to allocate the driver's active sgl memory.
6408 * This array will hold the sglq_entry's for active IOs.
6409 **/
6410 static int
6411 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
6412 {
6413 int size;
6414 size = sizeof(struct lpfc_sglq *);
6415 size *= phba->sli4_hba.max_cfg_param.max_xri;
6416
6417 phba->sli4_hba.lpfc_sglq_active_list =
6418 kzalloc(size, GFP_KERNEL);
6419 if (!phba->sli4_hba.lpfc_sglq_active_list)
6420 return -ENOMEM;
6421 return 0;
6422 }
6423
6424 /**
6425 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
6426 * @phba: pointer to lpfc hba data structure.
6427 *
6428 * This routine is invoked to walk through the array of active sglq entries
6429 * and free all of the resources.
6430 * This is just a place holder for now.
6431 **/
6432 static void
6433 lpfc_free_active_sgl(struct lpfc_hba *phba)
6434 {
6435 kfree(phba->sli4_hba.lpfc_sglq_active_list);
6436 }
6437
6438 /**
6439 * lpfc_init_sgl_list - Allocate and initialize sgl list.
6440 * @phba: pointer to lpfc hba data structure.
6441 *
6442 * This routine is invoked to allocate and initialize the driver's sgl
6443 * list and set up the sgl xritag tag array accordingly.
6444 *
6445 **/
6446 static void
6447 lpfc_init_sgl_list(struct lpfc_hba *phba)
6448 {
6449 /* Initialize and populate the sglq list per host/VF. */
6450 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
6451 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
6452 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
6453 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6454
6455 /* els xri-sgl book keeping */
6456 phba->sli4_hba.els_xri_cnt = 0;
6457
6458 /* scsi xri-buffer book keeping */
6459 phba->sli4_hba.scsi_xri_cnt = 0;
6460
6461 /* nvme xri-buffer book keeping */
6462 phba->sli4_hba.nvme_xri_cnt = 0;
6463 }
6464
6465 /**
6466 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
6467 * @phba: pointer to lpfc hba data structure.
6468 *
6469 * This routine is invoked to post rpi header templates to the
6470 * port for those SLI4 ports that do not support extents. This routine
6471 * posts a PAGE_SIZE memory region to the port to hold up to
6472 * 64 rpi context headers. This is an initialization routine
6473 * and should be called only when interrupts are disabled.
6474 *
6475 * Return codes
6476 * 0 - successful
6477 * -ERROR - otherwise.
6478 **/
6479 int
6480 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
6481 {
6482 int rc = 0;
6483 struct lpfc_rpi_hdr *rpi_hdr;
6484
6485 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
6486 if (!phba->sli4_hba.rpi_hdrs_in_use)
6487 return rc;
6488 if (phba->sli4_hba.extents_in_use)
6489 return -EIO;
6490
6491 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
6492 if (!rpi_hdr) {
6493 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6494 "0391 Error during rpi post operation\n");
6495 lpfc_sli4_remove_rpis(phba);
6496 rc = -ENODEV;
6497 }
6498
6499 return rc;
6500 }
6501
6502 /**
6503 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
6504 * @phba: pointer to lpfc hba data structure.
6505 *
6506 * This routine is invoked to allocate a single 4KB memory region to
6507 * support rpis and stores them in the phba. This single region
6508 * provides support for up to 64 rpis. The region is used globally
6509 * by the device.
6510 *
6511 * Returns:
6512 * A valid rpi hdr on success.
6513 * A NULL pointer on any failure.
6514 **/
6515 struct lpfc_rpi_hdr *
6516 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6517 {
6518 uint16_t rpi_limit, curr_rpi_range;
6519 struct lpfc_dmabuf *dmabuf;
6520 struct lpfc_rpi_hdr *rpi_hdr;
6521 uint32_t rpi_count;
6522
6523 /*
6524 * If the SLI4 port supports extents, posting the rpi header isn't
6525 * required. Set the expected maximum count and let the actual value
6526 * get set when extents are fully allocated.
6527 */
6528 if (!phba->sli4_hba.rpi_hdrs_in_use)
6529 return NULL;
6530 if (phba->sli4_hba.extents_in_use)
6531 return NULL;
6532
6533 /* The limit on the logical index is just the max_rpi count. */
6534 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
6535 phba->sli4_hba.max_cfg_param.max_rpi - 1;
6536
6537 spin_lock_irq(&phba->hbalock);
6538 /*
6539 * Establish the starting RPI in this header block. The starting
6540 * rpi is normalized to a zero base because the physical rpi is
6541 * port based.
6542 */
6543 curr_rpi_range = phba->sli4_hba.next_rpi;
6544 spin_unlock_irq(&phba->hbalock);
6545
6546 /*
6547 * The port has a limited number of rpis. The increment here
6548 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
6549 * and to allow the full max_rpi range per port.
6550 */
6551 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
6552 rpi_count = rpi_limit - curr_rpi_range;
6553 else
6554 rpi_count = LPFC_RPI_HDR_COUNT;
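/* Each header region normally covers LPFC_RPI_HDR_COUNT (64) rpis; the
 * final region is clipped so it never runs past the port's rpi limit.
 */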
6555
6556 if (!rpi_count)
6557 return NULL;
6558 /*
6559 * First allocate the protocol header region for the port. The
6560 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
6561 */
6562 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6563 if (!dmabuf)
6564 return NULL;
6565
6566 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
6567 LPFC_HDR_TEMPLATE_SIZE,
6568 &dmabuf->phys, GFP_KERNEL);
6569 if (!dmabuf->virt) {
6570 rpi_hdr = NULL;
6571 goto err_free_dmabuf;
6572 }
6573
6574 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
6575 rpi_hdr = NULL;
6576 goto err_free_coherent;
6577 }
6578
6579 /* Save the rpi header data for cleanup later. */
6580 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
6581 if (!rpi_hdr)
6582 goto err_free_coherent;
6583
6584 rpi_hdr->dmabuf = dmabuf;
6585 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
6586 rpi_hdr->page_count = 1;
6587 spin_lock_irq(&phba->hbalock);
6588
6589 /* The rpi_hdr stores the logical index only. */
6590 rpi_hdr->start_rpi = curr_rpi_range;
6591 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
6592
6593 /*
6594 * The next_rpi stores the next logical modulo-64 rpi value used
6595 * to post physical rpis in subsequent rpi postings.
6596 */
6597 phba->sli4_hba.next_rpi += rpi_count;
6598 spin_unlock_irq(&phba->hbalock);
6599 return rpi_hdr;
6600
6601 err_free_coherent:
6602 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
6603 dmabuf->virt, dmabuf->phys);
6604 err_free_dmabuf:
6605 kfree(dmabuf);
6606 return NULL;
6607 }
6608
6609 /**
6610 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
6611 * @phba: pointer to lpfc hba data structure.
6612 *
6613 * This routine is invoked to remove all memory resources allocated
6614 * to support rpis for SLI4 ports not supporting extents. This routine
6615 * presumes the caller has released all rpis consumed by fabric or port
6616 * logins and is prepared to have the header pages removed.
6617 **/
6618 void
6619 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
6620 {
6621 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
6622
6623 if (!phba->sli4_hba.rpi_hdrs_in_use)
6624 goto exit;
6625
6626 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
6627 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
6628 list_del(&rpi_hdr->list);
6629 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
6630 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
6631 kfree(rpi_hdr->dmabuf);
6632 kfree(rpi_hdr);
6633 }
6634 exit:
6635 /* There are no rpis available to the port now. */
6636 phba->sli4_hba.next_rpi = 0;
6637 }
6638
6639 /**
6640 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
6641 * @pdev: pointer to pci device data structure.
6642 *
6643 * This routine is invoked to allocate the driver hba data structure for an
6644 * HBA device. If the allocation is successful, the phba reference to the
6645 * PCI device data structure is set.
6646 *
6647 * Return codes
6648 * pointer to @phba - successful
6649 * NULL - error
6650 **/
6651 static struct lpfc_hba *
6652 lpfc_hba_alloc(struct pci_dev *pdev)
6653 {
6654 struct lpfc_hba *phba;
6655
6656 /* Allocate memory for HBA structure */
6657 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
6658 if (!phba) {
6659 dev_err(&pdev->dev, "failed to allocate hba struct\n");
6660 return NULL;
6661 }
6662
6663 /* Set reference to PCI device in HBA structure */
6664 phba->pcidev = pdev;
6665
6666 /* Assign an unused board number */
6667 phba->brd_no = lpfc_get_instance();
6668 if (phba->brd_no < 0) {
6669 kfree(phba);
6670 return NULL;
6671 }
6672 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
6673
6674 spin_lock_init(&phba->ct_ev_lock);
6675 INIT_LIST_HEAD(&phba->ct_ev_waiters);
6676
6677 return phba;
6678 }
6679
6680 /**
6681 * lpfc_hba_free - Free driver hba data structure with a device.
6682 * @phba: pointer to lpfc hba data structure.
6683 *
6684 * This routine is invoked to free the driver hba data structure with an
6685 * HBA device.
6686 **/
6687 static void
6688 lpfc_hba_free(struct lpfc_hba *phba)
6689 {
6690 /* Release the driver assigned board number */
6691 idr_remove(&lpfc_hba_index, phba->brd_no);
6692
6693 /* Free memory allocated with sli3 rings */
6694 kfree(phba->sli.sli3_ring);
6695 phba->sli.sli3_ring = NULL;
6696
6697 kfree(phba);
6698 return;
6699 }
6700
6701 /**
6702 * lpfc_create_shost - Create hba physical port with associated scsi host.
6703 * @phba: pointer to lpfc hba data structure.
6704 *
6705 * This routine is invoked to create HBA physical port and associate a SCSI
6706 * host with it.
6707 *
6708 * Return codes
6709 * 0 - successful
6710 * other values - error
6711 **/
6712 static int
6713 lpfc_create_shost(struct lpfc_hba *phba)
6714 {
6715 struct lpfc_vport *vport;
6716 struct Scsi_Host *shost;
6717
6718 /* Initialize HBA FC structure */
6719 phba->fc_edtov = FF_DEF_EDTOV;
6720 phba->fc_ratov = FF_DEF_RATOV;
6721 phba->fc_altov = FF_DEF_ALTOV;
6722 phba->fc_arbtov = FF_DEF_ARBTOV;
6723
6724 atomic_set(&phba->sdev_cnt, 0);
6725 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
6726 if (!vport)
6727 return -ENODEV;
6728
6729 shost = lpfc_shost_from_vport(vport);
6730 phba->pport = vport;
6731
6732 if (phba->nvmet_support) {
6733 /* Only 1 vport (pport) will support NVME target */
6734 if (phba->txrdy_payload_pool == NULL) {
6735 phba->txrdy_payload_pool = pci_pool_create(
6736 "txrdy_pool", phba->pcidev,
6737 TXRDY_PAYLOAD_LEN, 16, 0);
6738 if (phba->txrdy_payload_pool) {
6739 phba->targetport = NULL;
6740 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
6741 lpfc_printf_log(phba, KERN_INFO,
6742 LOG_INIT | LOG_NVME_DISC,
6743 "6076 NVME Target Found\n");
6744 }
6745 }
6746 }
6747
6748 lpfc_debugfs_initialize(vport);
6749 /* Put reference to SCSI host to driver's device private data */
6750 pci_set_drvdata(phba->pcidev, shost);
6751
6752 /*
6753 * At this point we are fully registered with PSA. In addition,
6754 * any initial discovery should be completed.
6755 */
6756 vport->load_flag |= FC_ALLOW_FDMI;
6757 if (phba->cfg_enable_SmartSAN ||
6758 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
6759
6760 /* Setup appropriate attribute masks */
6761 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
6762 if (phba->cfg_enable_SmartSAN)
6763 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
6764 else
6765 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
6766 }
6767 return 0;
6768 }
6769
6770 /**
6771 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
6772 * @phba: pointer to lpfc hba data structure.
6773 *
6774 * This routine is invoked to destroy HBA physical port and the associated
6775 * SCSI host.
6776 **/
6777 static void
6778 lpfc_destroy_shost(struct lpfc_hba *phba)
6779 {
6780 struct lpfc_vport *vport = phba->pport;
6781
6782 /* Destroy physical port that associated with the SCSI host */
6783 destroy_port(vport);
6784
6785 return;
6786 }
6787
6788 /**
6789 * lpfc_setup_bg - Setup Block guard structures and debug areas.
6790 * @phba: pointer to lpfc hba data structure.
6791 * @shost: the shost to be used to detect Block guard settings.
6792 *
6793 * This routine sets up the local Block guard protocol settings for @shost.
6794 * This routine also allocates memory for debugging bg buffers.
6795 **/
6796 static void
6797 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
6798 {
6799 uint32_t old_mask;
6800 uint32_t old_guard;
6801
6802 int pagecnt = 10;
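/* pagecnt is used as a page-allocation order: __get_free_pages(GFP_KERNEL, n)
 * returns 2^n contiguous pages, so the fallback loops below retry with
 * successively smaller orders until an allocation succeeds.
 */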
6803 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
6804 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6805 "1478 Registering BlockGuard with the "
6806 "SCSI layer\n");
6807
6808 old_mask = phba->cfg_prot_mask;
6809 old_guard = phba->cfg_prot_guard;
6810
6811 /* Only allow supported values */
6812 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
6813 SHOST_DIX_TYPE0_PROTECTION |
6814 SHOST_DIX_TYPE1_PROTECTION);
6815 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
6816 SHOST_DIX_GUARD_CRC);
6817
6818 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
6819 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
6820 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
6821
6822 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
6823 if ((old_mask != phba->cfg_prot_mask) ||
6824 (old_guard != phba->cfg_prot_guard))
6825 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6826 "1475 Registering BlockGuard with the "
6827 "SCSI layer: mask %d guard %d\n",
6828 phba->cfg_prot_mask,
6829 phba->cfg_prot_guard);
6830
6831 scsi_host_set_prot(shost, phba->cfg_prot_mask);
6832 scsi_host_set_guard(shost, phba->cfg_prot_guard);
6833 } else
6834 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6835 "1479 Not Registering BlockGuard with the SCSI "
6836 "layer, Bad protection parameters: %d %d\n",
6837 old_mask, old_guard);
6838 }
6839
6840 if (!_dump_buf_data) {
6841 while (pagecnt) {
6842 spin_lock_init(&_dump_buf_lock);
6843 _dump_buf_data =
6844 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
6845 if (_dump_buf_data) {
6846 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6847 "9043 BLKGRD: allocated %d pages for "
6848 "_dump_buf_data at 0x%p\n",
6849 (1 << pagecnt), _dump_buf_data);
6850 _dump_buf_data_order = pagecnt;
6851 memset(_dump_buf_data, 0,
6852 ((1 << PAGE_SHIFT) << pagecnt));
6853 break;
6854 } else
6855 --pagecnt;
6856 }
6857 if (!_dump_buf_data_order)
6858 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6859 "9044 BLKGRD: ERROR unable to allocate "
6860 "memory for hexdump\n");
6861 } else
6862 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6863 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
6864 "\n", _dump_buf_data);
6865 if (!_dump_buf_dif) {
6866 while (pagecnt) {
6867 _dump_buf_dif =
6868 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
6869 if (_dump_buf_dif) {
6870 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6871 "9046 BLKGRD: allocated %d pages for "
6872 "_dump_buf_dif at 0x%p\n",
6873 (1 << pagecnt), _dump_buf_dif);
6874 _dump_buf_dif_order = pagecnt;
6875 memset(_dump_buf_dif, 0,
6876 ((1 << PAGE_SHIFT) << pagecnt));
6877 break;
6878 } else
6879 --pagecnt;
6880 }
6881 if (!_dump_buf_dif_order)
6882 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6883 "9047 BLKGRD: ERROR unable to allocate "
6884 "memory for hexdump\n");
6885 } else
6886 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6887 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
6888 _dump_buf_dif);
6889 }
6890
6891 /**
6892 * lpfc_post_init_setup - Perform necessary device post initialization setup.
6893 * @phba: pointer to lpfc hba data structure.
6894 *
6895 * This routine is invoked to perform all the necessary post initialization
6896 * setup for the device.
6897 **/
6898 static void
6899 lpfc_post_init_setup(struct lpfc_hba *phba)
6900 {
6901 struct Scsi_Host *shost;
6902 struct lpfc_adapter_event_header adapter_event;
6903
6904 /* Get the default values for Model Name and Description */
6905 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
6906
6907 /*
6908 * hba setup may have changed the hba_queue_depth so we need to
6909 * adjust the value of can_queue.
6910 */
6911 shost = pci_get_drvdata(phba->pcidev);
6912 shost->can_queue = phba->cfg_hba_queue_depth - 10;
6913 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
6914 lpfc_setup_bg(phba, shost);
6915
6916 lpfc_host_attrib_init(shost);
6917
6918 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
6919 spin_lock_irq(shost->host_lock);
6920 lpfc_poll_start_timer(phba);
6921 spin_unlock_irq(shost->host_lock);
6922 }
6923
6924 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6925 "0428 Perform SCSI scan\n");
6926 /* Send board arrival event to upper layer */
6927 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
6928 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
6929 fc_host_post_vendor_event(shost, fc_get_event_number(),
6930 sizeof(adapter_event),
6931 (char *) &adapter_event,
6932 LPFC_NL_VENDOR_ID);
6933 return;
6934 }
6935
6936 /**
6937 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
6938 * @phba: pointer to lpfc hba data structure.
6939 *
6940 * This routine is invoked to set up the PCI device memory space for device
6941 * with SLI-3 interface spec.
6942 *
6943 * Return codes
6944 * 0 - successful
6945 * other values - error
6946 **/
6947 static int
6948 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
6949 {
6950 struct pci_dev *pdev;
6951 unsigned long bar0map_len, bar2map_len;
6952 int i, hbq_count;
6953 void *ptr;
6954 int error = -ENODEV;
6955
6956 /* Obtain PCI device reference */
6957 if (!phba->pcidev)
6958 return error;
6959 else
6960 pdev = phba->pcidev;
6961
6962 /* Set the device DMA mask size */
6963 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6964 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
6965 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6966 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
6967 return error;
6968 }
6969 }
6970
6971 /* Get the bus address of Bar0 and Bar2 and the number of bytes
6972 * required by each mapping.
6973 */
6974 phba->pci_bar0_map = pci_resource_start(pdev, 0);
6975 bar0map_len = pci_resource_len(pdev, 0);
6976
6977 phba->pci_bar2_map = pci_resource_start(pdev, 2);
6978 bar2map_len = pci_resource_len(pdev, 2);
6979
6980 /* Map HBA SLIM to a kernel virtual address. */
6981 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
6982 if (!phba->slim_memmap_p) {
6983 dev_printk(KERN_ERR, &pdev->dev,
6984 "ioremap failed for SLIM memory.\n");
6985 goto out;
6986 }
6987
6988 /* Map HBA Control Registers to a kernel virtual address. */
6989 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
6990 if (!phba->ctrl_regs_memmap_p) {
6991 dev_printk(KERN_ERR, &pdev->dev,
6992 "ioremap failed for HBA control registers.\n");
6993 goto out_iounmap_slim;
6994 }
6995
6996 /* Allocate memory for SLI-2 structures */
6997 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6998 &phba->slim2p.phys, GFP_KERNEL);
6999 if (!phba->slim2p.virt)
7000 goto out_iounmap;
7001
7002 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
7003 phba->mbox_ext = (phba->slim2p.virt +
7004 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
7005 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
7006 phba->IOCBs = (phba->slim2p.virt +
7007 offsetof(struct lpfc_sli2_slim, IOCBs));
7008
7009 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
7010 lpfc_sli_hbq_size(),
7011 &phba->hbqslimp.phys,
7012 GFP_KERNEL);
7013 if (!phba->hbqslimp.virt)
7014 goto out_free_slim;
7015
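/*
 * Carve the coherent hbqslimp block into per-HBQ regions: each HBQ
 * gets entry_count contiguous lpfc_hbq_entry slots.
 */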
7016 hbq_count = lpfc_sli_hbq_count();
7017 ptr = phba->hbqslimp.virt;
7018 for (i = 0; i < hbq_count; ++i) {
7019 phba->hbqs[i].hbq_virt = ptr;
7020 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
7021 ptr += (lpfc_hbq_defs[i]->entry_count *
7022 sizeof(struct lpfc_hbq_entry));
7023 }
7024 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
7025 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
7026
7027 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
7028
7029 phba->MBslimaddr = phba->slim_memmap_p;
7030 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
7031 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
7032 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
7033 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
7034
7035 return 0;
7036
7037 out_free_slim:
7038 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7039 phba->slim2p.virt, phba->slim2p.phys);
7040 out_iounmap:
7041 iounmap(phba->ctrl_regs_memmap_p);
7042 out_iounmap_slim:
7043 iounmap(phba->slim_memmap_p);
7044 out:
7045 return error;
7046 }
7047
7048 /**
7049 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
7050 * @phba: pointer to lpfc hba data structure.
7051 *
7052 * This routine is invoked to unset the PCI device memory space for device
7053 * with SLI-3 interface spec.
7054 **/
7055 static void
7056 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
7057 {
7058 struct pci_dev *pdev;
7059
7060 /* Obtain PCI device reference */
7061 if (!phba->pcidev)
7062 return;
7063 else
7064 pdev = phba->pcidev;
7065
7066 /* Free coherent DMA memory allocated */
7067 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7068 phba->hbqslimp.virt, phba->hbqslimp.phys);
7069 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7070 phba->slim2p.virt, phba->slim2p.phys);
7071
7072 /* I/O memory unmap */
7073 iounmap(phba->ctrl_regs_memmap_p);
7074 iounmap(phba->slim_memmap_p);
7075
7076 return;
7077 }
7078
7079 /**
7080 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
7081 * @phba: pointer to lpfc hba data structure.
7082 *
7083 * This routine is invoked to wait for the SLI4 device Power On Self Test
7084 * (POST) to complete and to check its status.
7085 *
7086 * Return 0 if successful, otherwise -ENODEV.
7087 **/
7088 int
7089 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
7090 {
7091 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
7092 struct lpfc_register reg_data;
7093 int i, port_error = 0;
7094 uint32_t if_type;
7095
7096 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
7097 memset(&reg_data, 0, sizeof(reg_data));
7098 if (!phba->sli4_hba.PSMPHRregaddr)
7099 return -ENODEV;
7100
7101 /* Wait up to 30 seconds for the SLI Port POST done and ready */
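/* (3000 polls at 10 millisecond intervals) */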
7102 for (i = 0; i < 3000; i++) {
7103 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
7104 &portsmphr_reg.word0) ||
7105 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
7106 /* Port has a fatal POST error, break out */
7107 port_error = -ENODEV;
7108 break;
7109 }
7110 if (LPFC_POST_STAGE_PORT_READY ==
7111 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
7112 break;
7113 msleep(10);
7114 }
7115
7116 /*
7117 * If there was a port error during POST, then don't proceed with
7118 * other register reads as the data may not be valid. Just exit.
7119 */
7120 if (port_error) {
7121 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7122 "1408 Port Failed POST - portsmphr=0x%x, "
7123 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
7124 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
7125 portsmphr_reg.word0,
7126 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
7127 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
7128 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
7129 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
7130 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
7131 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
7132 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
7133 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
7134 } else {
7135 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7136 "2534 Device Info: SLIFamily=0x%x, "
7137 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
7138 "SLIHint_2=0x%x, FT=0x%x\n",
7139 bf_get(lpfc_sli_intf_sli_family,
7140 &phba->sli4_hba.sli_intf),
7141 bf_get(lpfc_sli_intf_slirev,
7142 &phba->sli4_hba.sli_intf),
7143 bf_get(lpfc_sli_intf_if_type,
7144 &phba->sli4_hba.sli_intf),
7145 bf_get(lpfc_sli_intf_sli_hint1,
7146 &phba->sli4_hba.sli_intf),
7147 bf_get(lpfc_sli_intf_sli_hint2,
7148 &phba->sli4_hba.sli_intf),
7149 bf_get(lpfc_sli_intf_func_type,
7150 &phba->sli4_hba.sli_intf));
7151 /*
7152 * Check for other Port errors during the initialization
7153 * process. Fail the load if the port did not come up
7154 * correctly.
7155 */
7156 if_type = bf_get(lpfc_sli_intf_if_type,
7157 &phba->sli4_hba.sli_intf);
7158 switch (if_type) {
7159 case LPFC_SLI_INTF_IF_TYPE_0:
7160 phba->sli4_hba.ue_mask_lo =
7161 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
7162 phba->sli4_hba.ue_mask_hi =
7163 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
7164 uerrlo_reg.word0 =
7165 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
7166 uerrhi_reg.word0 =
7167 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
7168 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
7169 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7171 "1422 Unrecoverable Error "
7172 "Detected during POST "
7173 "uerr_lo_reg=0x%x, "
7174 "uerr_hi_reg=0x%x, "
7175 "ue_mask_lo_reg=0x%x, "
7176 "ue_mask_hi_reg=0x%x\n",
7177 uerrlo_reg.word0,
7178 uerrhi_reg.word0,
7179 phba->sli4_hba.ue_mask_lo,
7180 phba->sli4_hba.ue_mask_hi);
7181 port_error = -ENODEV;
7182 }
7183 break;
7184 case LPFC_SLI_INTF_IF_TYPE_2:
7185 /* Final checks. The port status should be clean. */
7186 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7187 &reg_data.word0) ||
7188 (bf_get(lpfc_sliport_status_err, &reg_data) &&
7189 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
7190 phba->work_status[0] =
7191 readl(phba->sli4_hba.u.if_type2.
7192 ERR1regaddr);
7193 phba->work_status[1] =
7194 readl(phba->sli4_hba.u.if_type2.
7195 ERR2regaddr);
7196 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7197 "2888 Unrecoverable port error "
7198 "following POST: port status reg "
7199 "0x%x, port_smphr reg 0x%x, "
7200 "error 1=0x%x, error 2=0x%x\n",
7201 reg_data.word0,
7202 portsmphr_reg.word0,
7203 phba->work_status[0],
7204 phba->work_status[1]);
7205 port_error = -ENODEV;
7206 }
7207 break;
7208 case LPFC_SLI_INTF_IF_TYPE_1:
7209 default:
7210 break;
7211 }
7212 }
7213 return port_error;
7214 }
7215
7216 /**
7217 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
7218 * @phba: pointer to lpfc hba data structure.
7219 * @if_type: The SLI4 interface type getting configured.
7220 *
7221 * This routine is invoked to set up SLI4 BAR0 PCI config space register
7222 * memory map.
7223 **/
7224 static void
7225 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
7226 {
7227 switch (if_type) {
7228 case LPFC_SLI_INTF_IF_TYPE_0:
7229 phba->sli4_hba.u.if_type0.UERRLOregaddr =
7230 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
7231 phba->sli4_hba.u.if_type0.UERRHIregaddr =
7232 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
7233 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
7234 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
7235 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
7236 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
7237 phba->sli4_hba.SLIINTFregaddr =
7238 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
7239 break;
7240 case LPFC_SLI_INTF_IF_TYPE_2:
7241 phba->sli4_hba.u.if_type2.ERR1regaddr =
7242 phba->sli4_hba.conf_regs_memmap_p +
7243 LPFC_CTL_PORT_ER1_OFFSET;
7244 phba->sli4_hba.u.if_type2.ERR2regaddr =
7245 phba->sli4_hba.conf_regs_memmap_p +
7246 LPFC_CTL_PORT_ER2_OFFSET;
7247 phba->sli4_hba.u.if_type2.CTRLregaddr =
7248 phba->sli4_hba.conf_regs_memmap_p +
7249 LPFC_CTL_PORT_CTL_OFFSET;
7250 phba->sli4_hba.u.if_type2.STATUSregaddr =
7251 phba->sli4_hba.conf_regs_memmap_p +
7252 LPFC_CTL_PORT_STA_OFFSET;
7253 phba->sli4_hba.SLIINTFregaddr =
7254 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
7255 phba->sli4_hba.PSMPHRregaddr =
7256 phba->sli4_hba.conf_regs_memmap_p +
7257 LPFC_CTL_PORT_SEM_OFFSET;
7258 phba->sli4_hba.RQDBregaddr =
7259 phba->sli4_hba.conf_regs_memmap_p +
7260 LPFC_ULP0_RQ_DOORBELL;
7261 phba->sli4_hba.WQDBregaddr =
7262 phba->sli4_hba.conf_regs_memmap_p +
7263 LPFC_ULP0_WQ_DOORBELL;
7264 phba->sli4_hba.EQCQDBregaddr =
7265 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
7266 phba->sli4_hba.MQDBregaddr =
7267 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
7268 phba->sli4_hba.BMBXregaddr =
7269 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
7270 break;
7271 case LPFC_SLI_INTF_IF_TYPE_1:
7272 default:
7273 dev_printk(KERN_ERR, &phba->pcidev->dev,
7274 "FATAL - unsupported SLI4 interface type - %d\n",
7275 if_type);
7276 break;
7277 }
7278 }
7279
7280 /**
7281 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
7282 * @phba: pointer to lpfc hba data structure.
7283 *
7284 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
7285 * memory map.
7286 **/
7287 static void
7288 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
7289 {
7290 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
7291 LPFC_SLIPORT_IF0_SMPHR;
7292 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
7293 LPFC_HST_ISR0;
7294 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
7295 LPFC_HST_IMR0;
7296 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
7297 LPFC_HST_ISCR0;
7298 }
7299
7300 /**
7301 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
7302 * @phba: pointer to lpfc hba data structure.
7303 * @vf: virtual function number
7304 *
7305 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
7306 * based on the given virtual function number, @vf.
7307 *
7308 * Return 0 if successful, otherwise -ENODEV.
7309 **/
7310 static int
7311 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
7312 {
7313 if (vf > LPFC_VIR_FUNC_MAX)
7314 return -ENODEV;
7315
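/*
 * Each virtual function owns its own doorbell page within BAR2, so
 * every doorbell register is offset by vf * LPFC_VFR_PAGE_SIZE.
 */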
7316 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7317 vf * LPFC_VFR_PAGE_SIZE +
7318 LPFC_ULP0_RQ_DOORBELL);
7319 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7320 vf * LPFC_VFR_PAGE_SIZE +
7321 LPFC_ULP0_WQ_DOORBELL);
7322 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7323 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
7324 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7325 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
7326 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7327 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
7328 return 0;
7329 }
7330
7331 /**
7332 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
7333 * @phba: pointer to lpfc hba data structure.
7334 *
7335 * This routine is invoked to create the bootstrap mailbox
7336 * region consistent with the SLI-4 interface spec. This
7337 * routine allocates all memory necessary to communicate
7338 * mailbox commands to the port and sets up all alignment
7339 * needs. No locks are expected to be held when calling
7340 * this routine.
7341 *
7342 * Return codes
7343 * 0 - successful
7344 * -ENOMEM - could not allocate memory.
7345 **/
7346 static int
7347 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
7348 {
7349 uint32_t bmbx_size;
7350 struct lpfc_dmabuf *dmabuf;
7351 struct dma_address *dma_address;
7352 uint32_t pa_addr;
7353 uint64_t phys_addr;
7354
7355 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7356 if (!dmabuf)
7357 return -ENOMEM;
7358
7359 /*
7360 * The bootstrap mailbox region consists of 2 parts
7361 * plus an alignment restriction of 16 bytes.
7362 */
7363 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
7364 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
7365 &dmabuf->phys, GFP_KERNEL);
7366 if (!dmabuf->virt) {
7367 kfree(dmabuf);
7368 return -ENOMEM;
7369 }
7370
7371 /*
7372 * Initialize the bootstrap mailbox pointers now so that the register
7373 * operations are simple later. The mailbox dma address is required
7374 * to be 16-byte aligned. Also align the virtual memory as each
7375 * mailbox is copied into the bmbx mailbox region before issuing the
7376 * command to the port.
7377 */
7378 phba->sli4_hba.bmbx.dmabuf = dmabuf;
7379 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
7380
7381 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
7382 LPFC_ALIGN_16_BYTE);
7383 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
7384 LPFC_ALIGN_16_BYTE);
7385
7386 /*
7387 * Set the high and low physical addresses now. The SLI4 alignment
7388 * requirement is 16 bytes and the mailbox is posted to the port
7389 * as two 30-bit addresses. The other data is a bit marking whether
7390 * the 30-bit address is the high or low address.
7391 * Upcast bmbx aphys to 64bits so shift instruction compiles
7392 * clean on 32 bit machines.
7393 */
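/*
 * Concretely: addr_hi carries physical address bits 63:34 and addr_lo
 * carries bits 33:4; each 30-bit value is shifted left by two and
 * tagged to indicate whether it is the high or low half.
 */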
7394 dma_address = &phba->sli4_hba.bmbx.dma_address;
7395 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
7396 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
7397 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
7398 LPFC_BMBX_BIT1_ADDR_HI);
7399
7400 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
7401 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
7402 LPFC_BMBX_BIT1_ADDR_LO);
7403 return 0;
7404 }
7405
7406 /**
7407 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
7408 * @phba: pointer to lpfc hba data structure.
7409 *
7410 * This routine is invoked to tear down the bootstrap mailbox
7411 * region and release all host resources. This routine requires
7412 * the caller to ensure all mailbox commands have been recovered, no
7413 * additional mailbox commands are sent, and interrupts are disabled
7414 * before calling this routine.
7415 *
7416 **/
7417 static void
7418 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
7419 {
7420 dma_free_coherent(&phba->pcidev->dev,
7421 phba->sli4_hba.bmbx.bmbx_size,
7422 phba->sli4_hba.bmbx.dmabuf->virt,
7423 phba->sli4_hba.bmbx.dmabuf->phys);
7424
7425 kfree(phba->sli4_hba.bmbx.dmabuf);
7426 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
7427 }
7428
7429 /**
7430 * lpfc_sli4_read_config - Get the config parameters.
7431 * @phba: pointer to lpfc hba data structure.
7432 *
7433 * This routine is invoked to read the configuration parameters from the HBA.
7434 * The configuration parameters are used to set the base and maximum values
7435 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
7436 * allocation for the port.
7437 *
7438 * Return codes
7439 * 0 - successful
7440 * -ENOMEM - No available memory
7441 * -EIO - The mailbox failed to complete successfully.
7442 **/
7443 int
7444 lpfc_sli4_read_config(struct lpfc_hba *phba)
7445 {
7446 LPFC_MBOXQ_t *pmb;
7447 struct lpfc_mbx_read_config *rd_config;
7448 union lpfc_sli4_cfg_shdr *shdr;
7449 uint32_t shdr_status, shdr_add_status;
7450 struct lpfc_mbx_get_func_cfg *get_func_cfg;
7451 struct lpfc_rsrc_desc_fcfcoe *desc;
7452 char *pdesc_0;
7453 uint16_t forced_link_speed;
7454 uint32_t if_type;
7455 int length, i, rc = 0, rc2;
7456
7457 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7458 if (!pmb) {
7459 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7460 "2011 Unable to allocate memory for issuing "
7461 "SLI_CONFIG_SPECIAL mailbox command\n");
7462 return -ENOMEM;
7463 }
7464
7465 lpfc_read_config(phba, pmb);
7466
7467 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7468 if (rc != MBX_SUCCESS) {
7469 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7470 "2012 Mailbox failed , mbxCmd x%x "
7471 "READ_CONFIG, mbxStatus x%x\n",
7472 bf_get(lpfc_mqe_command, &pmb->u.mqe),
7473 bf_get(lpfc_mqe_status, &pmb->u.mqe));
7474 rc = -EIO;
7475 } else {
7476 rd_config = &pmb->u.mqe.un.rd_config;
7477 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
7478 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
7479 phba->sli4_hba.lnk_info.lnk_tp =
7480 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
7481 phba->sli4_hba.lnk_info.lnk_no =
7482 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
7483 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7484 "3081 lnk_type:%d, lnk_numb:%d\n",
7485 phba->sli4_hba.lnk_info.lnk_tp,
7486 phba->sli4_hba.lnk_info.lnk_no);
7487 } else
7488 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7489 "3082 Mailbox (x%x) returned ldv:x0\n",
7490 bf_get(lpfc_mqe_command, &pmb->u.mqe));
7491 phba->sli4_hba.extents_in_use =
7492 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
7493 phba->sli4_hba.max_cfg_param.max_xri =
7494 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
7495 phba->sli4_hba.max_cfg_param.xri_base =
7496 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
7497 phba->sli4_hba.max_cfg_param.max_vpi =
7498 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
7499 phba->sli4_hba.max_cfg_param.vpi_base =
7500 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
7501 phba->sli4_hba.max_cfg_param.max_rpi =
7502 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
7503 phba->sli4_hba.max_cfg_param.rpi_base =
7504 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
7505 phba->sli4_hba.max_cfg_param.max_vfi =
7506 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
7507 phba->sli4_hba.max_cfg_param.vfi_base =
7508 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
7509 phba->sli4_hba.max_cfg_param.max_fcfi =
7510 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
7511 phba->sli4_hba.max_cfg_param.max_eq =
7512 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
7513 phba->sli4_hba.max_cfg_param.max_rq =
7514 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
7515 phba->sli4_hba.max_cfg_param.max_wq =
7516 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
7517 phba->sli4_hba.max_cfg_param.max_cq =
7518 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
7519 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
7520 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
7521 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
7522 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
7523 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
7524 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
7525 phba->max_vports = phba->max_vpi;
7526 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7527 "2003 cfg params Extents? %d "
7528 "XRI(B:%d M:%d), "
7529 "VPI(B:%d M:%d) "
7530 "VFI(B:%d M:%d) "
7531 "RPI(B:%d M:%d) "
7532 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
7533 phba->sli4_hba.extents_in_use,
7534 phba->sli4_hba.max_cfg_param.xri_base,
7535 phba->sli4_hba.max_cfg_param.max_xri,
7536 phba->sli4_hba.max_cfg_param.vpi_base,
7537 phba->sli4_hba.max_cfg_param.max_vpi,
7538 phba->sli4_hba.max_cfg_param.vfi_base,
7539 phba->sli4_hba.max_cfg_param.max_vfi,
7540 phba->sli4_hba.max_cfg_param.rpi_base,
7541 phba->sli4_hba.max_cfg_param.max_rpi,
7542 phba->sli4_hba.max_cfg_param.max_fcfi,
7543 phba->sli4_hba.max_cfg_param.max_eq,
7544 phba->sli4_hba.max_cfg_param.max_cq,
7545 phba->sli4_hba.max_cfg_param.max_wq,
7546 phba->sli4_hba.max_cfg_param.max_rq);
7547
7548 }
7549
7550 if (rc)
7551 goto read_cfg_out;
7552
7553 /* Update link speed if forced link speed is supported */
7554 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7555 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7556 forced_link_speed =
7557 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
7558 if (forced_link_speed) {
7559 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
7560
7561 switch (forced_link_speed) {
7562 case LINK_SPEED_1G:
7563 phba->cfg_link_speed =
7564 LPFC_USER_LINK_SPEED_1G;
7565 break;
7566 case LINK_SPEED_2G:
7567 phba->cfg_link_speed =
7568 LPFC_USER_LINK_SPEED_2G;
7569 break;
7570 case LINK_SPEED_4G:
7571 phba->cfg_link_speed =
7572 LPFC_USER_LINK_SPEED_4G;
7573 break;
7574 case LINK_SPEED_8G:
7575 phba->cfg_link_speed =
7576 LPFC_USER_LINK_SPEED_8G;
7577 break;
7578 case LINK_SPEED_10G:
7579 phba->cfg_link_speed =
7580 LPFC_USER_LINK_SPEED_10G;
7581 break;
7582 case LINK_SPEED_16G:
7583 phba->cfg_link_speed =
7584 LPFC_USER_LINK_SPEED_16G;
7585 break;
7586 case LINK_SPEED_32G:
7587 phba->cfg_link_speed =
7588 LPFC_USER_LINK_SPEED_32G;
7589 break;
7590 case 0xffff:
7591 phba->cfg_link_speed =
7592 LPFC_USER_LINK_SPEED_AUTO;
7593 break;
7594 default:
7595 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7596 "0047 Unrecognized link "
7597 "speed : %d\n",
7598 forced_link_speed);
7599 phba->cfg_link_speed =
7600 LPFC_USER_LINK_SPEED_AUTO;
7601 }
7602 }
7603 }
7604
7605 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
7606 length = phba->sli4_hba.max_cfg_param.max_xri -
7607 lpfc_sli4_get_els_iocb_cnt(phba);
7608 if (phba->cfg_hba_queue_depth > length) {
7609 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7610 "3361 HBA queue depth changed from %d to %d\n",
7611 phba->cfg_hba_queue_depth, length);
7612 phba->cfg_hba_queue_depth = length;
7613 }
7614
7615 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
7616 LPFC_SLI_INTF_IF_TYPE_2)
7617 goto read_cfg_out;
7618
7619 /* get the pf# and vf# for SLI4 if_type 2 port */
7620 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
7621 sizeof(struct lpfc_sli4_cfg_mhdr));
7622 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
7623 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
7624 length, LPFC_SLI4_MBX_EMBED);
7625
7626 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7627 shdr = (union lpfc_sli4_cfg_shdr *)
7628 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7629 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7630 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7631 if (rc2 || shdr_status || shdr_add_status) {
7632 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7633 "3026 Mailbox failed , mbxCmd x%x "
7634 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
7635 bf_get(lpfc_mqe_command, &pmb->u.mqe),
7636 bf_get(lpfc_mqe_status, &pmb->u.mqe));
7637 goto read_cfg_out;
7638 }
7639
7640 /* search for fc_fcoe resource descriptor */
7641 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
7642
7643 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
7644 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
7645 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
7646 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
7647 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
7648 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
7649 goto read_cfg_out;
7650
7651 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
7652 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
7653 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
7654 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
7655 phba->sli4_hba.iov.pf_number =
7656 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
7657 phba->sli4_hba.iov.vf_number =
7658 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
7659 break;
7660 }
7661 }
7662
7663 if (i < LPFC_RSRC_DESC_MAX_NUM)
7664 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7665 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
7666 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
7667 phba->sli4_hba.iov.vf_number);
7668 else
7669 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7670 "3028 GET_FUNCTION_CONFIG: failed to find "
7671 "Resrouce Descriptor:x%x\n",
7672 LPFC_RSRC_DESC_TYPE_FCFCOE);
7673
7674 read_cfg_out:
7675 mempool_free(pmb, phba->mbox_mem_pool);
7676 return rc;
7677 }
7678
7679 /**
7680 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
7681 * @phba: pointer to lpfc hba data structure.
7682 *
7683 * This routine is invoked to setup the port-side endian order when
7684 * the port if_type is 0. This routine has no function for other
7685 * if_types.
7686 *
7687 * Return codes
7688 * 0 - successful
7689 * -ENOMEM - No available memory
7690 * -EIO - The mailbox failed to complete successfully.
7691 **/
7692 static int
7693 lpfc_setup_endian_order(struct lpfc_hba *phba)
7694 {
7695 LPFC_MBOXQ_t *mboxq;
7696 uint32_t if_type, rc = 0;
7697 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
7698 HOST_ENDIAN_HIGH_WORD1};
7699
7700 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7701 switch (if_type) {
7702 case LPFC_SLI_INTF_IF_TYPE_0:
7703 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7704 GFP_KERNEL);
7705 if (!mboxq) {
7706 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7707 "0492 Unable to allocate memory for "
7708 "issuing SLI_CONFIG_SPECIAL mailbox "
7709 "command\n");
7710 return -ENOMEM;
7711 }
7712
7713 /*
7714 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
7715 * two words to contain special data values and no other data.
7716 */
7717 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
7718 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
7719 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7720 if (rc != MBX_SUCCESS) {
7721 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7722 "0493 SLI_CONFIG_SPECIAL mailbox "
7723 "failed with status x%x\n",
7724 rc);
7725 rc = -EIO;
7726 }
7727 mempool_free(mboxq, phba->mbox_mem_pool);
7728 break;
7729 case LPFC_SLI_INTF_IF_TYPE_2:
7730 case LPFC_SLI_INTF_IF_TYPE_1:
7731 default:
7732 break;
7733 }
7734 return rc;
7735 }
7736
7737 /**
7738 * lpfc_sli4_queue_verify - Verify and update EQ counts
7739 * @phba: pointer to lpfc hba data structure.
7740 *
7741 * This routine is invoked to check the user settable queue counts for EQs.
7742 * After this routine is called the counts will be set to valid values that
7743 * adhere to the constraints of the system's interrupt vectors and the port's
7744 * queue resources.
7745 *
7746 * Return codes
7747 * 0 - successful
7748 * -ENOMEM - No available memory
7749 **/
7750 static int
7751 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
7752 {
7753 int io_channel;
7754 int fof_vectors = phba->cfg_fof ? 1 : 0;
7755
7756 /*
7757 * Sanity check for configured queue parameters against the run-time
7758 * device parameters
7759 */
7760
7761 /* Sanity check on HBA EQ parameters */
7762 io_channel = phba->io_channel_irqs;
7763
7764 if (phba->sli4_hba.num_online_cpu < io_channel) {
7765 lpfc_printf_log(phba,
7766 KERN_ERR, LOG_INIT,
7767 "3188 Reducing IO channels to match number of "
7768 "online CPUs: from %d to %d\n",
7769 io_channel, phba->sli4_hba.num_online_cpu);
7770 io_channel = phba->sli4_hba.num_online_cpu;
7771 }
7772
7773 if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
7774 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7775 "2575 Reducing IO channels to match number of "
7776 "available EQs: from %d to %d\n",
7777 io_channel,
7778 phba->sli4_hba.max_cfg_param.max_eq);
7779 io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
7780 }
7781
7782 /* The actual number of FCP / NVME event queues adopted */
7783 if (io_channel != phba->io_channel_irqs)
7784 phba->io_channel_irqs = io_channel;
7785 if (phba->cfg_fcp_io_channel > io_channel)
7786 phba->cfg_fcp_io_channel = io_channel;
7787 if (phba->cfg_nvme_io_channel > io_channel)
7788 phba->cfg_nvme_io_channel = io_channel;
7789 if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
7790 phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
7791
7792 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7793 "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
7794 phba->io_channel_irqs, phba->cfg_fcp_io_channel,
7795 phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
7796
7797 /* Get EQ depth from module parameter, fake the default for now */
7798 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
7799 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
7800
7801 /* Get CQ depth from module parameter, fake the default for now */
7802 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
7803 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
7804 return 0;
7805 }
7806
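/**
 * lpfc_alloc_nvme_wq_cq - Allocate a fast-path NVME WQ/CQ pair
 * @phba: pointer to lpfc hba data structure.
 * @wqidx: index of the fast-path WQ/CQ pair to allocate.
 *
 * This routine allocates the fast-path NVME completion queue and the
 * 128-byte entry work queue for the given index.
 *
 * Return 0 if successful, otherwise 1.
 **/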
7807 static int
7808 lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
7809 {
7810 struct lpfc_queue *qdesc;
7811 int cnt;
7812
7813 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7814 phba->sli4_hba.cq_ecount);
7815 if (!qdesc) {
7816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7817 "0508 Failed allocate fast-path NVME CQ (%d)\n",
7818 wqidx);
7819 return 1;
7820 }
7821 phba->sli4_hba.nvme_cq[wqidx] = qdesc;
7822
7823 cnt = LPFC_NVME_WQSIZE;
7824 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
7825 if (!qdesc) {
7826 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7827 "0509 Failed allocate fast-path NVME WQ (%d)\n",
7828 wqidx);
7829 return 1;
7830 }
7831 phba->sli4_hba.nvme_wq[wqidx] = qdesc;
7832 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
7833 return 0;
7834 }
7835
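/**
 * lpfc_alloc_fcp_wq_cq - Allocate a fast-path FCP WQ/CQ pair
 * @phba: pointer to lpfc hba data structure.
 * @wqidx: index of the fast-path WQ/CQ pair to allocate.
 *
 * This routine allocates the fast-path FCP completion queue and work queue
 * for the given index, using 128-byte work queue entries when the FCP
 * command is embedded in the WQE (fcp_embed_io).
 *
 * Return 0 if successful, otherwise 1.
 **/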
7836 static int
7837 lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
7838 {
7839 struct lpfc_queue *qdesc;
7840 uint32_t wqesize;
7841
7842 /* Create Fast Path FCP CQs */
7843 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7844 phba->sli4_hba.cq_ecount);
7845 if (!qdesc) {
7846 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7847 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
7848 return 1;
7849 }
7850 phba->sli4_hba.fcp_cq[wqidx] = qdesc;
7851
7852 /* Create Fast Path FCP WQs */
7853 wqesize = (phba->fcp_embed_io) ?
7854 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
7855 qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
7856 if (!qdesc) {
7857 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7858 "0503 Failed allocate fast-path FCP WQ (%d)\n",
7859 wqidx);
7860 return 1;
7861 }
7862 phba->sli4_hba.fcp_wq[wqidx] = qdesc;
7863 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
7864 return 0;
7865 }
7866
7867 /**
7868 * lpfc_sli4_queue_create - Create all the SLI4 queues
7869 * @phba: pointer to lpfc hba data structure.
7870 *
7871 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
7872 * operation. For each SLI4 queue type, the parameters such as queue entry
7873 * count (queue depth) shall be taken from the module parameter. For now,
7874 * we just use some constant number as a placeholder.
7875 *
7876 * Return codes
7877 * 0 - successful
7878 * -ENOMEM - No available memory
7879 * -EIO - The mailbox failed to complete successfully.
7880 **/
7881 int
7882 lpfc_sli4_queue_create(struct lpfc_hba *phba)
7883 {
7884 struct lpfc_queue *qdesc;
7885 int idx, io_channel;
7886
7887 /*
7888 * Create HBA Record arrays.
7889 * Both NVME and FCP will share that same vectors / EQs
7890 */
7891 io_channel = phba->io_channel_irqs;
7892 if (!io_channel)
7893 return -ERANGE;
7894
7895 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
7896 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
7897 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
7898 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
7899 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
7900 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
7901 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
7902 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
7903 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
7904 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
7905
7906 phba->sli4_hba.hba_eq = kcalloc(io_channel,
7907 sizeof(struct lpfc_queue *),
7908 GFP_KERNEL);
7909 if (!phba->sli4_hba.hba_eq) {
7910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7911 "2576 Failed allocate memory for "
7912 "fast-path EQ record array\n");
7913 goto out_error;
7914 }
7915
7916 if (phba->cfg_fcp_io_channel) {
7917 phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
7918 sizeof(struct lpfc_queue *),
7919 GFP_KERNEL);
7920 if (!phba->sli4_hba.fcp_cq) {
7921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7922 "2577 Failed allocate memory for "
7923 "fast-path CQ record array\n");
7924 goto out_error;
7925 }
7926 phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
7927 sizeof(struct lpfc_queue *),
7928 GFP_KERNEL);
7929 if (!phba->sli4_hba.fcp_wq) {
7930 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7931 "2578 Failed allocate memory for "
7932 "fast-path FCP WQ record array\n");
7933 goto out_error;
7934 }
7935 /*
7936 * Since the first EQ can have multiple CQs associated with it,
7937 * this array is used to quickly see if we have a FCP fast-path
7938 * CQ match.
7939 */
7940 phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
7941 sizeof(uint16_t),
7942 GFP_KERNEL);
7943 if (!phba->sli4_hba.fcp_cq_map) {
7944 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7945 "2545 Failed allocate memory for "
7946 "fast-path CQ map\n");
7947 goto out_error;
7948 }
7949 }
7950
7951 if (phba->cfg_nvme_io_channel) {
7952 phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
7953 sizeof(struct lpfc_queue *),
7954 GFP_KERNEL);
7955 if (!phba->sli4_hba.nvme_cq) {
7956 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7957 "6077 Failed allocate memory for "
7958 "fast-path CQ record array\n");
7959 goto out_error;
7960 }
7961
7962 phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
7963 sizeof(struct lpfc_queue *),
7964 GFP_KERNEL);
7965 if (!phba->sli4_hba.nvme_wq) {
7966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7967 "2581 Failed allocate memory for "
7968 "fast-path NVME WQ record array\n");
7969 goto out_error;
7970 }
7971
7972 /*
7973 * Since the first EQ can have multiple CQs associated with it,
7974 * this array is used to quickly see if we have a NVME fast-path
7975 * CQ match.
7976 */
7977 phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
7978 sizeof(uint16_t),
7979 GFP_KERNEL);
7980 if (!phba->sli4_hba.nvme_cq_map) {
7981 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7982 "6078 Failed allocate memory for "
7983 "fast-path CQ map\n");
7984 goto out_error;
7985 }
7986
7987 if (phba->nvmet_support) {
7988 phba->sli4_hba.nvmet_cqset = kcalloc(
7989 phba->cfg_nvmet_mrq,
7990 sizeof(struct lpfc_queue *),
7991 GFP_KERNEL);
7992 if (!phba->sli4_hba.nvmet_cqset) {
7993 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7994 "3121 Fail allocate memory for "
7995 "fast-path CQ set array\n");
7996 goto out_error;
7997 }
7998 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
7999 phba->cfg_nvmet_mrq,
8000 sizeof(struct lpfc_queue *),
8001 GFP_KERNEL);
8002 if (!phba->sli4_hba.nvmet_mrq_hdr) {
8003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8004 "3122 Fail allocate memory for "
8005 "fast-path RQ set hdr array\n");
8006 goto out_error;
8007 }
8008 phba->sli4_hba.nvmet_mrq_data = kcalloc(
8009 phba->cfg_nvmet_mrq,
8010 sizeof(struct lpfc_queue *),
8011 GFP_KERNEL);
8012 if (!phba->sli4_hba.nvmet_mrq_data) {
8013 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8014 "3124 Fail allocate memory for "
8015 "fast-path RQ set data array\n");
8016 goto out_error;
8017 }
8018 }
8019 }
8020
8021 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
8022
8023 /* Create HBA Event Queues (EQs) */
8024 for (idx = 0; idx < io_channel; idx++) {
8025 /* Create EQs */
8026 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
8027 phba->sli4_hba.eq_ecount);
8028 if (!qdesc) {
8029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8030 "0497 Failed allocate EQ (%d)\n", idx);
8031 goto out_error;
8032 }
8033 phba->sli4_hba.hba_eq[idx] = qdesc;
8034 }
8035
8036 /* FCP and NVME io channels are not required to be balanced */
8037
8038 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8039 if (lpfc_alloc_fcp_wq_cq(phba, idx))
8040 goto out_error;
8041
8042 for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
8043 if (lpfc_alloc_nvme_wq_cq(phba, idx))
8044 goto out_error;
8045
8046 if (phba->nvmet_support) {
8047 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8048 qdesc = lpfc_sli4_queue_alloc(phba,
8049 phba->sli4_hba.cq_esize,
8050 phba->sli4_hba.cq_ecount);
8051 if (!qdesc) {
8052 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8053 "3142 Failed allocate NVME "
8054 "CQ Set (%d)\n", idx);
8055 goto out_error;
8056 }
8057 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
8058 }
8059 }
8060
8061 /*
8062 * Create Slow Path Completion Queues (CQs)
8063 */
8064
8065 /* Create slow-path Mailbox Command Complete Queue */
8066 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
8067 phba->sli4_hba.cq_ecount);
8068 if (!qdesc) {
8069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8070 "0500 Failed allocate slow-path mailbox CQ\n");
8071 goto out_error;
8072 }
8073 phba->sli4_hba.mbx_cq = qdesc;
8074
8075 /* Create slow-path ELS Complete Queue */
8076 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
8077 phba->sli4_hba.cq_ecount);
8078 if (!qdesc) {
8079 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8080 "0501 Failed allocate slow-path ELS CQ\n");
8081 goto out_error;
8082 }
8083 phba->sli4_hba.els_cq = qdesc;
8084
8085
8086 /*
8087 * Create Slow Path Work Queues (WQs)
8088 */
8089
8090 /* Create Mailbox Command Queue */
8091
8092 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
8093 phba->sli4_hba.mq_ecount);
8094 if (!qdesc) {
8095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8096 "0505 Failed allocate slow-path MQ\n");
8097 goto out_error;
8098 }
8099 phba->sli4_hba.mbx_wq = qdesc;
8100
8101 /*
8102 * Create ELS Work Queues
8103 */
8104
8105 /* Create slow-path ELS Work Queue */
8106 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
8107 phba->sli4_hba.wq_ecount);
8108 if (!qdesc) {
8109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8110 "0504 Failed allocate slow-path ELS WQ\n");
8111 goto out_error;
8112 }
8113 phba->sli4_hba.els_wq = qdesc;
8114 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8115
8116 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8117 /* Create NVME LS Complete Queue */
8118 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
8119 phba->sli4_hba.cq_ecount);
8120 if (!qdesc) {
8121 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8122 "6079 Failed allocate NVME LS CQ\n");
8123 goto out_error;
8124 }
8125 phba->sli4_hba.nvmels_cq = qdesc;
8126
8127 /* Create NVME LS Work Queue */
8128 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
8129 phba->sli4_hba.wq_ecount);
8130 if (!qdesc) {
8131 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8132 "6080 Failed allocate NVME LS WQ\n");
8133 goto out_error;
8134 }
8135 phba->sli4_hba.nvmels_wq = qdesc;
8136 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8137 }
8138
8139 /*
8140 * Create Receive Queue (RQ)
8141 */
8142
8143 /* Create Receive Queue for header */
8144 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
8145 phba->sli4_hba.rq_ecount);
8146 if (!qdesc) {
8147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8148 "0506 Failed allocate receive HRQ\n");
8149 goto out_error;
8150 }
8151 phba->sli4_hba.hdr_rq = qdesc;
8152
8153 /* Create Receive Queue for data */
8154 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
8155 phba->sli4_hba.rq_ecount);
8156 if (!qdesc) {
8157 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8158 "0507 Failed allocate receive DRQ\n");
8159 goto out_error;
8160 }
8161 phba->sli4_hba.dat_rq = qdesc;
8162
8163 if (phba->nvmet_support) {
8164 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8165 /* Create NVMET Receive Queue for header */
8166 qdesc = lpfc_sli4_queue_alloc(phba,
8167 phba->sli4_hba.rq_esize,
8168 phba->sli4_hba.rq_ecount);
8169 if (!qdesc) {
8170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8171 "3146 Failed allocate "
8172 "receive HRQ\n");
8173 goto out_error;
8174 }
8175 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
8176
8177 /* Only needed for header of RQ pair */
8178 qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
8179 GFP_KERNEL);
8180 if (qdesc->rqbp == NULL) {
8181 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8182 "6131 Failed allocate "
8183 "Header RQBP\n");
8184 goto out_error;
8185 }
8186
8187 /* Create NVMET Receive Queue for data */
8188 qdesc = lpfc_sli4_queue_alloc(phba,
8189 phba->sli4_hba.rq_esize,
8190 phba->sli4_hba.rq_ecount);
8191 if (!qdesc) {
8192 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8193 "3156 Failed allocate "
8194 "receive DRQ\n");
8195 goto out_error;
8196 }
8197 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
8198 }
8199 }
8200
8201 /* Create the Queues needed for Flash Optimized Fabric operations */
8202 if (phba->cfg_fof)
8203 lpfc_fof_queue_create(phba);
8204 return 0;
8205
8206 out_error:
8207 lpfc_sli4_queue_destroy(phba);
8208 return -ENOMEM;
8209 }
8210
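/*
 * Helpers that free a single queue, an array of queues, or a CQ map array
 * and clear the owning pointer.
 */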
8211 static inline void
8212 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
8213 {
8214 if (*qp != NULL) {
8215 lpfc_sli4_queue_free(*qp);
8216 *qp = NULL;
8217 }
8218 }
8219
8220 static inline void
8221 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
8222 {
8223 int idx;
8224
8225 if (*qs == NULL)
8226 return;
8227
8228 for (idx = 0; idx < max; idx++)
8229 __lpfc_sli4_release_queue(&(*qs)[idx]);
8230
8231 kfree(*qs);
8232 *qs = NULL;
8233 }
8234
8235 static inline void
8236 lpfc_sli4_release_queue_map(uint16_t **qmap)
8237 {
8238 if (*qmap != NULL) {
8239 kfree(*qmap);
8240 *qmap = NULL;
8241 }
8242 }
8243
8244 /**
8245 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
8246 * @phba: pointer to lpfc hba data structure.
8247 *
8248 * This routine is invoked to release all the SLI4 queues created for the
8249 * FCoE HBA operation.
8255 **/
8256 void
8257 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
8258 {
8259 if (phba->cfg_fof)
8260 lpfc_fof_queue_destroy(phba);
8261
8262 /* Release HBA eqs */
8263 lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
8264
8265 /* Release FCP cqs */
8266 lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
8267 phba->cfg_fcp_io_channel);
8268
8269 /* Release FCP wqs */
8270 lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
8271 phba->cfg_fcp_io_channel);
8272
8273 /* Release FCP CQ mapping array */
8274 lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
8275
8276 /* Release NVME cqs */
8277 lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
8278 phba->cfg_nvme_io_channel);
8279
8280 /* Release NVME wqs */
8281 lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
8282 phba->cfg_nvme_io_channel);
8283
8284 /* Release NVME CQ mapping array */
8285 lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
8286
8287 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
8288 phba->cfg_nvmet_mrq);
8289
8290 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
8291 phba->cfg_nvmet_mrq);
8292 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
8293 phba->cfg_nvmet_mrq);
8294
8295 /* Release mailbox command work queue */
8296 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
8297
8298 /* Release ELS work queue */
8299 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
8300
8301 /* Release ELS work queue */
8302 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
8303
8304 /* Release unsolicited receive queue */
8305 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
8306 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
8307
8308 /* Release ELS complete queue */
8309 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
8310
8311 /* Release NVME LS complete queue */
8312 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
8313
8314 /* Release mailbox command complete queue */
8315 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
8316
8317 /* Everything on this list has been freed */
8318 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
8319 }
8320
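/**
 * lpfc_post_rq_buffer - Post receive buffers to a header/data RQ pair
 * @phba: pointer to lpfc hba data structure.
 * @hrq: pointer to the header receive queue.
 * @drq: pointer to the data receive queue.
 * @count: number of receive buffers to allocate and post.
 *
 * This routine allocates up to @count receive buffers, posts each
 * header/data pair to the port, and tracks successfully posted buffers
 * on the header queue's buffer list.
 **/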
8321 int
8322 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
8323 struct lpfc_queue *drq, int count)
8324 {
8325 int rc, i;
8326 struct lpfc_rqe hrqe;
8327 struct lpfc_rqe drqe;
8328 struct lpfc_rqb *rqbp;
8329 struct rqb_dmabuf *rqb_buffer;
8330 LIST_HEAD(rqb_buf_list);
8331
8332 rqbp = hrq->rqbp;
8333 for (i = 0; i < count; i++) {
8334 rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
8335 if (!rqb_buffer)
8336 break;
8337 rqb_buffer->hrq = hrq;
8338 rqb_buffer->drq = drq;
8339 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
8340 }
8341 while (!list_empty(&rqb_buf_list)) {
8342 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
8343 hbuf.list);
8344
8345 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
8346 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
8347 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
8348 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
8349 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
8350 if (rc < 0) {
8351 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
8352 } else {
8353 list_add_tail(&rqb_buffer->hbuf.list,
8354 &rqbp->rqb_buffer_list);
8355 rqbp->buffer_count++;
8356 }
8357 }
8358 return 1;
8359 }
8360
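/**
 * lpfc_free_rq_buffer - Free all buffers posted to a receive queue
 * @phba: pointer to lpfc hba data structure.
 * @rq: pointer to the receive queue whose posted buffers are released.
 *
 * This routine walks the receive queue's buffer list and releases each
 * posted buffer through the queue's rqb_free_buffer callback.
 **/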
8361 int
8362 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
8363 {
8364 struct lpfc_rqb *rqbp;
8365 struct lpfc_dmabuf *h_buf;
8366 struct rqb_dmabuf *rqb_buffer;
8367
8368 rqbp = rq->rqbp;
8369 while (!list_empty(&rqbp->rqb_buffer_list)) {
8370 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
8371 struct lpfc_dmabuf, list);
8372
8373 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
8374 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
8375 rqbp->buffer_count--;
8376 }
8377 return 1;
8378 }
8379
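/**
 * lpfc_create_wq_cq - Create one WQ/CQ (or MQ/CQ) pair on an event queue
 * @phba: pointer to lpfc hba data structure.
 * @eq: parent event queue for the new completion queue.
 * @cq: completion queue to create.
 * @wq: work queue (or mailbox queue when @qtype is LPFC_MBOX) to create.
 * @cq_map: optional location to record the new completion queue id.
 * @qidx: fast-path index, used for logging.
 * @qtype: type of queue pair being created (LPFC_FCP, LPFC_NVME, ...).
 *
 * This routine creates the completion queue on @eq first, then creates the
 * work queue (or mailbox queue) on that completion queue and, for fast-path
 * queues, binds the pair to its SLI ring.
 *
 * Return 0 if successful, otherwise an error code.
 **/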
8380 static int
8381 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
8382 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
8383 int qidx, uint32_t qtype)
8384 {
8385 struct lpfc_sli_ring *pring;
8386 int rc;
8387
8388 if (!eq || !cq || !wq) {
8389 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8390 "6085 Fast-path %s (%d) not allocated\n",
8391 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
8392 return -ENOMEM;
8393 }
8394
8395 /* create the Cq first */
8396 rc = lpfc_cq_create(phba, cq, eq,
8397 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
8398 if (rc) {
8399 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8400 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
8401 qidx, (uint32_t)rc);
8402 return rc;
8403 }
8404
8405 if (qtype != LPFC_MBOX) {
8406 /* Setup nvme_cq_map for fast lookup */
8407 if (cq_map)
8408 *cq_map = cq->queue_id;
8409
8410 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8411 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
8412 qidx, cq->queue_id, qidx, eq->queue_id);
8413
8414 /* create the wq */
8415 rc = lpfc_wq_create(phba, wq, cq, qtype);
8416 if (rc) {
8417 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8418 "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
8419 qidx, (uint32_t)rc);
8420 /* no need to tear down cq - caller will do so */
8421 return rc;
8422 }
8423
8424 /* Bind this CQ/WQ to the NVME ring */
8425 pring = wq->pring;
8426 pring->sli.sli4.wqp = (void *)wq;
8427 cq->pring = pring;
8428
8429 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8430 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
8431 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
8432 } else {
8433 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
8434 if (rc) {
8435 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8436 "0539 Failed setup of slow-path MQ: "
8437 "rc = 0x%x\n", rc);
8438 /* no need to tear down cq - caller will do so */
8439 return rc;
8440 }
8441
8442 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8443 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
8444 phba->sli4_hba.mbx_wq->queue_id,
8445 phba->sli4_hba.mbx_cq->queue_id);
8446 }
8447
8448 return 0;
8449 }
8450
8451 /**
8452 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
8453 * @phba: pointer to lpfc hba data structure.
8454 *
8455 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
8456 * operation.
8457 *
8458 * Return codes
8459 * 0 - successful
8460 * -ENOMEM - No available memory
8461 * -EIO - The mailbox failed to complete successfully.
8462 **/
8463 int
8464 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
8465 {
8466 uint32_t shdr_status, shdr_add_status;
8467 union lpfc_sli4_cfg_shdr *shdr;
8468 LPFC_MBOXQ_t *mboxq;
8469 int qidx;
8470 uint32_t length, io_channel;
8471 int rc = -ENOMEM;
8472
8473 /* Check for dual-ULP support */
8474 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8475 if (!mboxq) {
8476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8477 "3249 Unable to allocate memory for "
8478 "QUERY_FW_CFG mailbox command\n");
8479 return -ENOMEM;
8480 }
8481 length = (sizeof(struct lpfc_mbx_query_fw_config) -
8482 sizeof(struct lpfc_sli4_cfg_mhdr));
8483 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8484 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
8485 length, LPFC_SLI4_MBX_EMBED);
8486
8487 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8488
8489 shdr = (union lpfc_sli4_cfg_shdr *)
8490 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
8491 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8492 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8493 if (shdr_status || shdr_add_status || rc) {
8494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8495 "3250 QUERY_FW_CFG mailbox failed with status "
8496 "x%x add_status x%x, mbx status x%x\n",
8497 shdr_status, shdr_add_status, rc);
8498 if (rc != MBX_TIMEOUT)
8499 mempool_free(mboxq, phba->mbox_mem_pool);
8500 rc = -ENXIO;
8501 goto out_error;
8502 }
8503
8504 phba->sli4_hba.fw_func_mode =
8505 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
8506 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
8507 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
8508 phba->sli4_hba.physical_port =
8509 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
8510 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8511 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
8512 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
8513 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
8514
8515 if (rc != MBX_TIMEOUT)
8516 mempool_free(mboxq, phba->mbox_mem_pool);
8517
8518 /*
8519 * Set up HBA Event Queues (EQs)
8520 */
8521 io_channel = phba->io_channel_irqs;
8522
8523 /* Set up HBA event queue */
8524 if (io_channel && !phba->sli4_hba.hba_eq) {
8525 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8526 "3147 Fast-path EQs not allocated\n");
8527 rc = -ENOMEM;
8528 goto out_error;
8529 }
8530 for (qidx = 0; qidx < io_channel; qidx++) {
8531 if (!phba->sli4_hba.hba_eq[qidx]) {
8532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8533 "0522 Fast-path EQ (%d) not "
8534 "allocated\n", qidx);
8535 rc = -ENOMEM;
8536 goto out_destroy;
8537 }
8538 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
8539 phba->cfg_fcp_imax);
8540 if (rc) {
8541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8542 "0523 Failed setup of fast-path EQ "
8543 "(%d), rc = 0x%x\n", qidx,
8544 (uint32_t)rc);
8545 goto out_destroy;
8546 }
8547 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8548 "2584 HBA EQ setup: queue[%d]-id=%d\n",
8549 qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
8550 }
8551
8552 if (phba->cfg_nvme_io_channel) {
8553 if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
8554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8555 "6084 Fast-path NVME %s array not allocated\n",
8556 (phba->sli4_hba.nvme_cq) ? "CQ" : "WQ");
8557 rc = -ENOMEM;
8558 goto out_destroy;
8559 }
8560
8561 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
8562 rc = lpfc_create_wq_cq(phba,
8563 phba->sli4_hba.hba_eq[
8564 qidx % io_channel],
8565 phba->sli4_hba.nvme_cq[qidx],
8566 phba->sli4_hba.nvme_wq[qidx],
8567 &phba->sli4_hba.nvme_cq_map[qidx],
8568 qidx, LPFC_NVME);
8569 if (rc) {
8570 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8571 "6123 Failed to setup fastpath "
8572 "NVME WQ/CQ (%d), rc = 0x%x\n",
8573 qidx, (uint32_t)rc);
8574 goto out_destroy;
8575 }
8576 }
8577 }
8578
8579 if (phba->cfg_fcp_io_channel) {
8580 /* Set up fast-path FCP Response Complete Queue */
8581 if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
8582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8583 "3148 Fast-path FCP %s array not allocated\n",
8584 phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
8585 rc = -ENOMEM;
8586 goto out_destroy;
8587 }
8588
8589 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
8590 rc = lpfc_create_wq_cq(phba,
8591 phba->sli4_hba.hba_eq[
8592 qidx % io_channel],
8593 phba->sli4_hba.fcp_cq[qidx],
8594 phba->sli4_hba.fcp_wq[qidx],
8595 &phba->sli4_hba.fcp_cq_map[qidx],
8596 qidx, LPFC_FCP);
8597 if (rc) {
8598 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8599 "0535 Failed to setup fastpath "
8600 "FCP WQ/CQ (%d), rc = 0x%x\n",
8601 qidx, (uint32_t)rc);
8602 goto out_destroy;
8603 }
8604 }
8605 }
8606
8607 /*
8608 * Set up Slow Path Complete Queues (CQs)
8609 */
8610
8611 /* Set up slow-path MBOX CQ/MQ */
8612
8613 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
8614 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8615 "0528 %s not allocated\n",
8616 phba->sli4_hba.mbx_cq ?
8617 "Mailbox WQ" : "Mailbox CQ");
8618 rc = -ENOMEM;
8619 goto out_destroy;
8620 }
8621
8622 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
8623 phba->sli4_hba.mbx_cq,
8624 phba->sli4_hba.mbx_wq,
8625 NULL, 0, LPFC_MBOX);
8626 if (rc) {
8627 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8628 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
8629 (uint32_t)rc);
8630 goto out_destroy;
8631 }
8632 if (phba->nvmet_support) {
8633 if (!phba->sli4_hba.nvmet_cqset) {
8634 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8635 "3165 Fast-path NVME CQ Set "
8636 "array not allocated\n");
8637 rc = -ENOMEM;
8638 goto out_destroy;
8639 }
8640 if (phba->cfg_nvmet_mrq > 1) {
8641 rc = lpfc_cq_create_set(phba,
8642 phba->sli4_hba.nvmet_cqset,
8643 phba->sli4_hba.hba_eq,
8644 LPFC_WCQ, LPFC_NVMET);
8645 if (rc) {
8646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8647 "3164 Failed setup of NVME CQ "
8648 "Set, rc = 0x%x\n",
8649 (uint32_t)rc);
8650 goto out_destroy;
8651 }
8652 } else {
8653 /* Set up NVMET Receive Complete Queue */
8654 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
8655 phba->sli4_hba.hba_eq[0],
8656 LPFC_WCQ, LPFC_NVMET);
8657 if (rc) {
8658 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8659 "6089 Failed setup NVMET CQ: "
8660 "rc = 0x%x\n", (uint32_t)rc);
8661 goto out_destroy;
8662 }
8663 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8664 "6090 NVMET CQ setup: cq-id=%d, "
8665 "parent eq-id=%d\n",
8666 phba->sli4_hba.nvmet_cqset[0]->queue_id,
8667 phba->sli4_hba.hba_eq[0]->queue_id);
8668 }
8669 }
8670
8671 /* Set up slow-path ELS WQ/CQ */
8672 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
8673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8674 "0530 ELS %s not allocated\n",
8675 phba->sli4_hba.els_cq ? "WQ" : "CQ");
8676 rc = -ENOMEM;
8677 goto out_destroy;
8678 }
8679 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
8680 phba->sli4_hba.els_cq,
8681 phba->sli4_hba.els_wq,
8682 NULL, 0, LPFC_ELS);
8683 if (rc) {
8684 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8685 "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
8686 (uint32_t)rc);
8687 goto out_destroy;
8688 }
8689 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8690 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
8691 phba->sli4_hba.els_wq->queue_id,
8692 phba->sli4_hba.els_cq->queue_id);
8693
8694 if (phba->cfg_nvme_io_channel) {
8695 /* Set up NVME LS Complete Queue */
8696 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
8697 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8698 "6091 LS %s not allocated\n",
8699 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
8700 rc = -ENOMEM;
8701 goto out_destroy;
8702 }
8703 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
8704 phba->sli4_hba.nvmels_cq,
8705 phba->sli4_hba.nvmels_wq,
8706 NULL, 0, LPFC_NVME_LS);
8707 if (rc) {
8708 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8709 					"0529 Failed setup of NVME LS WQ/CQ: "
8710 "rc = 0x%x\n", (uint32_t)rc);
8711 goto out_destroy;
8712 }
8713
8714 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8715 				"6096 NVME LS WQ setup: wq-id=%d, "
8716 "parent cq-id=%d\n",
8717 phba->sli4_hba.nvmels_wq->queue_id,
8718 phba->sli4_hba.nvmels_cq->queue_id);
8719 }
8720
8721 /*
8722 * Create NVMET Receive Queue (RQ)
8723 */
8724 if (phba->nvmet_support) {
8725 if ((!phba->sli4_hba.nvmet_cqset) ||
8726 (!phba->sli4_hba.nvmet_mrq_hdr) ||
8727 (!phba->sli4_hba.nvmet_mrq_data)) {
8728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8729 "6130 MRQ CQ Queues not "
8730 "allocated\n");
8731 rc = -ENOMEM;
8732 goto out_destroy;
8733 }
8734 if (phba->cfg_nvmet_mrq > 1) {
8735 rc = lpfc_mrq_create(phba,
8736 phba->sli4_hba.nvmet_mrq_hdr,
8737 phba->sli4_hba.nvmet_mrq_data,
8738 phba->sli4_hba.nvmet_cqset,
8739 LPFC_NVMET);
8740 if (rc) {
8741 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8742 "6098 Failed setup of NVMET "
8743 "MRQ: rc = 0x%x\n",
8744 (uint32_t)rc);
8745 goto out_destroy;
8746 }
8747
8748 } else {
8749 rc = lpfc_rq_create(phba,
8750 phba->sli4_hba.nvmet_mrq_hdr[0],
8751 phba->sli4_hba.nvmet_mrq_data[0],
8752 phba->sli4_hba.nvmet_cqset[0],
8753 LPFC_NVMET);
8754 if (rc) {
8755 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8756 "6057 Failed setup of NVMET "
8757 "Receive Queue: rc = 0x%x\n",
8758 (uint32_t)rc);
8759 goto out_destroy;
8760 }
8761
8762 lpfc_printf_log(
8763 phba, KERN_INFO, LOG_INIT,
8764 "6099 NVMET RQ setup: hdr-rq-id=%d, "
8765 "dat-rq-id=%d parent cq-id=%d\n",
8766 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
8767 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
8768 phba->sli4_hba.nvmet_cqset[0]->queue_id);
8769
8770 }
8771 }
8772
8773 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
8774 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8775 "0540 Receive Queue not allocated\n");
8776 rc = -ENOMEM;
8777 goto out_destroy;
8778 }
8779
8780 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
8781 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
8782
8783 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
8784 phba->sli4_hba.els_cq, LPFC_USOL);
8785 if (rc) {
8786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8787 "0541 Failed setup of Receive Queue: "
8788 "rc = 0x%x\n", (uint32_t)rc);
8789 goto out_destroy;
8790 }
8791
8792 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8793 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
8794 "parent cq-id=%d\n",
8795 phba->sli4_hba.hdr_rq->queue_id,
8796 phba->sli4_hba.dat_rq->queue_id,
8797 phba->sli4_hba.els_cq->queue_id);
8798
8799 if (phba->cfg_fof) {
8800 rc = lpfc_fof_queue_setup(phba);
8801 if (rc) {
8802 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8803 "0549 Failed setup of FOF Queues: "
8804 "rc = 0x%x\n", rc);
8805 goto out_destroy;
8806 }
8807 }
8808
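	/*
	 * Program the default interrupt coalescing delay on the event queues.
	 * Each call to lpfc_modify_hba_eq_delay() is assumed to cover a block
	 * of up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ IDs starting at qidx, which
	 * is why the loop below advances in that stride rather than one EQ at
	 * a time.
	 */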
8809 for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
8810 lpfc_modify_hba_eq_delay(phba, qidx);
8811
8812 return 0;
8813
8814 out_destroy:
8815 lpfc_sli4_queue_unset(phba);
8816 out_error:
8817 return rc;
8818 }
8819
8820 /**
8821 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
8822 * @phba: pointer to lpfc hba data structure.
8823 *
8824  * This routine is invoked to unset all the SLI4 queues that were set up for
8825  * the FCoE HBA operation.
8826  *
8827  * This function does not return a value.
8831 **/
8832 void
8833 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
8834 {
8835 int qidx;
8836
8837 /* Unset the queues created for Flash Optimized Fabric operations */
8838 if (phba->cfg_fof)
8839 lpfc_fof_queue_destroy(phba);
8840
8841 /* Unset mailbox command work queue */
8842 if (phba->sli4_hba.mbx_wq)
8843 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
8844
8845 /* Unset NVME LS work queue */
8846 if (phba->sli4_hba.nvmels_wq)
8847 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
8848
8849 /* Unset ELS work queue */
8850 	if (phba->sli4_hba.els_wq)
8851 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
8852
8853 /* Unset unsolicited receive queue */
8854 if (phba->sli4_hba.hdr_rq)
8855 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
8856 phba->sli4_hba.dat_rq);
8857
8858 /* Unset FCP work queue */
8859 if (phba->sli4_hba.fcp_wq)
8860 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
8861 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
8862
8863 /* Unset NVME work queue */
8864 if (phba->sli4_hba.nvme_wq) {
8865 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
8866 lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
8867 }
8868
8869 /* Unset mailbox command complete queue */
8870 if (phba->sli4_hba.mbx_cq)
8871 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
8872
8873 /* Unset ELS complete queue */
8874 if (phba->sli4_hba.els_cq)
8875 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
8876
8877 /* Unset NVME LS complete queue */
8878 if (phba->sli4_hba.nvmels_cq)
8879 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
8880
8881 /* Unset NVME response complete queue */
8882 if (phba->sli4_hba.nvme_cq)
8883 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
8884 lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
8885
8886 /* Unset NVMET MRQ queue */
8887 if (phba->sli4_hba.nvmet_mrq_hdr) {
8888 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
8889 lpfc_rq_destroy(phba,
8890 phba->sli4_hba.nvmet_mrq_hdr[qidx],
8891 phba->sli4_hba.nvmet_mrq_data[qidx]);
8892 }
8893
8894 /* Unset NVMET CQ Set complete queue */
8895 if (phba->sli4_hba.nvmet_cqset) {
8896 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
8897 lpfc_cq_destroy(phba,
8898 phba->sli4_hba.nvmet_cqset[qidx]);
8899 }
8900
8901 /* Unset FCP response complete queue */
8902 if (phba->sli4_hba.fcp_cq)
8903 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
8904 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
8905
8906 /* Unset fast-path event queue */
8907 if (phba->sli4_hba.hba_eq)
8908 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
8909 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
8910 }
8911
8912 /**
8913 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
8914 * @phba: pointer to lpfc hba data structure.
8915 *
8916 * This routine is invoked to allocate and set up a pool of completion queue
8917 * events. The body of the completion queue event is a completion queue entry
8918 * CQE. For now, this pool is used for the interrupt service routine to queue
8919 * the following HBA completion queue events for the worker thread to process:
8920 * - Mailbox asynchronous events
8921 * - Receive queue completion unsolicited events
8922 * Later, this can be used for all the slow-path events.
8923 *
8924 * Return codes
8925 * 0 - successful
8926 * -ENOMEM - No available memory
8927 **/
8928 static int
8929 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
8930 {
8931 struct lpfc_cq_event *cq_event;
8932 int i;
8933
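	/*
	 * Size the free pool at four times the configured CQ entry count.
	 * The 4x multiplier appears to be simple headroom so the interrupt
	 * handler can keep queueing slow-path events while the worker thread
	 * drains them; the exact sizing rationale is an assumption, not
	 * something the driver documents.
	 */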
8934 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
8935 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
8936 if (!cq_event)
8937 goto out_pool_create_fail;
8938 list_add_tail(&cq_event->list,
8939 &phba->sli4_hba.sp_cqe_event_pool);
8940 }
8941 return 0;
8942
8943 out_pool_create_fail:
8944 lpfc_sli4_cq_event_pool_destroy(phba);
8945 return -ENOMEM;
8946 }
8947
8948 /**
8949 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
8950 * @phba: pointer to lpfc hba data structure.
8951 *
8952 * This routine is invoked to free the pool of completion queue events at
8953 * driver unload time. Note that, it is the responsibility of the driver
8954 * cleanup routine to free all the outstanding completion-queue events
8955 * allocated from this pool back into the pool before invoking this routine
8956 * to destroy the pool.
8957 **/
8958 static void
8959 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
8960 {
8961 struct lpfc_cq_event *cq_event, *next_cq_event;
8962
8963 list_for_each_entry_safe(cq_event, next_cq_event,
8964 &phba->sli4_hba.sp_cqe_event_pool, list) {
8965 list_del(&cq_event->list);
8966 kfree(cq_event);
8967 }
8968 }
8969
8970 /**
8971 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
8972 * @phba: pointer to lpfc hba data structure.
8973 *
8974 * This routine is the lock free version of the API invoked to allocate a
8975 * completion-queue event from the free pool.
8976 *
8977 * Return: Pointer to the newly allocated completion-queue event if successful
8978 * NULL otherwise.
8979 **/
8980 struct lpfc_cq_event *
8981 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
8982 {
8983 struct lpfc_cq_event *cq_event = NULL;
8984
8985 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
8986 struct lpfc_cq_event, list);
8987 return cq_event;
8988 }
8989
8990 /**
8991 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
8992 * @phba: pointer to lpfc hba data structure.
8993 *
8994 * This routine is the lock version of the API invoked to allocate a
8995 * completion-queue event from the free pool.
8996 *
8997 * Return: Pointer to the newly allocated completion-queue event if successful
8998 * NULL otherwise.
8999 **/
9000 struct lpfc_cq_event *
9001 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9002 {
9003 struct lpfc_cq_event *cq_event;
9004 unsigned long iflags;
9005
9006 spin_lock_irqsave(&phba->hbalock, iflags);
9007 cq_event = __lpfc_sli4_cq_event_alloc(phba);
9008 spin_unlock_irqrestore(&phba->hbalock, iflags);
9009 return cq_event;
9010 }
9011
9012 /**
9013 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9014 * @phba: pointer to lpfc hba data structure.
9015 * @cq_event: pointer to the completion queue event to be freed.
9016 *
9017 * This routine is the lock free version of the API invoked to release a
9018 * completion-queue event back into the free pool.
9019 **/
9020 void
9021 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9022 struct lpfc_cq_event *cq_event)
9023 {
9024 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
9025 }
9026
9027 /**
9028 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9029 * @phba: pointer to lpfc hba data structure.
9030 * @cq_event: pointer to the completion queue event to be freed.
9031 *
9032 * This routine is the lock version of the API invoked to release a
9033 * completion-queue event back into the free pool.
9034 **/
9035 void
9036 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9037 struct lpfc_cq_event *cq_event)
9038 {
9039 unsigned long iflags;
9040 spin_lock_irqsave(&phba->hbalock, iflags);
9041 __lpfc_sli4_cq_event_release(phba, cq_event);
9042 spin_unlock_irqrestore(&phba->hbalock, iflags);
9043 }
9044
9045 /**
9046 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
9047 * @phba: pointer to lpfc hba data structure.
9048 *
9049  * This routine is invoked to free all the pending completion-queue events
9050  * back into the free pool for device reset.
9051 **/
9052 static void
9053 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
9054 {
9055 LIST_HEAD(cqelist);
9056 struct lpfc_cq_event *cqe;
9057 unsigned long iflags;
9058
9059 /* Retrieve all the pending WCQEs from pending WCQE lists */
9060 spin_lock_irqsave(&phba->hbalock, iflags);
9061 /* Pending FCP XRI abort events */
9062 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
9063 &cqelist);
9064 /* Pending ELS XRI abort events */
9065 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
9066 &cqelist);
9067 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9068 /* Pending NVME XRI abort events */
9069 list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
9070 &cqelist);
9071 }
9072 	/* Pending async events */
9073 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
9074 &cqelist);
9075 spin_unlock_irqrestore(&phba->hbalock, iflags);
9076
9077 while (!list_empty(&cqelist)) {
9078 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
9079 lpfc_sli4_cq_event_release(phba, cqe);
9080 }
9081 }
9082
9083 /**
9084 * lpfc_pci_function_reset - Reset pci function.
9085 * @phba: pointer to lpfc hba data structure.
9086 *
9087  * This routine is invoked to request a PCI function reset. It will destroy
9088 * all resources assigned to the PCI function which originates this request.
9089 *
9090 * Return codes
9091 * 0 - successful
9092 * -ENOMEM - No available memory
9093 * -EIO - The mailbox failed to complete successfully.
9094 **/
9095 int
9096 lpfc_pci_function_reset(struct lpfc_hba *phba)
9097 {
9098 LPFC_MBOXQ_t *mboxq;
9099 uint32_t rc = 0, if_type;
9100 uint32_t shdr_status, shdr_add_status;
9101 uint32_t rdy_chk;
9102 uint32_t port_reset = 0;
9103 union lpfc_sli4_cfg_shdr *shdr;
9104 struct lpfc_register reg_data;
9105 uint16_t devid;
9106
9107 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9108 switch (if_type) {
9109 case LPFC_SLI_INTF_IF_TYPE_0:
9110 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
9111 GFP_KERNEL);
9112 if (!mboxq) {
9113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9114 "0494 Unable to allocate memory for "
9115 "issuing SLI_FUNCTION_RESET mailbox "
9116 "command\n");
9117 return -ENOMEM;
9118 }
9119
9120 /* Setup PCI function reset mailbox-ioctl command */
9121 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9122 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
9123 LPFC_SLI4_MBX_EMBED);
9124 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9125 shdr = (union lpfc_sli4_cfg_shdr *)
9126 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9127 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9128 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
9129 &shdr->response);
9130 if (rc != MBX_TIMEOUT)
9131 mempool_free(mboxq, phba->mbox_mem_pool);
9132 if (shdr_status || shdr_add_status || rc) {
9133 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9134 "0495 SLI_FUNCTION_RESET mailbox "
9135 "failed with status x%x add_status x%x,"
9136 " mbx status x%x\n",
9137 shdr_status, shdr_add_status, rc);
9138 rc = -ENXIO;
9139 }
9140 break;
9141 case LPFC_SLI_INTF_IF_TYPE_2:
9142 wait:
9143 /*
9144 * Poll the Port Status Register and wait for RDY for
9145 * up to 30 seconds. If the port doesn't respond, treat
9146 * it as an error.
9147 */
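		/* 1500 polls with a 20 ms sleep each give the ~30 second budget */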
9148 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
9149 if (lpfc_readl(phba->sli4_hba.u.if_type2.
9150 STATUSregaddr, &reg_data.word0)) {
9151 rc = -ENODEV;
9152 goto out;
9153 }
9154 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
9155 break;
9156 msleep(20);
9157 }
9158
9159 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
9160 phba->work_status[0] = readl(
9161 phba->sli4_hba.u.if_type2.ERR1regaddr);
9162 phba->work_status[1] = readl(
9163 phba->sli4_hba.u.if_type2.ERR2regaddr);
9164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9165 "2890 Port not ready, port status reg "
9166 "0x%x error 1=0x%x, error 2=0x%x\n",
9167 reg_data.word0,
9168 phba->work_status[0],
9169 phba->work_status[1]);
9170 rc = -ENODEV;
9171 goto out;
9172 }
9173
9174 if (!port_reset) {
9175 /*
9176 * Reset the port now
9177 */
9178 reg_data.word0 = 0;
9179 bf_set(lpfc_sliport_ctrl_end, &reg_data,
9180 LPFC_SLIPORT_LITTLE_ENDIAN);
9181 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
9182 LPFC_SLIPORT_INIT_PORT);
9183 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
9184 CTRLregaddr);
9185 /* flush */
9186 pci_read_config_word(phba->pcidev,
9187 PCI_DEVICE_ID, &devid);
9188
9189 port_reset = 1;
9190 msleep(20);
9191 goto wait;
9192 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
9193 rc = -ENODEV;
9194 goto out;
9195 }
9196 break;
9197
9198 case LPFC_SLI_INTF_IF_TYPE_1:
9199 default:
9200 break;
9201 }
9202
9203 out:
9204 /* Catch the not-ready port failure after a port reset. */
9205 if (rc) {
9206 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9207 "3317 HBA not functional: IP Reset Failed "
9208 "try: echo fw_reset > board_mode\n");
9209 rc = -ENODEV;
9210 }
9211
9212 return rc;
9213 }
9214
9215 /**
9216 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
9217 * @phba: pointer to lpfc hba data structure.
9218 *
9219 * This routine is invoked to set up the PCI device memory space for device
9220 * with SLI-4 interface spec.
9221 *
9222 * Return codes
9223 * 0 - successful
9224 * other values - error
9225 **/
9226 static int
9227 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
9228 {
9229 struct pci_dev *pdev;
9230 unsigned long bar0map_len, bar1map_len, bar2map_len;
9231 int error = -ENODEV;
9232 uint32_t if_type;
9233
9234 /* Obtain PCI device reference */
9235 if (!phba->pcidev)
9236 return error;
9237 else
9238 pdev = phba->pcidev;
9239
9240 	/* Prefer a 64-bit DMA mask; fall back to a 32-bit mask if that fails */
9241 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
9242 	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
9243 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
9244 		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
9245 return error;
9246 }
9247 }
9248
9249 /*
9250 * The BARs and register set definitions and offset locations are
9251 * dependent on the if_type.
9252 */
9253 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
9254 &phba->sli4_hba.sli_intf.word0)) {
9255 return error;
9256 }
9257
9258 /* There is no SLI3 failback for SLI4 devices. */
9259 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
9260 LPFC_SLI_INTF_VALID) {
9261 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9262 "2894 SLI_INTF reg contents invalid "
9263 "sli_intf reg 0x%x\n",
9264 phba->sli4_hba.sli_intf.word0);
9265 return error;
9266 }
9267
9268 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9269 /*
9270 * Get the bus address of SLI4 device Bar regions and the
9271 * number of bytes required by each mapping. The mapping of the
9272 * particular PCI BARs regions is dependent on the type of
9273 * SLI4 device.
9274 */
9275 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
9276 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
9277 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
9278
9279 /*
9280 * Map SLI4 PCI Config Space Register base to a kernel virtual
9281 * addr
9282 */
9283 phba->sli4_hba.conf_regs_memmap_p =
9284 ioremap(phba->pci_bar0_map, bar0map_len);
9285 if (!phba->sli4_hba.conf_regs_memmap_p) {
9286 dev_printk(KERN_ERR, &pdev->dev,
9287 "ioremap failed for SLI4 PCI config "
9288 "registers.\n");
9289 goto out;
9290 }
9291 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
9292 /* Set up BAR0 PCI config space register memory map */
9293 lpfc_sli4_bar0_register_memmap(phba, if_type);
9294 } else {
9295 phba->pci_bar0_map = pci_resource_start(pdev, 1);
9296 bar0map_len = pci_resource_len(pdev, 1);
9297 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
9298 dev_printk(KERN_ERR, &pdev->dev,
9299 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
9300 goto out;
9301 }
9302 phba->sli4_hba.conf_regs_memmap_p =
9303 ioremap(phba->pci_bar0_map, bar0map_len);
9304 if (!phba->sli4_hba.conf_regs_memmap_p) {
9305 dev_printk(KERN_ERR, &pdev->dev,
9306 "ioremap failed for SLI4 PCI config "
9307 "registers.\n");
9308 goto out;
9309 }
9310 lpfc_sli4_bar0_register_memmap(phba, if_type);
9311 }
9312
9313 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
9314 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
9315 /*
9316 * Map SLI4 if type 0 HBA Control Register base to a kernel
9317 * virtual address and setup the registers.
9318 */
9319 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
9320 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
9321 phba->sli4_hba.ctrl_regs_memmap_p =
9322 ioremap(phba->pci_bar1_map, bar1map_len);
9323 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
9324 dev_printk(KERN_ERR, &pdev->dev,
9325 "ioremap failed for SLI4 HBA control registers.\n");
9326 goto out_iounmap_conf;
9327 }
9328 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
9329 lpfc_sli4_bar1_register_memmap(phba);
9330 }
9331
9332 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
9333 (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
9334 /*
9335 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
9336 * virtual address and setup the registers.
9337 */
9338 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
9339 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
9340 phba->sli4_hba.drbl_regs_memmap_p =
9341 ioremap(phba->pci_bar2_map, bar2map_len);
9342 if (!phba->sli4_hba.drbl_regs_memmap_p) {
9343 dev_printk(KERN_ERR, &pdev->dev,
9344 "ioremap failed for SLI4 HBA doorbell registers.\n");
9345 goto out_iounmap_ctrl;
9346 }
9347 phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
9348 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
9349 if (error)
9350 goto out_iounmap_all;
9351 }
9352
9353 return 0;
9354
9355 out_iounmap_all:
9356 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
9357 out_iounmap_ctrl:
9358 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
9359 out_iounmap_conf:
9360 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9361 out:
9362 return error;
9363 }
9364
9365 /**
9366 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
9367 * @phba: pointer to lpfc hba data structure.
9368 *
9369 * This routine is invoked to unset the PCI device memory space for device
9370 * with SLI-4 interface spec.
9371 **/
9372 static void
9373 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
9374 {
9375 uint32_t if_type;
9376 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9377
9378 switch (if_type) {
9379 case LPFC_SLI_INTF_IF_TYPE_0:
9380 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
9381 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
9382 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9383 break;
9384 case LPFC_SLI_INTF_IF_TYPE_2:
9385 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9386 break;
9387 case LPFC_SLI_INTF_IF_TYPE_1:
9388 default:
9389 dev_printk(KERN_ERR, &phba->pcidev->dev,
9390 "FATAL - unsupported SLI4 interface type - %d\n",
9391 if_type);
9392 break;
9393 }
9394 }
9395
9396 /**
9397 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
9398 * @phba: pointer to lpfc hba data structure.
9399 *
9400 * This routine is invoked to enable the MSI-X interrupt vectors to device
9401 * with SLI-3 interface specs.
9402 *
9403 * Return codes
9404 * 0 - successful
9405 * other values - error
9406 **/
9407 static int
9408 lpfc_sli_enable_msix(struct lpfc_hba *phba)
9409 {
9410 int rc;
9411 LPFC_MBOXQ_t *pmb;
9412
9413 /* Set up MSI-X multi-message vectors */
9414 rc = pci_alloc_irq_vectors(phba->pcidev,
9415 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
9416 if (rc < 0) {
9417 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9418 "0420 PCI enable MSI-X failed (%d)\n", rc);
9419 goto vec_fail_out;
9420 }
9421
9422 /*
9423 * Assign MSI-X vectors to interrupt handlers
9424 */
9425
9426 /* vector-0 is associated to slow-path handler */
9427 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
9428 &lpfc_sli_sp_intr_handler, 0,
9429 LPFC_SP_DRIVER_HANDLER_NAME, phba);
9430 if (rc) {
9431 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9432 "0421 MSI-X slow-path request_irq failed "
9433 "(%d)\n", rc);
9434 goto msi_fail_out;
9435 }
9436
9437 /* vector-1 is associated to fast-path handler */
9438 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
9439 &lpfc_sli_fp_intr_handler, 0,
9440 LPFC_FP_DRIVER_HANDLER_NAME, phba);
9441
9442 if (rc) {
9443 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9444 "0429 MSI-X fast-path request_irq failed "
9445 "(%d)\n", rc);
9446 goto irq_fail_out;
9447 }
9448
9449 /*
9450 * Configure HBA MSI-X attention conditions to messages
9451 */
9452 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9453
9454 if (!pmb) {
9455 rc = -ENOMEM;
9456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9457 "0474 Unable to allocate memory for issuing "
9458 "MBOX_CONFIG_MSI command\n");
9459 goto mem_fail_out;
9460 }
9461 rc = lpfc_config_msi(phba, pmb);
9462 if (rc)
9463 goto mbx_fail_out;
9464 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9465 if (rc != MBX_SUCCESS) {
9466 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
9467 "0351 Config MSI mailbox command failed, "
9468 "mbxCmd x%x, mbxStatus x%x\n",
9469 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
9470 goto mbx_fail_out;
9471 }
9472
9473 /* Free memory allocated for mailbox command */
9474 mempool_free(pmb, phba->mbox_mem_pool);
9475 return rc;
9476
9477 mbx_fail_out:
9478 /* Free memory allocated for mailbox command */
9479 mempool_free(pmb, phba->mbox_mem_pool);
9480
9481 mem_fail_out:
9482 /* free the irq already requested */
9483 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
9484
9485 irq_fail_out:
9486 /* free the irq already requested */
9487 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
9488
9489 msi_fail_out:
9490 /* Unconfigure MSI-X capability structure */
9491 pci_free_irq_vectors(phba->pcidev);
9492
9493 vec_fail_out:
9494 return rc;
9495 }
9496
9497 /**
9498 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
9499 * @phba: pointer to lpfc hba data structure.
9500 *
9501 * This routine is invoked to enable the MSI interrupt mode to device with
9502 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
9503 * enable the MSI vector. The device driver is responsible for calling the
9504  * enable the MSI vector. The device driver is responsible for calling
9505  * request_irq() to register the MSI vector with an interrupt handler, which
9506 *
9507 * Return codes
9508 * 0 - successful
9509 * other values - error
9510 */
9511 static int
9512 lpfc_sli_enable_msi(struct lpfc_hba *phba)
9513 {
9514 int rc;
9515
9516 rc = pci_enable_msi(phba->pcidev);
9517 if (!rc)
9518 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9519 "0462 PCI enable MSI mode success.\n");
9520 else {
9521 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9522 "0471 PCI enable MSI mode failed (%d)\n", rc);
9523 return rc;
9524 }
9525
9526 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
9527 0, LPFC_DRIVER_NAME, phba);
9528 if (rc) {
9529 pci_disable_msi(phba->pcidev);
9530 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9531 "0478 MSI request_irq failed (%d)\n", rc);
9532 }
9533 return rc;
9534 }
9535
9536 /**
9537 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
9538 * @phba: pointer to lpfc hba data structure.
9539 *
9540 * This routine is invoked to enable device interrupt and associate driver's
9541 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
9542  * spec. Depending on the interrupt mode configured in the driver, the driver
9543  * will try to fall back from the configured interrupt mode to an interrupt
9544 * mode which is supported by the platform, kernel, and device in the order
9545 * of:
9546 * MSI-X -> MSI -> IRQ.
9547 *
9548 * Return codes
9549 * 0 - successful
9550 * other values - error
9551 **/
9552 static uint32_t
9553 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
9554 {
9555 uint32_t intr_mode = LPFC_INTR_ERROR;
9556 int retval;
9557
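	/*
	 * cfg_mode selects the most capable mode to try first: 2 attempts
	 * MSI-X, 1 (or a failed MSI-X attempt) tries MSI, and anything still
	 * unclaimed falls back to legacy INTx. The returned intr_mode mirrors
	 * whichever mode was actually enabled (2, 1 or 0), or LPFC_INTR_ERROR
	 * if none could be set up.
	 */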
9558 if (cfg_mode == 2) {
9559 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
9560 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
9561 if (!retval) {
9562 /* Now, try to enable MSI-X interrupt mode */
9563 retval = lpfc_sli_enable_msix(phba);
9564 if (!retval) {
9565 /* Indicate initialization to MSI-X mode */
9566 phba->intr_type = MSIX;
9567 intr_mode = 2;
9568 }
9569 }
9570 }
9571
9572 /* Fallback to MSI if MSI-X initialization failed */
9573 if (cfg_mode >= 1 && phba->intr_type == NONE) {
9574 retval = lpfc_sli_enable_msi(phba);
9575 if (!retval) {
9576 /* Indicate initialization to MSI mode */
9577 phba->intr_type = MSI;
9578 intr_mode = 1;
9579 }
9580 }
9581
9582 	/* Fall back to INTx if both MSI-X/MSI initialization failed */
9583 if (phba->intr_type == NONE) {
9584 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
9585 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
9586 if (!retval) {
9587 /* Indicate initialization to INTx mode */
9588 phba->intr_type = INTx;
9589 intr_mode = 0;
9590 }
9591 }
9592 return intr_mode;
9593 }
9594
9595 /**
9596 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
9597 * @phba: pointer to lpfc hba data structure.
9598 *
9599 * This routine is invoked to disable device interrupt and disassociate the
9600 * driver's interrupt handler(s) from interrupt vector(s) to device with
9601 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
9602 * release the interrupt vector(s) for the message signaled interrupt.
9603 **/
9604 static void
9605 lpfc_sli_disable_intr(struct lpfc_hba *phba)
9606 {
9607 int nr_irqs, i;
9608
9609 if (phba->intr_type == MSIX)
9610 nr_irqs = LPFC_MSIX_VECTORS;
9611 else
9612 nr_irqs = 1;
9613
9614 for (i = 0; i < nr_irqs; i++)
9615 free_irq(pci_irq_vector(phba->pcidev, i), phba);
9616 pci_free_irq_vectors(phba->pcidev);
9617
9618 /* Reset interrupt management states */
9619 phba->intr_type = NONE;
9620 phba->sli.slistat.sli_intr = 0;
9621 }
9622
9623 /**
9624 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
9625 * @phba: pointer to lpfc hba data structure.
9626 * @vectors: number of msix vectors allocated.
9627 *
9628 * The routine will figure out the CPU affinity assignment for every
9629 * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated
9630 * with a pointer to the CPU mask that defines ALL the CPUs this vector
9631  * can be associated with. If the vector can be uniquely associated with
9632 * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
9633 * In addition, the CPU to IO channel mapping will be calculated
9634 * and the phba->sli4_hba.cpu_map array will reflect this.
9635 */
9636 static void
9637 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
9638 {
9639 struct lpfc_vector_map_info *cpup;
9640 int index = 0;
9641 int vec = 0;
9642 int cpu;
9643 #ifdef CONFIG_X86
9644 struct cpuinfo_x86 *cpuinfo;
9645 #endif
9646
9647 /* Init cpu_map array */
9648 memset(phba->sli4_hba.cpu_map, 0xff,
9649 (sizeof(struct lpfc_vector_map_info) *
9650 phba->sli4_hba.num_present_cpu));
9651
9652 /* Update CPU map with physical id and core id of each CPU */
9653 cpup = phba->sli4_hba.cpu_map;
9654 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
9655 #ifdef CONFIG_X86
9656 cpuinfo = &cpu_data(cpu);
9657 cpup->phys_id = cpuinfo->phys_proc_id;
9658 cpup->core_id = cpuinfo->cpu_core_id;
9659 #else
9660 /* No distinction between CPUs for other platforms */
9661 cpup->phys_id = 0;
9662 cpup->core_id = 0;
9663 #endif
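		/*
		 * Spread CPUs over the IO channels and MSI-X vectors in a
		 * simple round robin. For example, with 8 present CPUs, 4 FCP
		 * IO channels and 2 vectors (illustrative numbers only), CPUs
		 * 0-7 map to channels 0,1,2,3,0,1,2,3 and to vectors
		 * 0,1,0,1,0,1,0,1.
		 */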
9664 cpup->channel_id = index; /* For now round robin */
9665 cpup->irq = pci_irq_vector(phba->pcidev, vec);
9666 vec++;
9667 if (vec >= vectors)
9668 vec = 0;
9669 index++;
9670 if (index >= phba->cfg_fcp_io_channel)
9671 index = 0;
9672 cpup++;
9673 }
9674 }
9675
9676
9677 /**
9678 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
9679 * @phba: pointer to lpfc hba data structure.
9680 *
9681 * This routine is invoked to enable the MSI-X interrupt vectors to device
9682 * with SLI-4 interface spec.
9683 *
9684 * Return codes
9685 * 0 - successful
9686 * other values - error
9687 **/
9688 static int
9689 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
9690 {
9691 int vectors, rc, index;
9692
9693 /* Set up MSI-X multi-message vectors */
9694 vectors = phba->io_channel_irqs;
9695 if (phba->cfg_fof)
9696 vectors++;
9697
9698 rc = pci_alloc_irq_vectors(phba->pcidev,
9699 (phba->nvmet_support) ? 1 : 2,
9700 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
9701 if (rc < 0) {
9702 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9703 "0484 PCI enable MSI-X failed (%d)\n", rc);
9704 goto vec_fail_out;
9705 }
9706 vectors = rc;
9707
9708 /* Assign MSI-X vectors to interrupt handlers */
9709 for (index = 0; index < vectors; index++) {
9710 memset(&phba->sli4_hba.handler_name[index], 0, 16);
9711 snprintf((char *)&phba->sli4_hba.handler_name[index],
9712 LPFC_SLI4_HANDLER_NAME_SZ,
9713 LPFC_DRIVER_HANDLER_NAME"%d", index);
9714
9715 phba->sli4_hba.hba_eq_hdl[index].idx = index;
9716 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
9717 atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
9718 if (phba->cfg_fof && (index == (vectors - 1)))
9719 rc = request_irq(pci_irq_vector(phba->pcidev, index),
9720 &lpfc_sli4_fof_intr_handler, 0,
9721 (char *)&phba->sli4_hba.handler_name[index],
9722 &phba->sli4_hba.hba_eq_hdl[index]);
9723 else
9724 rc = request_irq(pci_irq_vector(phba->pcidev, index),
9725 &lpfc_sli4_hba_intr_handler, 0,
9726 (char *)&phba->sli4_hba.handler_name[index],
9727 &phba->sli4_hba.hba_eq_hdl[index]);
9728 if (rc) {
9729 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9730 "0486 MSI-X fast-path (%d) "
9731 "request_irq failed (%d)\n", index, rc);
9732 goto cfg_fail_out;
9733 }
9734 }
9735
9736 if (phba->cfg_fof)
9737 vectors--;
9738
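	/*
	 * If fewer vectors were granted than io_channel_irqs requested, clamp
	 * the FCP and NVME channel counts to the granted vector count and
	 * recompute io_channel_irqs as the larger of the two. For instance, a
	 * hypothetical request for 4 FCP and 6 NVME channels that is granted
	 * only 4 vectors ends up with 4 FCP channels, 4 NVME channels and
	 * io_channel_irqs = 4.
	 */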
9739 if (vectors != phba->io_channel_irqs) {
9740 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9741 "3238 Reducing IO channels to match number of "
9742 "MSI-X vectors, requested %d got %d\n",
9743 phba->io_channel_irqs, vectors);
9744 if (phba->cfg_fcp_io_channel > vectors)
9745 phba->cfg_fcp_io_channel = vectors;
9746 if (phba->cfg_nvme_io_channel > vectors)
9747 phba->cfg_nvme_io_channel = vectors;
9748 if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
9749 phba->io_channel_irqs = phba->cfg_fcp_io_channel;
9750 else
9751 phba->io_channel_irqs = phba->cfg_nvme_io_channel;
9752 }
9753 lpfc_cpu_affinity_check(phba, vectors);
9754
9755 return rc;
9756
9757 cfg_fail_out:
9758 /* free the irq already requested */
9759 for (--index; index >= 0; index--)
9760 free_irq(pci_irq_vector(phba->pcidev, index),
9761 &phba->sli4_hba.hba_eq_hdl[index]);
9762
9763 /* Unconfigure MSI-X capability structure */
9764 pci_free_irq_vectors(phba->pcidev);
9765
9766 vec_fail_out:
9767 return rc;
9768 }
9769
9770 /**
9771 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
9772 * @phba: pointer to lpfc hba data structure.
9773 *
9774 * This routine is invoked to enable the MSI interrupt mode to device with
9775 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
9776 * to enable the MSI vector. The device driver is responsible for calling
9777  * request_irq() to register the MSI vector with an interrupt handler,
9778 * which is done in this function.
9779 *
9780 * Return codes
9781 * 0 - successful
9782 * other values - error
9783 **/
9784 static int
9785 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
9786 {
9787 int rc, index;
9788
9789 rc = pci_enable_msi(phba->pcidev);
9790 if (!rc)
9791 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9792 "0487 PCI enable MSI mode success.\n");
9793 else {
9794 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9795 "0488 PCI enable MSI mode failed (%d)\n", rc);
9796 return rc;
9797 }
9798
9799 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
9800 0, LPFC_DRIVER_NAME, phba);
9801 if (rc) {
9802 pci_disable_msi(phba->pcidev);
9803 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9804 "0490 MSI request_irq failed (%d)\n", rc);
9805 return rc;
9806 }
9807
9808 for (index = 0; index < phba->io_channel_irqs; index++) {
9809 phba->sli4_hba.hba_eq_hdl[index].idx = index;
9810 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
9811 }
9812
9813 if (phba->cfg_fof) {
9814 phba->sli4_hba.hba_eq_hdl[index].idx = index;
9815 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
9816 }
9817 return 0;
9818 }
9819
9820 /**
9821 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
9822 * @phba: pointer to lpfc hba data structure.
9823 *
9824 * This routine is invoked to enable device interrupt and associate driver's
9825 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
9826  * interface spec. Depending on the interrupt mode configured in the driver,
9827  * the driver will try to fall back from the configured interrupt mode to an
9828 * interrupt mode which is supported by the platform, kernel, and device in
9829 * the order of:
9830 * MSI-X -> MSI -> IRQ.
9831 *
9832 * Return codes
9833 * 0 - successful
9834 * other values - error
9835 **/
9836 static uint32_t
9837 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
9838 {
9839 uint32_t intr_mode = LPFC_INTR_ERROR;
9840 int retval, idx;
9841
9842 if (cfg_mode == 2) {
9843 /* Preparation before conf_msi mbox cmd */
9844 retval = 0;
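		/*
		 * Unlike the SLI-3 path, no CONFIG_PORT mailbox command needs
		 * to be issued before enabling MSI-X on SLI-4, so retval is
		 * simply cleared and the MSI-X attempt proceeds.
		 */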
9845 if (!retval) {
9846 /* Now, try to enable MSI-X interrupt mode */
9847 retval = lpfc_sli4_enable_msix(phba);
9848 if (!retval) {
9849 /* Indicate initialization to MSI-X mode */
9850 phba->intr_type = MSIX;
9851 intr_mode = 2;
9852 }
9853 }
9854 }
9855
9856 /* Fallback to MSI if MSI-X initialization failed */
9857 if (cfg_mode >= 1 && phba->intr_type == NONE) {
9858 retval = lpfc_sli4_enable_msi(phba);
9859 if (!retval) {
9860 /* Indicate initialization to MSI mode */
9861 phba->intr_type = MSI;
9862 intr_mode = 1;
9863 }
9864 }
9865
9866 	/* Fall back to INTx if both MSI-X/MSI initialization failed */
9867 if (phba->intr_type == NONE) {
9868 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
9869 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
9870 if (!retval) {
9871 struct lpfc_hba_eq_hdl *eqhdl;
9872
9873 /* Indicate initialization to INTx mode */
9874 phba->intr_type = INTx;
9875 intr_mode = 0;
9876
9877 for (idx = 0; idx < phba->io_channel_irqs; idx++) {
9878 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
9879 eqhdl->idx = idx;
9880 eqhdl->phba = phba;
9881 atomic_set(&eqhdl->hba_eq_in_use, 1);
9882 }
9883 if (phba->cfg_fof) {
9884 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
9885 eqhdl->idx = idx;
9886 eqhdl->phba = phba;
9887 atomic_set(&eqhdl->hba_eq_in_use, 1);
9888 }
9889 }
9890 }
9891 return intr_mode;
9892 }
9893
9894 /**
9895 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
9896 * @phba: pointer to lpfc hba data structure.
9897 *
9898 * This routine is invoked to disable device interrupt and disassociate
9899 * the driver's interrupt handler(s) from interrupt vector(s) to device
9900 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
9901 * will release the interrupt vector(s) for the message signaled interrupt.
9902 **/
9903 static void
9904 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
9905 {
9906 /* Disable the currently initialized interrupt mode */
9907 if (phba->intr_type == MSIX) {
9908 int index;
9909
9910 /* Free up MSI-X multi-message vectors */
9911 for (index = 0; index < phba->io_channel_irqs; index++)
9912 free_irq(pci_irq_vector(phba->pcidev, index),
9913 &phba->sli4_hba.hba_eq_hdl[index]);
9914
9915 if (phba->cfg_fof)
9916 free_irq(pci_irq_vector(phba->pcidev, index),
9917 &phba->sli4_hba.hba_eq_hdl[index]);
9918 } else {
9919 free_irq(phba->pcidev->irq, phba);
9920 }
9921
9922 pci_free_irq_vectors(phba->pcidev);
9923
9924 /* Reset interrupt management states */
9925 phba->intr_type = NONE;
9926 phba->sli.slistat.sli_intr = 0;
9927 }
9928
9929 /**
9930 * lpfc_unset_hba - Unset SLI3 hba device initialization
9931 * @phba: pointer to lpfc hba data structure.
9932 *
9933 * This routine is invoked to unset the HBA device initialization steps to
9934 * a device with SLI-3 interface spec.
9935 **/
9936 static void
9937 lpfc_unset_hba(struct lpfc_hba *phba)
9938 {
9939 struct lpfc_vport *vport = phba->pport;
9940 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9941
9942 spin_lock_irq(shost->host_lock);
9943 vport->load_flag |= FC_UNLOADING;
9944 spin_unlock_irq(shost->host_lock);
9945
9946 kfree(phba->vpi_bmask);
9947 kfree(phba->vpi_ids);
9948
9949 lpfc_stop_hba_timers(phba);
9950
9951 phba->pport->work_port_events = 0;
9952
9953 lpfc_sli_hba_down(phba);
9954
9955 lpfc_sli_brdrestart(phba);
9956
9957 lpfc_sli_disable_intr(phba);
9958
9959 return;
9960 }
9961
9962 /**
9963 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
9964 * @phba: Pointer to HBA context object.
9965 *
9966  * This function is called in the SLI4 code path to wait for the device's
9967  * XRI exchange busy conditions to complete. It checks the XRI exchange busy
9968  * state on outstanding FCP, NVME, and ELS I/Os every 10 ms for up to 10
9969  * seconds; after that, it checks every 30 seconds, logs an error message,
9970  * and waits indefinitely. Only when all XRI exchange busy conditions have
9971  * completed will the driver unload proceed with invoking the function reset
9972  * ioctl mailbox command to the CNA and the rest of the driver unload
9973  * resource release.
9974 **/
9975 static void
9976 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
9977 {
9978 int wait_time = 0;
9979 int nvme_xri_cmpl = 1;
9980 int nvmet_xri_cmpl = 1;
9981 int fcp_xri_cmpl = 1;
9982 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
9983
9984 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
9985 fcp_xri_cmpl =
9986 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9987 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9988 nvme_xri_cmpl =
9989 list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
9990 nvmet_xri_cmpl =
9991 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
9992 }
9993
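	/*
	 * Poll at the short LPFC_XRI_EXCH_BUSY_WAIT_T1 interval until the
	 * accumulated wait crosses LPFC_XRI_EXCH_BUSY_WAIT_TMO, then drop to
	 * the longer T2 interval and log which exchange types are still busy
	 * on every pass.
	 */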
9994 while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
9995 !nvmet_xri_cmpl) {
9996 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
9997 if (!nvme_xri_cmpl)
9998 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9999 "6100 NVME XRI exchange busy "
10000 "wait time: %d seconds.\n",
10001 wait_time/1000);
10002 if (!fcp_xri_cmpl)
10003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10004 "2877 FCP XRI exchange busy "
10005 "wait time: %d seconds.\n",
10006 wait_time/1000);
10007 if (!els_xri_cmpl)
10008 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10009 "2878 ELS XRI exchange busy "
10010 "wait time: %d seconds.\n",
10011 wait_time/1000);
10012 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
10013 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
10014 } else {
10015 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
10016 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
10017 }
10018 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10019 nvme_xri_cmpl = list_empty(
10020 &phba->sli4_hba.lpfc_abts_nvme_buf_list);
10021 nvmet_xri_cmpl = list_empty(
10022 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
10023 }
10024
10025 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
10026 fcp_xri_cmpl = list_empty(
10027 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
10028
10029 els_xri_cmpl =
10030 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
10031
10032 }
10033 }
10034
10035 /**
10036 * lpfc_sli4_hba_unset - Unset the fcoe hba
10037 * @phba: Pointer to HBA context object.
10038 *
10039 * This function is called in the SLI4 code path to reset the HBA's FCoE
10040 * function. The caller is not required to hold any lock. This routine
10041  * issues a PCI function reset mailbox command to reset the FCoE function.
10042 * At the end of the function, it calls lpfc_hba_down_post function to
10043 * free any pending commands.
10044 **/
10045 static void
10046 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
10047 {
10048 int wait_cnt = 0;
10049 LPFC_MBOXQ_t *mboxq;
10050 struct pci_dev *pdev = phba->pcidev;
10051
10052 lpfc_stop_hba_timers(phba);
10053 phba->sli4_hba.intr_enable = 0;
10054
10055 /*
10056 	 * Gracefully wait out any currently outstanding asynchronous
10057 * mailbox command.
10058 */
10059
10060 	/* First, block any pending async mailbox command from being posted */
10061 spin_lock_irq(&phba->hbalock);
10062 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
10063 spin_unlock_irq(&phba->hbalock);
10064 /* Now, trying to wait it out if we can */
10065 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10066 msleep(10);
10067 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
10068 break;
10069 }
10070 /* Forcefully release the outstanding mailbox command if timed out */
10071 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10072 spin_lock_irq(&phba->hbalock);
10073 mboxq = phba->sli.mbox_active;
10074 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10075 __lpfc_mbox_cmpl_put(phba, mboxq);
10076 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10077 phba->sli.mbox_active = NULL;
10078 spin_unlock_irq(&phba->hbalock);
10079 }
10080
10081 /* Abort all iocbs associated with the hba */
10082 lpfc_sli_hba_iocb_abort(phba);
10083
10084 /* Wait for completion of device XRI exchange busy */
10085 lpfc_sli4_xri_exchange_busy_wait(phba);
10086
10087 /* Disable PCI subsystem interrupt */
10088 lpfc_sli4_disable_intr(phba);
10089
10090 /* Disable SR-IOV if enabled */
10091 if (phba->cfg_sriov_nr_virtfn)
10092 pci_disable_sriov(pdev);
10093
10094 /* Stop kthread signal shall trigger work_done one more time */
10095 kthread_stop(phba->worker_thread);
10096
10097 /* Unset the queues shared with the hardware then release all
10098 * allocated resources.
10099 */
10100 lpfc_sli4_queue_unset(phba);
10101 lpfc_sli4_queue_destroy(phba);
10102
10103 /* Reset SLI4 HBA FCoE function */
10104 lpfc_pci_function_reset(phba);
10105
10106 /* Stop the SLI4 device port */
10107 phba->pport->work_port_events = 0;
10108 }
10109
10110 /**
10111 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
10112 * @phba: Pointer to HBA context object.
10113 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
10114 *
10115 * This function is called in the SLI4 code path to read the port's
10116 * sli4 capabilities.
10117 *
10118  * This function may be called from any context that can block-wait
10119 * for the completion. The expectation is that this routine is called
10120 * typically from probe_one or from the online routine.
10121 **/
10122 int
10123 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
10124 {
10125 int rc;
10126 struct lpfc_mqe *mqe;
10127 struct lpfc_pc_sli4_params *sli4_params;
10128 uint32_t mbox_tmo;
10129
10130 rc = 0;
10131 mqe = &mboxq->u.mqe;
10132
10133 /* Read the port's SLI4 Parameters port capabilities */
10134 lpfc_pc_sli4_params(mboxq);
10135 if (!phba->sli4_hba.intr_enable)
10136 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10137 else {
10138 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
10139 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
10140 }
10141
10142 if (unlikely(rc))
10143 return 1;
10144
10145 sli4_params = &phba->sli4_hba.pc_sli4_params;
10146 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
10147 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
10148 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
10149 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
10150 &mqe->un.sli4_params);
10151 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
10152 &mqe->un.sli4_params);
10153 sli4_params->proto_types = mqe->un.sli4_params.word3;
10154 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
10155 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
10156 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
10157 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
10158 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
10159 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
10160 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
10161 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
10162 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
10163 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
10164 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
10165 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
10166 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
10167 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
10168 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
10169 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
10170 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
10171 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
10172 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
10173 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
10174
10175 /* Make sure that sge_supp_len can be handled by the driver */
10176 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
10177 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
10178
10179 return rc;
10180 }
10181
10182 /**
10183 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
10184 * @phba: Pointer to HBA context object.
10185 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
10186 *
10187 * This function is called in the SLI4 code path to read the port's
10188 * sli4 capabilities.
10189 *
10190  * This function may be called from any context that can block-wait
10191 * for the completion. The expectation is that this routine is called
10192 * typically from probe_one or from the online routine.
10193 **/
10194 int
10195 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
10196 {
10197 int rc;
10198 struct lpfc_mqe *mqe = &mboxq->u.mqe;
10199 struct lpfc_pc_sli4_params *sli4_params;
10200 uint32_t mbox_tmo;
10201 int length;
10202 struct lpfc_sli4_parameters *mbx_sli4_parameters;
10203
10204 /*
10205 * By default, the driver assumes the SLI4 port requires RPI
10206 * header postings. The SLI4_PARAM response will correct this
10207 * assumption.
10208 */
10209 phba->sli4_hba.rpi_hdrs_in_use = 1;
10210
10211 /* Read the port's SLI4 Config Parameters */
10212 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
10213 sizeof(struct lpfc_sli4_cfg_mhdr));
10214 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10215 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
10216 length, LPFC_SLI4_MBX_EMBED);
10217 if (!phba->sli4_hba.intr_enable)
10218 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10219 else {
10220 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
10221 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
10222 }
10223 if (unlikely(rc))
10224 return rc;
10225 sli4_params = &phba->sli4_hba.pc_sli4_params;
10226 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
10227 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
10228 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
10229 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
10230 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
10231 mbx_sli4_parameters);
10232 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
10233 mbx_sli4_parameters);
10234 if (bf_get(cfg_phwq, mbx_sli4_parameters))
10235 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
10236 else
10237 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
10238 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
10239 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
10240 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
10241 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
10242 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
10243 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
10244 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
10245 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
10246 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
10247 mbx_sli4_parameters);
10248 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
10249 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
10250 mbx_sli4_parameters);
10251 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
10252 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
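	/*
	 * NVME support is claimed only when the port reports both the
	 * cfg_nvme and cfg_xib capability bits.
	 */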
10253 phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
10254 bf_get(cfg_xib, mbx_sli4_parameters));
10255
10256 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
10257 !phba->nvme_support) {
10258 phba->nvme_support = 0;
10259 phba->nvmet_support = 0;
10260 phba->cfg_nvmet_mrq = 0;
10261 phba->cfg_nvme_io_channel = 0;
10262 phba->io_channel_irqs = phba->cfg_fcp_io_channel;
10263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
10264 "6101 Disabling NVME support: "
10265 "Not supported by firmware: %d %d\n",
10266 bf_get(cfg_nvme, mbx_sli4_parameters),
10267 bf_get(cfg_xib, mbx_sli4_parameters));
10268
10269 /* If firmware doesn't support NVME, just use SCSI support */
10270 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
10271 return -ENODEV;
10272 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
10273 }
10274
10275 if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
10276 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
10277
10278 /* Make sure that sge_supp_len can be handled by the driver */
10279 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
10280 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
10281
10282 /*
10283  * Issue IOs with CDB embedded in WQE to minimize the number
10284 * of DMAs the firmware has to do. Setting this to 1 also forces
10285 * the driver to use 128 bytes WQEs for FCP IOs.
10286 */
10287 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
10288 phba->fcp_embed_io = 1;
10289 else
10290 phba->fcp_embed_io = 0;
10291
10292 /*
10293 * Check if the SLI port supports MDS Diagnostics
10294 */
10295 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
10296 phba->mds_diags_support = 1;
10297 else
10298 phba->mds_diags_support = 0;
10299 return 0;
10300 }
10301
10302 /**
10303 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
10304 * @pdev: pointer to PCI device
10305 * @pid: pointer to PCI device identifier
10306 *
10307 * This routine is to be called to attach a device with SLI-3 interface spec
10308 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
10309 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
10310  * information of the device and driver to see if the driver states that it can
10311 * support this kind of device. If the match is successful, the driver core
10312 * invokes this routine. If this routine determines it can claim the HBA, it
10313 * does all the initialization that it needs to do to handle the HBA properly.
10314 *
10315 * Return code
10316 * 0 - driver can claim the device
10317 * negative value - driver can not claim the device
10318 **/
10319 static int
10320 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
10321 {
10322 struct lpfc_hba *phba;
10323 struct lpfc_vport *vport = NULL;
10324 struct Scsi_Host *shost = NULL;
10325 int error;
10326 uint32_t cfg_mode, intr_mode;
10327
10328 /* Allocate memory for HBA structure */
10329 phba = lpfc_hba_alloc(pdev);
10330 if (!phba)
10331 return -ENOMEM;
10332
10333 /* Perform generic PCI device enabling operation */
10334 error = lpfc_enable_pci_dev(phba);
10335 if (error)
10336 goto out_free_phba;
10337
10338 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
10339 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
10340 if (error)
10341 goto out_disable_pci_dev;
10342
10343 /* Set up SLI-3 specific device PCI memory space */
10344 error = lpfc_sli_pci_mem_setup(phba);
10345 if (error) {
10346 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10347 "1402 Failed to set up pci memory space.\n");
10348 goto out_disable_pci_dev;
10349 }
10350
10351 /* Set up SLI-3 specific device driver resources */
10352 error = lpfc_sli_driver_resource_setup(phba);
10353 if (error) {
10354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10355 "1404 Failed to set up driver resource.\n");
10356 goto out_unset_pci_mem_s3;
10357 }
10358
10359 /* Initialize and populate the iocb list per host */
10360
10361 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
10362 if (error) {
10363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10364 "1405 Failed to initialize iocb list.\n");
10365 goto out_unset_driver_resource_s3;
10366 }
10367
10368 /* Set up common device driver resources */
10369 error = lpfc_setup_driver_resource_phase2(phba);
10370 if (error) {
10371 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10372 "1406 Failed to set up driver resource.\n");
10373 goto out_free_iocb_list;
10374 }
10375
10376 /* Get the default values for Model Name and Description */
10377 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
10378
10379 /* Create SCSI host to the physical port */
10380 error = lpfc_create_shost(phba);
10381 if (error) {
10382 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10383 "1407 Failed to create scsi host.\n");
10384 goto out_unset_driver_resource;
10385 }
10386
10387 /* Configure sysfs attributes */
10388 vport = phba->pport;
10389 error = lpfc_alloc_sysfs_attr(vport);
10390 if (error) {
10391 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10392 "1476 Failed to allocate sysfs attr\n");
10393 goto out_destroy_shost;
10394 }
10395
10396 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
10397 /* Now, trying to enable interrupt and bring up the device */
10398 cfg_mode = phba->cfg_use_msi;
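/* Try the configured interrupt mode first; if the active-interrupt test
 * below fails, fall back to the next lower interrupt mode and retry. */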
10399 while (true) {
10400 /* Put device to a known state before enabling interrupt */
10401 lpfc_stop_port(phba);
10402 /* Configure and enable interrupt */
10403 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
10404 if (intr_mode == LPFC_INTR_ERROR) {
10405 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10406 "0431 Failed to enable interrupt.\n");
10407 error = -ENODEV;
10408 goto out_free_sysfs_attr;
10409 }
10410 /* SLI-3 HBA setup */
10411 if (lpfc_sli_hba_setup(phba)) {
10412 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10413 "1477 Failed to set up hba\n");
10414 error = -ENODEV;
10415 goto out_remove_device;
10416 }
10417
10418 /* Wait 50ms for the interrupts of previous mailbox commands */
10419 msleep(50);
10420 /* Check active interrupts on message signaled interrupts */
10421 if (intr_mode == 0 ||
10422 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
10423 /* Log the current active interrupt mode */
10424 phba->intr_mode = intr_mode;
10425 lpfc_log_intr_mode(phba, intr_mode);
10426 break;
10427 } else {
10428 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10429 "0447 Configure interrupt mode (%d) "
10430 "failed active interrupt test.\n",
10431 intr_mode);
10432 /* Disable the current interrupt mode */
10433 lpfc_sli_disable_intr(phba);
10434 /* Try next level of interrupt mode */
10435 cfg_mode = --intr_mode;
10436 }
10437 }
10438
10439 /* Perform post initialization setup */
10440 lpfc_post_init_setup(phba);
10441
10442 /* Check if there are static vports to be created. */
10443 lpfc_create_static_vport(phba);
10444
10445 return 0;
10446
10447 out_remove_device:
10448 lpfc_unset_hba(phba);
10449 out_free_sysfs_attr:
10450 lpfc_free_sysfs_attr(vport);
10451 out_destroy_shost:
10452 lpfc_destroy_shost(phba);
10453 out_unset_driver_resource:
10454 lpfc_unset_driver_resource_phase2(phba);
10455 out_free_iocb_list:
10456 lpfc_free_iocb_list(phba);
10457 out_unset_driver_resource_s3:
10458 lpfc_sli_driver_resource_unset(phba);
10459 out_unset_pci_mem_s3:
10460 lpfc_sli_pci_mem_unset(phba);
10461 out_disable_pci_dev:
10462 lpfc_disable_pci_dev(phba);
10463 if (shost)
10464 scsi_host_put(shost);
10465 out_free_phba:
10466 lpfc_hba_free(phba);
10467 return error;
10468 }
10469
10470 /**
10471 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
10472 * @pdev: pointer to PCI device
10473 *
10474 * This routine is to be called to detach a device with SLI-3 interface
10475 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
10476 * removed from the PCI bus, it performs all the necessary cleanup for the HBA
10477 * device to be removed from the PCI subsystem properly.
10478 **/
10479 static void
10480 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
10481 {
10482 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10483 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
10484 struct lpfc_vport **vports;
10485 struct lpfc_hba *phba = vport->phba;
10486 int i;
10487
10488 spin_lock_irq(&phba->hbalock);
10489 vport->load_flag |= FC_UNLOADING;
10490 spin_unlock_irq(&phba->hbalock);
10491
10492 lpfc_free_sysfs_attr(vport);
10493
10494 /* Release all the vports against this physical port */
10495 vports = lpfc_create_vport_work_array(phba);
10496 if (vports != NULL)
10497 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
10498 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
10499 continue;
10500 fc_vport_terminate(vports[i]->fc_vport);
10501 }
10502 lpfc_destroy_vport_work_array(phba, vports);
10503
10504 /* Remove FC host and then SCSI host with the physical port */
10505 fc_remove_host(shost);
10506 scsi_remove_host(shost);
10507
10508 lpfc_cleanup(vport);
10509
10510 /*
10511 * Bring down the SLI Layer. This step disables all interrupts,
10512 * clears the rings, discards all mailbox commands, and resets
10513 * the HBA.
10514 */
10515
10516 /* HBA interrupt will be disabled after this call */
10517 lpfc_sli_hba_down(phba);
10518 /* Stop kthread signal shall trigger work_done one more time */
10519 kthread_stop(phba->worker_thread);
10520 /* Final cleanup of txcmplq and reset the HBA */
10521 lpfc_sli_brdrestart(phba);
10522
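/* Free the VPI bitmask and VPI id lookup array */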
10523 kfree(phba->vpi_bmask);
10524 kfree(phba->vpi_ids);
10525
10526 lpfc_stop_hba_timers(phba);
10527 spin_lock_irq(&phba->hbalock);
10528 list_del_init(&vport->listentry);
10529 spin_unlock_irq(&phba->hbalock);
10530
10531 lpfc_debugfs_terminate(vport);
10532
10533 /* Disable SR-IOV if enabled */
10534 if (phba->cfg_sriov_nr_virtfn)
10535 pci_disable_sriov(pdev);
10536
10537 /* Disable interrupt */
10538 lpfc_sli_disable_intr(phba);
10539
10540 scsi_host_put(shost);
10541
10542 /*
10543 * Call scsi_free before mem_free since scsi bufs are released to their
10544 * corresponding pools here.
10545 */
10546 lpfc_scsi_free(phba);
10547 lpfc_mem_free_all(phba);
10548
10549 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
10550 phba->hbqslimp.virt, phba->hbqslimp.phys);
10551
10552 /* Free resources associated with SLI2 interface */
10553 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
10554 phba->slim2p.virt, phba->slim2p.phys);
10555
10556 /* unmap adapter SLIM and Control Registers */
10557 iounmap(phba->ctrl_regs_memmap_p);
10558 iounmap(phba->slim_memmap_p);
10559
10560 lpfc_hba_free(phba);
10561
10562 pci_release_mem_regions(pdev);
10563 pci_disable_device(pdev);
10564 }
10565
10566 /**
10567 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
10568 * @pdev: pointer to PCI device
10569 * @msg: power management message
10570 *
10571 * This routine is to be called from the kernel's PCI subsystem to support
10572 * system Power Management (PM) to a device with SLI-3 interface spec. When
10573 * PM invokes this method, it quiesces the device by stopping the driver's
10574 * worker thread for the device, turning off the device's interrupt and DMA,
10575 * and bringing the device offline. Note that as the driver implements the
10576 * minimum PM requirements to a power-aware driver's PM support for the
10577 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
10578 * to the suspend() method call will be treated as SUSPEND and the driver will
10579 * fully reinitialize its device during resume() method call, the driver will
10580 * set device to PCI_D3hot state in PCI config space instead of setting it
10581 * according to the @msg provided by the PM.
10582 *
10583 * Return code
10584 * 0 - driver suspended the device
10585 * Error otherwise
10586 **/
10587 static int
10588 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
10589 {
10590 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10591 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10592
10593 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10594 "0473 PCI device Power Management suspend.\n");
10595
10596 /* Bring down the device */
10597 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
10598 lpfc_offline(phba);
10599 kthread_stop(phba->worker_thread);
10600
10601 /* Disable interrupt from device */
10602 lpfc_sli_disable_intr(phba);
10603
10604 /* Save device state to PCI config space */
10605 pci_save_state(pdev);
10606 pci_set_power_state(pdev, PCI_D3hot);
10607
10608 return 0;
10609 }
10610
10611 /**
10612 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
10613 * @pdev: pointer to PCI device
10614 *
10615 * This routine is to be called from the kernel's PCI subsystem to support
10616 * system Power Management (PM) to a device with SLI-3 interface spec. When PM
10617 * invokes this method, it restores the device's PCI config space state and
10618 * fully reinitializes the device and brings it online. Note that as the
10619 * driver implements the minimum PM requirements to a power-aware driver's
10620 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
10621 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
10622 * driver will fully reinitialize its device during resume() method call,
10623 * the device will be set to PCI_D0 directly in PCI config space before
10624 * restoring the state.
10625 *
10626 * Return code
10627 * 0 - driver resumed the device
10628 * Error otherwise
10629 **/
10630 static int
10631 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
10632 {
10633 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10634 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10635 uint32_t intr_mode;
10636 int error;
10637
10638 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10639 "0452 PCI device Power Management resume.\n");
10640
10641 /* Restore device state from PCI config space */
10642 pci_set_power_state(pdev, PCI_D0);
10643 pci_restore_state(pdev);
10644
10645 /*
10646 * As the new kernel behavior of pci_restore_state() clears the device's
10647 * saved_state flag, the restored state needs to be saved again.
10648 */
10649 pci_save_state(pdev);
10650
10651 if (pdev->is_busmaster)
10652 pci_set_master(pdev);
10653
10654 /* Startup the kernel thread for this host adapter. */
10655 phba->worker_thread = kthread_run(lpfc_do_work, phba,
10656 "lpfc_worker_%d", phba->brd_no);
10657 if (IS_ERR(phba->worker_thread)) {
10658 error = PTR_ERR(phba->worker_thread);
10659 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10660 "0434 PM resume failed to start worker "
10661 "thread: error=x%x.\n", error);
10662 return error;
10663 }
10664
10665 /* Configure and enable interrupt */
10666 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
10667 if (intr_mode == LPFC_INTR_ERROR) {
10668 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10669 "0430 PM resume Failed to enable interrupt\n");
10670 return -EIO;
10671 } else
10672 phba->intr_mode = intr_mode;
10673
10674 /* Restart HBA and bring it online */
10675 lpfc_sli_brdrestart(phba);
10676 lpfc_online(phba);
10677
10678 /* Log the current active interrupt mode */
10679 lpfc_log_intr_mode(phba, phba->intr_mode);
10680
10681 return 0;
10682 }
10683
10684 /**
10685 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
10686 * @phba: pointer to lpfc hba data structure.
10687 *
10688 * This routine is called to prepare the SLI3 device for PCI slot recover. It
10689 * aborts all the outstanding SCSI I/Os to the pci device.
10690 **/
10691 static void
10692 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
10693 {
10694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10695 "2723 PCI channel I/O abort preparing for recovery\n");
10696
10697 /*
10698 * There may be errored I/Os through the HBA, abort all I/Os on txcmplq
10699 * and let the SCSI mid-layer retry them to recover.
10700 */
10701 lpfc_sli_abort_fcp_rings(phba);
10702 }
10703
10704 /**
10705 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
10706 * @phba: pointer to lpfc hba data structure.
10707 *
10708 * This routine is called to prepare the SLI3 device for PCI slot reset. It
10709 * disables the device interrupt and pci device, and aborts the internal FCP
10710 * pending I/Os.
10711 **/
10712 static void
10713 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
10714 {
10715 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10716 "2710 PCI channel disable preparing for reset\n");
10717
10718 /* Block any management I/Os to the device */
10719 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
10720
10721 /* Block all SCSI devices' I/Os on the host */
10722 lpfc_scsi_dev_block(phba);
10723
10724 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
10725 lpfc_sli_flush_fcp_rings(phba);
10726
10727 /* stop all timers */
10728 lpfc_stop_hba_timers(phba);
10729
10730 /* Disable interrupt and pci device */
10731 lpfc_sli_disable_intr(phba);
10732 pci_disable_device(phba->pcidev);
10733 }
10734
10735 /**
10736 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
10737 * @phba: pointer to lpfc hba data structure.
10738 *
10739 * This routine is called to prepare the SLI3 device for PCI slot permanently
10740 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
10741 * pending I/Os.
10742 **/
10743 static void
10744 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
10745 {
10746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10747 "2711 PCI channel permanent disable for failure\n");
10748 /* Block all SCSI devices' I/Os on the host */
10749 lpfc_scsi_dev_block(phba);
10750
10751 /* stop all timers */
10752 lpfc_stop_hba_timers(phba);
10753
10754 /* Clean up all driver's outstanding SCSI I/Os */
10755 lpfc_sli_flush_fcp_rings(phba);
10756 }
10757
10758 /**
10759 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
10760 * @pdev: pointer to PCI device.
10761 * @state: the current PCI connection state.
10762 *
10763 * This routine is called from the PCI subsystem for I/O error handling to
10764 * a device with SLI-3 interface spec. This function is called by the PCI
10765 * subsystem after a PCI bus error affecting this device has been detected.
10766 * When this function is invoked, it will need to stop all the I/Os and
10767 * interrupt(s) to the device. Once that is done, it will return
10768 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
10769 * as desired.
10770 *
10771 * Return codes
10772 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
10773 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
10774 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10775 **/
10776 static pci_ers_result_t
10777 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
10778 {
10779 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10780 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10781
10782 switch (state) {
10783 case pci_channel_io_normal:
10784 /* Non-fatal error, prepare for recovery */
10785 lpfc_sli_prep_dev_for_recover(phba);
10786 return PCI_ERS_RESULT_CAN_RECOVER;
10787 case pci_channel_io_frozen:
10788 /* Fatal error, prepare for slot reset */
10789 lpfc_sli_prep_dev_for_reset(phba);
10790 return PCI_ERS_RESULT_NEED_RESET;
10791 case pci_channel_io_perm_failure:
10792 /* Permanent failure, prepare for device down */
10793 lpfc_sli_prep_dev_for_perm_failure(phba);
10794 return PCI_ERS_RESULT_DISCONNECT;
10795 default:
10796 /* Unknown state, prepare and request slot reset */
10797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10798 "0472 Unknown PCI error state: x%x\n", state);
10799 lpfc_sli_prep_dev_for_reset(phba);
10800 return PCI_ERS_RESULT_NEED_RESET;
10801 }
10802 }
10803
10804 /**
10805 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
10806 * @pdev: pointer to PCI device.
10807 *
10808 * This routine is called from the PCI subsystem for error handling to
10809 * a device with SLI-3 interface spec. This is called after the PCI bus has been
10810 * reset to restart the PCI card from scratch, as if from a cold-boot.
10811 * During the PCI subsystem error recovery, after the driver returns
10812 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
10813 * recovery and then call this routine before calling the .resume method
10814 * to recover the device. This function will initialize the HBA device,
10815 * enable the interrupt, but it will just put the HBA to offline state
10816 * without passing any I/O traffic.
10817 *
10818 * Return codes
10819 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
10820 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10821 */
10822 static pci_ers_result_t
10823 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
10824 {
10825 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10826 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10827 struct lpfc_sli *psli = &phba->sli;
10828 uint32_t intr_mode;
10829
10830 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
10831 if (pci_enable_device_mem(pdev)) {
10832 printk(KERN_ERR "lpfc: Cannot re-enable "
10833 "PCI device after reset.\n");
10834 return PCI_ERS_RESULT_DISCONNECT;
10835 }
10836
10837 pci_restore_state(pdev);
10838
10839 /*
10840 * As the new kernel behavior of pci_restore_state() clears the device's
10841 * saved_state flag, the restored state needs to be saved again.
10842 */
10843 pci_save_state(pdev);
10844
10845 if (pdev->is_busmaster)
10846 pci_set_master(pdev);
10847
10848 spin_lock_irq(&phba->hbalock);
10849 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
10850 spin_unlock_irq(&phba->hbalock);
10851
10852 /* Configure and enable interrupt */
10853 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
10854 if (intr_mode == LPFC_INTR_ERROR) {
10855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10856 "0427 Cannot re-enable interrupt after "
10857 "slot reset.\n");
10858 return PCI_ERS_RESULT_DISCONNECT;
10859 } else
10860 phba->intr_mode = intr_mode;
10861
10862 /* Take device offline, it will perform cleanup */
10863 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
10864 lpfc_offline(phba);
10865 lpfc_sli_brdrestart(phba);
10866
10867 /* Log the current active interrupt mode */
10868 lpfc_log_intr_mode(phba, phba->intr_mode);
10869
10870 return PCI_ERS_RESULT_RECOVERED;
10871 }
10872
10873 /**
10874 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
10875 * @pdev: pointer to PCI device
10876 *
10877 * This routine is called from the PCI subsystem for error handling to a device
10878 * with SLI-3 interface spec. It is called when kernel error recovery tells
10879 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
10880 * error recovery. After this call, traffic can start to flow from this device
10881 * again.
10882 */
10883 static void
10884 lpfc_io_resume_s3(struct pci_dev *pdev)
10885 {
10886 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10887 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10888
10889 /* Bring device online, it will be no-op for non-fatal error resume */
10890 lpfc_online(phba);
10891
10892 /* Clean up Advanced Error Reporting (AER) if needed */
10893 if (phba->hba_flag & HBA_AER_ENABLED)
10894 pci_cleanup_aer_uncorrect_error_status(pdev);
10895 }
10896
10897 /**
10898 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
10899 * @phba: pointer to lpfc hba data structure.
10900 *
10901 * returns the number of ELS/CT IOCBs to reserve
10902 **/
10903 int
10904 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
10905 {
10906 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
10907
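/* Scale the ELS/CT IOCB reservation with the configured XRI count */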
10908 if (phba->sli_rev == LPFC_SLI_REV4) {
10909 if (max_xri <= 100)
10910 return 10;
10911 else if (max_xri <= 256)
10912 return 25;
10913 else if (max_xri <= 512)
10914 return 50;
10915 else if (max_xri <= 1024)
10916 return 100;
10917 else if (max_xri <= 1536)
10918 return 150;
10919 else if (max_xri <= 2048)
10920 return 200;
10921 else
10922 return 250;
10923 } else
10924 return 0;
10925 }
10926
10927 /**
10928 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
10929 * @phba: pointer to lpfc hba data structure.
10930 *
10931 * returns the number of ELS/CT + NVMET IOCBs to reserve
10932 **/
10933 int
10934 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
10935 {
10936 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
10937
10938 if (phba->nvmet_support)
10939 max_xri += LPFC_NVMET_BUF_POST;
10940 return max_xri;
10941 }
10942
10943
10944 /**
10945 * lpfc_write_firmware - attempt to write a firmware image to the port
10946 * @fw: pointer to firmware image returned from request_firmware.
10947 * @context: pointer to lpfc hba data structure (passed as an opaque context).
10948 *
10949 **/
10950 static void
10951 lpfc_write_firmware(const struct firmware *fw, void *context)
10952 {
10953 struct lpfc_hba *phba = (struct lpfc_hba *)context;
10954 char fwrev[FW_REV_STR_SIZE];
10955 struct lpfc_grp_hdr *image;
10956 struct list_head dma_buffer_list;
10957 int i, rc = 0;
10958 struct lpfc_dmabuf *dmabuf, *next;
10959 uint32_t offset = 0, temp_offset = 0;
10960 uint32_t magic_number, ftype, fid, fsize;
10961
10962 /* It can be null in no-wait mode, sanity check */
10963 if (!fw) {
10964 rc = -ENXIO;
10965 goto out;
10966 }
10967 image = (struct lpfc_grp_hdr *)fw->data;
10968
10969 magic_number = be32_to_cpu(image->magic_number);
10970 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
10971 fid = bf_get_be32(lpfc_grp_hdr_id, image);
10972 fsize = be32_to_cpu(image->size);
10973
10974 INIT_LIST_HEAD(&dma_buffer_list);
10975 if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
10976 magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
10977 ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
10978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10979 "3022 Invalid FW image found. "
10980 "Magic:%x Type:%x ID:%x Size %d %zd\n",
10981 magic_number, ftype, fid, fsize, fw->size);
10982 rc = -EINVAL;
10983 goto release_out;
10984 }
10985 lpfc_decode_firmware_rev(phba, fwrev, 1);
10986 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
10987 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10988 "3023 Updating Firmware, Current Version:%s "
10989 "New Version:%s\n",
10990 fwrev, image->revision);
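/* Pre-allocate DMA buffers (one SLI4 page each) to stage the image */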
10991 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
10992 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
10993 GFP_KERNEL);
10994 if (!dmabuf) {
10995 rc = -ENOMEM;
10996 goto release_out;
10997 }
10998 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
10999 SLI4_PAGE_SIZE,
11000 &dmabuf->phys,
11001 GFP_KERNEL);
11002 if (!dmabuf->virt) {
11003 kfree(dmabuf);
11004 rc = -ENOMEM;
11005 goto release_out;
11006 }
11007 list_add_tail(&dmabuf->list, &dma_buffer_list);
11008 }
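/* Copy the image into the staged buffers and write each chunk to the port */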
11009 while (offset < fw->size) {
11010 temp_offset = offset;
11011 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
11012 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
11013 memcpy(dmabuf->virt,
11014 fw->data + temp_offset,
11015 fw->size - temp_offset);
11016 temp_offset = fw->size;
11017 break;
11018 }
11019 memcpy(dmabuf->virt, fw->data + temp_offset,
11020 SLI4_PAGE_SIZE);
11021 temp_offset += SLI4_PAGE_SIZE;
11022 }
11023 rc = lpfc_wr_object(phba, &dma_buffer_list,
11024 (fw->size - offset), &offset);
11025 if (rc)
11026 goto release_out;
11027 }
11028 rc = offset;
11029 }
11030
11031 release_out:
11032 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
11033 list_del(&dmabuf->list);
11034 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
11035 dmabuf->virt, dmabuf->phys);
11036 kfree(dmabuf);
11037 }
11038 release_firmware(fw);
11039 out:
11040 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11041 "3024 Firmware update done: %d.\n", rc);
11042 return;
11043 }
11044
11045 /**
11046 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
11047 * @phba: pointer to lpfc hba data structure.
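 * @fw_upgrade: upgrade action to perform, INT_FW_UPGRADE or RUN_FW_UPGRADE.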
11048 *
11049 * This routine is called to perform a Linux generic firmware upgrade on a
11050 * device that supports such a feature.
11051 **/
11052 int
11053 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
11054 {
11055 uint8_t file_name[ELX_MODEL_NAME_SIZE];
11056 int ret;
11057 const struct firmware *fw;
11058
11059 /* Only supported on SLI4 interface type 2 for now */
11060 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
11061 LPFC_SLI_INTF_IF_TYPE_2)
11062 return -EPERM;
11063
11064 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
11065
11066 if (fw_upgrade == INT_FW_UPGRADE) {
11067 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
11068 file_name, &phba->pcidev->dev,
11069 GFP_KERNEL, (void *)phba,
11070 lpfc_write_firmware);
11071 } else if (fw_upgrade == RUN_FW_UPGRADE) {
11072 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
11073 if (!ret)
11074 lpfc_write_firmware(fw, (void *)phba);
11075 } else {
11076 ret = -EINVAL;
11077 }
11078
11079 return ret;
11080 }
11081
11082 /**
11083 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
11084 * @pdev: pointer to PCI device
11085 * @pid: pointer to PCI device identifier
11086 *
11087 * This routine is called from the kernel's PCI subsystem to attach a device
11088 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
11089 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-specific
11090 * information of the device and driver to see if the driver states that it
11091 * can support this kind of device. If the match is successful, the driver
11092 * core invokes this routine. If this routine determines it can claim the HBA,
11093 * it does all the initialization that it needs to do to handle the HBA
11094 * properly.
11095 *
11096 * Return code
11097 * 0 - driver can claim the device
11098 * negative value - driver can not claim the device
11099 **/
11100 static int
11101 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
11102 {
11103 struct lpfc_hba *phba;
11104 struct lpfc_vport *vport = NULL;
11105 struct Scsi_Host *shost = NULL;
11106 int error, cnt;
11107 uint32_t cfg_mode, intr_mode;
11108
11109 /* Allocate memory for HBA structure */
11110 phba = lpfc_hba_alloc(pdev);
11111 if (!phba)
11112 return -ENOMEM;
11113
11114 /* Perform generic PCI device enabling operation */
11115 error = lpfc_enable_pci_dev(phba);
11116 if (error)
11117 goto out_free_phba;
11118
11119 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
11120 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
11121 if (error)
11122 goto out_disable_pci_dev;
11123
11124 /* Set up SLI-4 specific device PCI memory space */
11125 error = lpfc_sli4_pci_mem_setup(phba);
11126 if (error) {
11127 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11128 "1410 Failed to set up pci memory space.\n");
11129 goto out_disable_pci_dev;
11130 }
11131
11132 /* Set up SLI-4 Specific device driver resources */
11133 error = lpfc_sli4_driver_resource_setup(phba);
11134 if (error) {
11135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11136 "1412 Failed to set up driver resource.\n");
11137 goto out_unset_pci_mem_s4;
11138 }
11139
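/* Size the IOCB list: cfg_iocb_cnt is in units of 1024 entries; add room
 * for NVMET receive buffers when target mode is enabled. */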
11140 cnt = phba->cfg_iocb_cnt * 1024;
11141 if (phba->nvmet_support)
11142 cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq;
11143
11144 /* Initialize and populate the iocb list per host */
11145 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11146 "2821 initialize iocb list %d total %d\n",
11147 phba->cfg_iocb_cnt, cnt);
11148 error = lpfc_init_iocb_list(phba, cnt);
11149
11150 if (error) {
11151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11152 "1413 Failed to initialize iocb list.\n");
11153 goto out_unset_driver_resource_s4;
11154 }
11155
11156 INIT_LIST_HEAD(&phba->active_rrq_list);
11157 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
11158
11159 /* Set up common device driver resources */
11160 error = lpfc_setup_driver_resource_phase2(phba);
11161 if (error) {
11162 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11163 "1414 Failed to set up driver resource.\n");
11164 goto out_free_iocb_list;
11165 }
11166
11167 /* Get the default values for Model Name and Description */
11168 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
11169
11170 /* Create SCSI host to the physical port */
11171 error = lpfc_create_shost(phba);
11172 if (error) {
11173 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11174 "1415 Failed to create scsi host.\n");
11175 goto out_unset_driver_resource;
11176 }
11177
11178 /* Configure sysfs attributes */
11179 vport = phba->pport;
11180 error = lpfc_alloc_sysfs_attr(vport);
11181 if (error) {
11182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11183 "1416 Failed to allocate sysfs attr\n");
11184 goto out_destroy_shost;
11185 }
11186
11187 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
11188 /* Now, trying to enable interrupt and bring up the device */
11189 cfg_mode = phba->cfg_use_msi;
11190
11191 /* Put device to a known state before enabling interrupt */
11192 lpfc_stop_port(phba);
11193
11194 /* Configure and enable interrupt */
11195 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
11196 if (intr_mode == LPFC_INTR_ERROR) {
11197 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11198 "0426 Failed to enable interrupt.\n");
11199 error = -ENODEV;
11200 goto out_free_sysfs_attr;
11201 }
11202 /* Default to single EQ for non-MSI-X */
11203 if (phba->intr_type != MSIX) {
11204 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
11205 phba->cfg_fcp_io_channel = 1;
11206 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11207 phba->cfg_nvme_io_channel = 1;
11208 if (phba->nvmet_support)
11209 phba->cfg_nvmet_mrq = 1;
11210 }
11211 phba->io_channel_irqs = 1;
11212 }
11213
11214 /* Set up SLI-4 HBA */
11215 if (lpfc_sli4_hba_setup(phba)) {
11216 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11217 "1421 Failed to set up hba\n");
11218 error = -ENODEV;
11219 goto out_disable_intr;
11220 }
11221
11222 /* Log the current active interrupt mode */
11223 phba->intr_mode = intr_mode;
11224 lpfc_log_intr_mode(phba, intr_mode);
11225
11226 /* Perform post initialization setup */
11227 lpfc_post_init_setup(phba);
11228
11229 /* NVME support in FW earlier in the driver load corrects the
11230 * FC4 type, making a check for nvme_support unnecessary.
11231 */
11232 if ((phba->nvmet_support == 0) &&
11233 (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
11234 /* Create NVME binding with nvme_fc_transport. This
11235 * ensures the vport is initialized. If the localport
11236 * create fails, it should not unload the driver to
11237 * support field issues.
11238 */
11239 error = lpfc_nvme_create_localport(vport);
11240 if (error) {
11241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11242 "6004 NVME registration failed, "
11243 "error x%x\n",
11244 error);
11245 }
11246 }
11247
11248 /* check for firmware upgrade or downgrade */
11249 if (phba->cfg_request_firmware_upgrade)
11250 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
11251
11252 /* Check if there are static vports to be created. */
11253 lpfc_create_static_vport(phba);
11254 return 0;
11255
11256 out_disable_intr:
11257 lpfc_sli4_disable_intr(phba);
11258 out_free_sysfs_attr:
11259 lpfc_free_sysfs_attr(vport);
11260 out_destroy_shost:
11261 lpfc_destroy_shost(phba);
11262 out_unset_driver_resource:
11263 lpfc_unset_driver_resource_phase2(phba);
11264 out_free_iocb_list:
11265 lpfc_free_iocb_list(phba);
11266 out_unset_driver_resource_s4:
11267 lpfc_sli4_driver_resource_unset(phba);
11268 out_unset_pci_mem_s4:
11269 lpfc_sli4_pci_mem_unset(phba);
11270 out_disable_pci_dev:
11271 lpfc_disable_pci_dev(phba);
11272 if (shost)
11273 scsi_host_put(shost);
11274 out_free_phba:
11275 lpfc_hba_free(phba);
11276 return error;
11277 }
11278
11279 /**
11280 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
11281 * @pdev: pointer to PCI device
11282 *
11283 * This routine is called from the kernel's PCI subsystem to detach a device
11284 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
11285 * removed from the PCI bus, it performs all the necessary cleanup for the HBA
11286 * device to be removed from the PCI subsystem properly.
11287 **/
11288 static void
11289 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
11290 {
11291 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11292 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
11293 struct lpfc_vport **vports;
11294 struct lpfc_hba *phba = vport->phba;
11295 int i;
11296
11297 /* Mark the device unloading flag */
11298 spin_lock_irq(&phba->hbalock);
11299 vport->load_flag |= FC_UNLOADING;
11300 spin_unlock_irq(&phba->hbalock);
11301
11302 /* Free the HBA sysfs attributes */
11303 lpfc_free_sysfs_attr(vport);
11304
11305 /* Release all the vports against this physical port */
11306 vports = lpfc_create_vport_work_array(phba);
11307 if (vports != NULL)
11308 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
11309 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
11310 continue;
11311 fc_vport_terminate(vports[i]->fc_vport);
11312 }
11313 lpfc_destroy_vport_work_array(phba, vports);
11314
11315 /* Remove FC host and then SCSI host with the physical port */
11316 fc_remove_host(shost);
11317 scsi_remove_host(shost);
11318
11319 /* Perform ndlp cleanup on the physical port. The nvme and nvmet
11320 * localports are destroyed afterwards to clean up all transport memory.
11321 */
11322 lpfc_cleanup(vport);
11323 lpfc_nvmet_destroy_targetport(phba);
11324 lpfc_nvme_destroy_localport(vport);
11325
11326 /*
11327 * Bring down the SLI Layer. This step disables all interrupts,
11328 * clears the rings, discards all mailbox commands, and resets
11329 * the HBA FCoE function.
11330 */
11331 lpfc_debugfs_terminate(vport);
11332 lpfc_sli4_hba_unset(phba);
11333
11334 spin_lock_irq(&phba->hbalock);
11335 list_del_init(&vport->listentry);
11336 spin_unlock_irq(&phba->hbalock);
11337
11338 /* Perform scsi free before driver resource_unset since scsi
11339 * buffers are released to their corresponding pools here.
11340 */
11341 lpfc_scsi_free(phba);
11342 lpfc_nvme_free(phba);
11343 lpfc_free_iocb_list(phba);
11344
11345 lpfc_sli4_driver_resource_unset(phba);
11346
11347 /* Unmap adapter Control and Doorbell registers */
11348 lpfc_sli4_pci_mem_unset(phba);
11349
11350 /* Release PCI resources and disable device's PCI function */
11351 scsi_host_put(shost);
11352 lpfc_disable_pci_dev(phba);
11353
11354 /* Finally, free the driver's device data structure */
11355 lpfc_hba_free(phba);
11356
11357 return;
11358 }
11359
11360 /**
11361 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
11362 * @pdev: pointer to PCI device
11363 * @msg: power management message
11364 *
11365 * This routine is called from the kernel's PCI subsystem to support system
11366 * Power Management (PM) to a device with SLI-4 interface spec. When PM invokes
11367 * this method, it quiesces the device by stopping the driver's worker
11368 * thread for the device, turning off the device's interrupt and DMA, and
11369 * bringing the device offline. Note that as the driver implements the minimum PM
11370 * requirements to a power-aware driver's PM support for suspend/resume -- all
11371 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
11372 * method call will be treated as SUSPEND and the driver will fully
11373 * reinitialize its device during resume() method call, the driver will set
11374 * device to PCI_D3hot state in PCI config space instead of setting it
11375 * according to the @msg provided by the PM.
11376 *
11377 * Return code
11378 * 0 - driver suspended the device
11379 * Error otherwise
11380 **/
11381 static int
11382 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
11383 {
11384 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11385 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11386
11387 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11388 "2843 PCI device Power Management suspend.\n");
11389
11390 /* Bring down the device */
11391 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
11392 lpfc_offline(phba);
11393 kthread_stop(phba->worker_thread);
11394
11395 /* Disable interrupt from device */
11396 lpfc_sli4_disable_intr(phba);
11397 lpfc_sli4_queue_destroy(phba);
11398
11399 /* Save device state to PCI config space */
11400 pci_save_state(pdev);
11401 pci_set_power_state(pdev, PCI_D3hot);
11402
11403 return 0;
11404 }
11405
11406 /**
11407 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
11408 * @pdev: pointer to PCI device
11409 *
11410 * This routine is called from the kernel's PCI subsystem to support system
11411 * Power Management (PM) to a device with SLI-4 interface spec. When PM invokes
11412 * this method, it restores the device's PCI config space state and fully
11413 * reinitializes the device and brings it online. Note that as the driver
11414 * implements the minimum PM requirements to a power-aware driver's PM for
11415 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
11416 * to the suspend() method call will be treated as SUSPEND and the driver
11417 * will fully reinitialize its device during resume() method call, the device
11418 * will be set to PCI_D0 directly in PCI config space before restoring the
11419 * state.
11420 *
11421 * Return code
11422 * 0 - driver resumed the device
11423 * Error otherwise
11424 **/
11425 static int
11426 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
11427 {
11428 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11429 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11430 uint32_t intr_mode;
11431 int error;
11432
11433 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11434 "0292 PCI device Power Management resume.\n");
11435
11436 /* Restore device state from PCI config space */
11437 pci_set_power_state(pdev, PCI_D0);
11438 pci_restore_state(pdev);
11439
11440 /*
11441 * As the new kernel behavior of pci_restore_state() clears the device's
11442 * saved_state flag, the restored state needs to be saved again.
11443 */
11444 pci_save_state(pdev);
11445
11446 if (pdev->is_busmaster)
11447 pci_set_master(pdev);
11448
11449 /* Startup the kernel thread for this host adapter. */
11450 phba->worker_thread = kthread_run(lpfc_do_work, phba,
11451 "lpfc_worker_%d", phba->brd_no);
11452 if (IS_ERR(phba->worker_thread)) {
11453 error = PTR_ERR(phba->worker_thread);
11454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11455 "0293 PM resume failed to start worker "
11456 "thread: error=x%x.\n", error);
11457 return error;
11458 }
11459
11460 /* Configure and enable interrupt */
11461 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
11462 if (intr_mode == LPFC_INTR_ERROR) {
11463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11464 "0294 PM resume Failed to enable interrupt\n");
11465 return -EIO;
11466 } else
11467 phba->intr_mode = intr_mode;
11468
11469 /* Restart HBA and bring it online */
11470 lpfc_sli_brdrestart(phba);
11471 lpfc_online(phba);
11472
11473 /* Log the current active interrupt mode */
11474 lpfc_log_intr_mode(phba, phba->intr_mode);
11475
11476 return 0;
11477 }
11478
11479 /**
11480 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
11481 * @phba: pointer to lpfc hba data structure.
11482 *
11483 * This routine is called to prepare the SLI4 device for PCI slot recover. It
11484 * aborts all the outstanding SCSI I/Os to the pci device.
11485 **/
11486 static void
11487 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
11488 {
11489 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11490 "2828 PCI channel I/O abort preparing for recovery\n");
11491 /*
11492 * There may be errored I/Os through the HBA, abort all I/Os on txcmplq
11493 * and let the SCSI mid-layer retry them to recover.
11494 */
11495 lpfc_sli_abort_fcp_rings(phba);
11496 }
11497
11498 /**
11499 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
11500 * @phba: pointer to lpfc hba data structure.
11501 *
11502 * This routine is called to prepare the SLI4 device for PCI slot reset. It
11503 * disables the device interrupt and pci device, and aborts the internal FCP
11504 * pending I/Os.
11505 **/
11506 static void
11507 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
11508 {
11509 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11510 "2826 PCI channel disable preparing for reset\n");
11511
11512 /* Block any management I/Os to the device */
11513 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
11514
11515 /* Block all SCSI devices' I/Os on the host */
11516 lpfc_scsi_dev_block(phba);
11517
11518 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
11519 lpfc_sli_flush_fcp_rings(phba);
11520
11521 /* stop all timers */
11522 lpfc_stop_hba_timers(phba);
11523
11524 /* Disable interrupt and pci device */
11525 lpfc_sli4_disable_intr(phba);
11526 lpfc_sli4_queue_destroy(phba);
11527 pci_disable_device(phba->pcidev);
11528 }
11529
11530 /**
11531 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
11532 * @phba: pointer to lpfc hba data structure.
11533 *
11534 * This routine is called to prepare the SLI4 device for PCI slot permanently
11535 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
11536 * pending I/Os.
11537 **/
11538 static void
11539 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
11540 {
11541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11542 "2827 PCI channel permanent disable for failure\n");
11543
11544 /* Block all SCSI devices' I/Os on the host */
11545 lpfc_scsi_dev_block(phba);
11546
11547 /* stop all timers */
11548 lpfc_stop_hba_timers(phba);
11549
11550 /* Clean up all driver's outstanding SCSI I/Os */
11551 lpfc_sli_flush_fcp_rings(phba);
11552 }
11553
11554 /**
11555 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
11556 * @pdev: pointer to PCI device.
11557 * @state: the current PCI connection state.
11558 *
11559 * This routine is called from the PCI subsystem for error handling to a device
11560 * with SLI-4 interface spec. This function is called by the PCI subsystem
11561 * after a PCI bus error affecting this device has been detected. When this
11562 * function is invoked, it will need to stop all the I/Os and interrupt(s)
11563 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
11564 * for the PCI subsystem to perform proper recovery as desired.
11565 *
11566 * Return codes
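 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link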
11567 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
11568 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11569 **/
11570 static pci_ers_result_t
11571 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
11572 {
11573 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11574 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11575
11576 switch (state) {
11577 case pci_channel_io_normal:
11578 /* Non-fatal error, prepare for recovery */
11579 lpfc_sli4_prep_dev_for_recover(phba);
11580 return PCI_ERS_RESULT_CAN_RECOVER;
11581 case pci_channel_io_frozen:
11582 /* Fatal error, prepare for slot reset */
11583 lpfc_sli4_prep_dev_for_reset(phba);
11584 return PCI_ERS_RESULT_NEED_RESET;
11585 case pci_channel_io_perm_failure:
11586 /* Permanent failure, prepare for device down */
11587 lpfc_sli4_prep_dev_for_perm_failure(phba);
11588 return PCI_ERS_RESULT_DISCONNECT;
11589 default:
11590 /* Unknown state, prepare and request slot reset */
11591 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11592 "2825 Unknown PCI error state: x%x\n", state);
11593 lpfc_sli4_prep_dev_for_reset(phba);
11594 return PCI_ERS_RESULT_NEED_RESET;
11595 }
11596 }
11597
11598 /**
11599 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
11600 * @pdev: pointer to PCI device.
11601 *
11602 * This routine is called from the PCI subsystem for error handling to a device
11603 * with SLI-4 interface spec. It is called after the PCI bus has been reset to
11604 * restart the PCI card from scratch, as if from a cold-boot. During the
11605 * PCI subsystem error recovery, after the driver returns
11606 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
11607 * recovery and then call this routine before calling the .resume method to
11608 * recover the device. This function will initialize the HBA device, enable
11609 * the interrupt, but it will just put the HBA to offline state without
11610 * passing any I/O traffic.
11611 *
11612 * Return codes
11613 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
11614 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11615 */
11616 static pci_ers_result_t
11617 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
11618 {
11619 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11620 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11621 struct lpfc_sli *psli = &phba->sli;
11622 uint32_t intr_mode;
11623
11624 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
11625 if (pci_enable_device_mem(pdev)) {
11626 printk(KERN_ERR "lpfc: Cannot re-enable "
11627 "PCI device after reset.\n");
11628 return PCI_ERS_RESULT_DISCONNECT;
11629 }
11630
11631 pci_restore_state(pdev);
11632
11633 /*
11634 * As the new kernel behavior of pci_restore_state() clears the device's
11635 * saved_state flag, the restored state needs to be saved again.
11636 */
11637 pci_save_state(pdev);
11638
11639 if (pdev->is_busmaster)
11640 pci_set_master(pdev);
11641
11642 spin_lock_irq(&phba->hbalock);
11643 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
11644 spin_unlock_irq(&phba->hbalock);
11645
11646 /* Configure and enable interrupt */
11647 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
11648 if (intr_mode == LPFC_INTR_ERROR) {
11649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11650 "2824 Cannot re-enable interrupt after "
11651 "slot reset.\n");
11652 return PCI_ERS_RESULT_DISCONNECT;
11653 } else
11654 phba->intr_mode = intr_mode;
11655
11656 /* Log the current active interrupt mode */
11657 lpfc_log_intr_mode(phba, phba->intr_mode);
11658
11659 return PCI_ERS_RESULT_RECOVERED;
11660 }
11661
11662 /**
11663 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
11664 * @pdev: pointer to PCI device
11665 *
11666 * This routine is called from the PCI subsystem for error handling to a device
11667 * with SLI-4 interface spec. It is called when kernel error recovery tells
11668 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
11669 * error recovery. After this call, traffic can start to flow from this device
11670 * again.
11671 **/
11672 static void
11673 lpfc_io_resume_s4(struct pci_dev *pdev)
11674 {
11675 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11676 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11677
11678 /*
11679 * In case of slot reset, as function reset is performed through
11680 * mailbox command which needs DMA to be enabled, this operation
11681 * has to be moved to the io resume phase. Taking device offline
11682 * will perform the necessary cleanup.
11683 */
11684 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
11685 /* Perform device reset */
11686 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
11687 lpfc_offline(phba);
11688 lpfc_sli_brdrestart(phba);
11689 /* Bring the device back online */
11690 lpfc_online(phba);
11691 }
11692
11693 /* Clean up Advanced Error Reporting (AER) if needed */
11694 if (phba->hba_flag & HBA_AER_ENABLED)
11695 pci_cleanup_aer_uncorrect_error_status(pdev);
11696 }
11697
11698 /**
11699 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
11700 * @pdev: pointer to PCI device
11701 * @pid: pointer to PCI device identifier
11702 *
11703 * This routine is to be registered to the kernel's PCI subsystem. When an
11704 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
11705 * at PCI device-specific information of the device and driver to see if the
11706 * driver state that it can support this kind of device. If the match is
11707 * successful, the driver core invokes this routine. This routine dispatches
11708 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
11709 * do all the initialization that it needs to do to handle the HBA device
11710 * properly.
11711 *
11712 * Return code
11713 * 0 - driver can claim the device
11714 * negative value - driver can not claim the device
11715 **/
11716 static int
11717 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
11718 {
11719 int rc;
11720 struct lpfc_sli_intf intf;
11721
11722 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
11723 return -ENODEV;
11724
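/* Dispatch to the SLI-4 probe when the SLI_INTF register reports a valid
 * SLI-4 interface; otherwise use the SLI-3 probe path. */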
11725 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
11726 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
11727 rc = lpfc_pci_probe_one_s4(pdev, pid);
11728 else
11729 rc = lpfc_pci_probe_one_s3(pdev, pid);
11730
11731 return rc;
11732 }
11733
11734 /**
11735 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
11736 * @pdev: pointer to PCI device
11737 *
11738 * This routine is to be registered to the kernel's PCI subsystem. When an
11739 * Emulex HBA is removed from the PCI bus, the driver core invokes this routine.
11740 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
11741 * remove routine, which will perform all the necessary cleanup for the
11742 * device to be removed from the PCI subsystem properly.
11743 **/
11744 static void
11745 lpfc_pci_remove_one(struct pci_dev *pdev)
11746 {
11747 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11748 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11749
11750 switch (phba->pci_dev_grp) {
11751 case LPFC_PCI_DEV_LP:
11752 lpfc_pci_remove_one_s3(pdev);
11753 break;
11754 case LPFC_PCI_DEV_OC:
11755 lpfc_pci_remove_one_s4(pdev);
11756 break;
11757 default:
11758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11759 "1424 Invalid PCI device group: 0x%x\n",
11760 phba->pci_dev_grp);
11761 break;
11762 }
11763 return;
11764 }
11765
11766 /**
11767 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
11768 * @pdev: pointer to PCI device
11769 * @msg: power management message
11770 *
11771 * This routine is to be registered to the kernel's PCI subsystem to support
11772 * system Power Management (PM). When PM invokes this method, it dispatches
11773 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
11774 * suspend the device.
11775 *
11776 * Return code
11777 * 0 - driver suspended the device
11778 * Error otherwise
11779 **/
11780 static int
11781 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
11782 {
11783 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11784 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11785 int rc = -ENODEV;
11786
11787 switch (phba->pci_dev_grp) {
11788 case LPFC_PCI_DEV_LP:
11789 rc = lpfc_pci_suspend_one_s3(pdev, msg);
11790 break;
11791 case LPFC_PCI_DEV_OC:
11792 rc = lpfc_pci_suspend_one_s4(pdev, msg);
11793 break;
11794 default:
11795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11796 "1425 Invalid PCI device group: 0x%x\n",
11797 phba->pci_dev_grp);
11798 break;
11799 }
11800 return rc;
11801 }
11802
11803 /**
11804 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
11805 * @pdev: pointer to PCI device
11806 *
11807 * This routine is to be registered to the kernel's PCI subsystem to support
11808 * system Power Management (PM). When PM invokes this method, it dispatches
11809 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
11810 * resume the device.
11811 *
11812 * Return code
11813 * 0 - driver resumed the device
11814 * Error otherwise
11815 **/
11816 static int
11817 lpfc_pci_resume_one(struct pci_dev *pdev)
11818 {
11819 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11820 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11821 int rc = -ENODEV;
11822
11823 switch (phba->pci_dev_grp) {
11824 case LPFC_PCI_DEV_LP:
11825 rc = lpfc_pci_resume_one_s3(pdev);
11826 break;
11827 case LPFC_PCI_DEV_OC:
11828 rc = lpfc_pci_resume_one_s4(pdev);
11829 break;
11830 default:
11831 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11832 "1426 Invalid PCI device group: 0x%x\n",
11833 phba->pci_dev_grp);
11834 break;
11835 }
11836 return rc;
11837 }
11838
11839 /**
11840 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
11841 * @pdev: pointer to PCI device.
11842 * @state: the current PCI connection state.
11843 *
11844 * This routine is registered to the PCI subsystem for error handling. This
11845 * function is called by the PCI subsystem after a PCI bus error affecting
11846 * this device has been detected. When this routine is invoked, it dispatches
11847 * the action to the proper SLI-3 or SLI-4 device error detected handling
11848 * routine, which will perform the proper error detected operation.
11849 *
11850 * Return codes
11851 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
11852 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11853 **/
11854 static pci_ers_result_t
11855 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
11856 {
11857 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11858 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11859 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
11860
11861 switch (phba->pci_dev_grp) {
11862 case LPFC_PCI_DEV_LP:
11863 rc = lpfc_io_error_detected_s3(pdev, state);
11864 break;
11865 case LPFC_PCI_DEV_OC:
11866 rc = lpfc_io_error_detected_s4(pdev, state);
11867 break;
11868 default:
11869 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11870 "1427 Invalid PCI device group: 0x%x\n",
11871 phba->pci_dev_grp);
11872 break;
11873 }
11874 return rc;
11875 }
11876
11877 /**
11878 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
11879 * @pdev: pointer to PCI device.
11880 *
11881 * This routine is registered to the PCI subsystem for error handling. This
11882 * function is called after PCI bus has been reset to restart the PCI card
11883 * from scratch, as if from a cold-boot. When this routine is invoked, it
11884 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
11885 * routine, which will perform the proper device reset.
11886 *
11887 * Return codes
11888 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
11889 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11890 **/
11891 static pci_ers_result_t
11892 lpfc_io_slot_reset(struct pci_dev *pdev)
11893 {
11894 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11895 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11896 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
11897
11898 switch (phba->pci_dev_grp) {
11899 case LPFC_PCI_DEV_LP:
11900 rc = lpfc_io_slot_reset_s3(pdev);
11901 break;
11902 case LPFC_PCI_DEV_OC:
11903 rc = lpfc_io_slot_reset_s4(pdev);
11904 break;
11905 default:
11906 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11907 "1428 Invalid PCI device group: 0x%x\n",
11908 phba->pci_dev_grp);
11909 break;
11910 }
11911 return rc;
11912 }
11913
11914 /**
11915 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
11916 * @pdev: pointer to PCI device
11917 *
11918 * This routine is registered to the PCI subsystem for error handling. It
11919 * is called when kernel error recovery tells the lpfc driver that it is
11920 * OK to resume normal PCI operation after PCI bus error recovery. When
11921 * this routine is invoked, it dispatches the action to the proper SLI-3
11922 * or SLI-4 device io_resume routine, which will resume the device operation.
11923 **/
11924 static void
11925 lpfc_io_resume(struct pci_dev *pdev)
11926 {
11927 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11928 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11929
11930 switch (phba->pci_dev_grp) {
11931 case LPFC_PCI_DEV_LP:
11932 lpfc_io_resume_s3(pdev);
11933 break;
11934 case LPFC_PCI_DEV_OC:
11935 lpfc_io_resume_s4(pdev);
11936 break;
11937 default:
11938 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11939 "1429 Invalid PCI device group: 0x%x\n",
11940 phba->pci_dev_grp);
11941 break;
11942 }
11943 return;
11944 }
11945
11946 /**
11947 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
11948 * @phba: pointer to lpfc hba data structure.
11949 *
11950 * This routine checks to see if OAS is supported for this adapter. If it is
11951 * supported, the Flash Optimized Fabric configuration flag is set. Otherwise,
11952 * the enable-OAS flag is cleared and the memory pool created for OAS device
11953 * data is destroyed.
11954 *
11955 **/
11956 void
11957 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
11958 {
11959
11960 if (!phba->cfg_EnableXLane)
11961 return;
11962
11963 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
11964 phba->cfg_fof = 1;
11965 } else {
11966 phba->cfg_fof = 0;
11967 if (phba->device_data_mem_pool)
11968 mempool_destroy(phba->device_data_mem_pool);
11969 phba->device_data_mem_pool = NULL;
11970 }
11971
11972 return;
11973 }
11974
11975 /**
11976 * lpfc_fof_queue_setup - Set up all the fof queues
11977 * @phba: pointer to lpfc hba data structure.
11978 *
11979 * This routine is invoked to set up all the fof queues for the FC HBA
11980 * operation.
11981 *
11982 * Return codes
11983 * 0 - successful
11984 * -ENOMEM - No available memory
11985 **/
11986 int
11987 lpfc_fof_queue_setup(struct lpfc_hba *phba)
11988 {
11989 struct lpfc_sli_ring *pring;
11990 int rc;
11991
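	/* Create the FOF event queue in hardware, using LPFC_MAX_IMAX as
	 * the interrupt coalescing (imax) setting.
	 */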
11992 rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
11993 if (rc)
11994 return -ENOMEM;
11995
11996 if (phba->cfg_fof) {
11997
11998 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
11999 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
12000 if (rc)
12001 goto out_oas_cq;
12002
12003 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
12004 phba->sli4_hba.oas_cq, LPFC_FCP);
12005 if (rc)
12006 goto out_oas_wq;
12007
12008 /* Bind this CQ/WQ to the OAS ring */
12009 pring = phba->sli4_hba.oas_wq->pring;
12010 pring->sli.sli4.wqp =
12011 (void *)phba->sli4_hba.oas_wq;
12012 phba->sli4_hba.oas_cq->pring = pring;
12013 }
12014
12015 return 0;
12016
12017 out_oas_wq:
12018 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
12019 out_oas_cq:
12020 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
12021 return rc;
12022
12023 }
12024
12025 /**
12026 * lpfc_fof_queue_create - Create all the fof queues
12027 * @phba: pointer to lpfc hba data structure.
12028 *
12029 * This routine is invoked to allocate all the fof queues for the FC HBA
12030 * operation. For each SLI4 queue type, parameters such as the queue entry
12031 * count (queue depth) should be taken from the module parameters; for now,
12032 * constant default values are used as placeholders.
12033 *
12034 * Return codes
12035 * 0 - successful
12036 * -ENOMEM - No available memory
12037 * -EIO - The mailbox failed to complete successfully.
12038 **/
12039 int
12040 lpfc_fof_queue_create(struct lpfc_hba *phba)
12041 {
12042 struct lpfc_queue *qdesc;
12043 uint32_t wqesize;
12044
12045 /* Create FOF EQ */
12046 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
12047 phba->sli4_hba.eq_ecount);
12048 if (!qdesc)
12049 goto out_error;
12050
12051 phba->sli4_hba.fof_eq = qdesc;
12052
12053 if (phba->cfg_fof) {
12054
12055 /* Create OAS CQ */
12056 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
12057 phba->sli4_hba.cq_ecount);
12058 if (!qdesc)
12059 goto out_error;
12060
12061 phba->sli4_hba.oas_cq = qdesc;
12062
12063 /* Create OAS WQ */
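		/* 128-byte WQEs are required when the FCP command is
		 * embedded in the WQE (fcp_embed_io).
		 */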
12064 wqesize = (phba->fcp_embed_io) ?
12065 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
12066 qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
12067 phba->sli4_hba.wq_ecount);
12068
12069 if (!qdesc)
12070 goto out_error;
12071
12072 phba->sli4_hba.oas_wq = qdesc;
12073 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
12074
12075 }
12076 return 0;
12077
12078 out_error:
12079 lpfc_fof_queue_destroy(phba);
12080 return -ENOMEM;
12081 }
12082
12083 /**
12084 * lpfc_fof_queue_destroy - Destroy all the fof queues
12085 * @phba: pointer to lpfc hba data structure.
12086 *
12087 * This routine is invoked to release all the SLI4 fof queues created for
12088 * the FC HBA operation.
12089 *
12090 * Return codes
12091 * 0 - successful
12092 **/
12093 int
12094 lpfc_fof_queue_destroy(struct lpfc_hba *phba)
12095 {
12096 /* Release FOF Event queue */
12097 if (phba->sli4_hba.fof_eq != NULL) {
12098 lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
12099 phba->sli4_hba.fof_eq = NULL;
12100 }
12101
12102 /* Release OAS Completion queue */
12103 if (phba->sli4_hba.oas_cq != NULL) {
12104 lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
12105 phba->sli4_hba.oas_cq = NULL;
12106 }
12107
12108 /* Release OAS Work queue */
12109 if (phba->sli4_hba.oas_wq != NULL) {
12110 lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
12111 phba->sli4_hba.oas_wq = NULL;
12112 }
12113 return 0;
12114 }
12115
12116 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
12117
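/* PCI AER error-recovery callbacks; the handlers dispatch to the SLI-3 or
 * SLI-4 specific routines based on phba->pci_dev_grp (see
 * lpfc_io_slot_reset() and lpfc_io_resume() above).
 */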
12118 static const struct pci_error_handlers lpfc_err_handler = {
12119 .error_detected = lpfc_io_error_detected,
12120 .slot_reset = lpfc_io_slot_reset,
12121 .resume = lpfc_io_resume,
12122 };
12123
12124 static struct pci_driver lpfc_driver = {
12125 .name = LPFC_DRIVER_NAME,
12126 .id_table = lpfc_id_table,
12127 .probe = lpfc_pci_probe_one,
12128 .remove = lpfc_pci_remove_one,
12129 .shutdown = lpfc_pci_remove_one,
12130 .suspend = lpfc_pci_suspend_one,
12131 .resume = lpfc_pci_resume_one,
12132 .err_handler = &lpfc_err_handler,
12133 };
12134
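/* Minimal file_operations for the lpfcmgmt misc device below; only the
 * module owner is set, so opening the device node has no side effects.
 */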
12135 static const struct file_operations lpfc_mgmt_fop = {
12136 .owner = THIS_MODULE,
12137 };
12138
12139 static struct miscdevice lpfc_mgmt_dev = {
12140 .minor = MISC_DYNAMIC_MINOR,
12141 .name = "lpfcmgmt",
12142 .fops = &lpfc_mgmt_fop,
12143 };
12144
12145 /**
12146 * lpfc_init - lpfc module initialization routine
12147 *
12148 * This routine is to be invoked when the lpfc module is loaded into the
12149 * kernel. The special kernel macro module_init() is used to indicate the
12150 * role of this routine to the kernel as lpfc module entry point.
12151 *
12152 * Return codes
12153 * 0 - successful
12154 * -ENOMEM - FC attach transport failed
12155 * all others - failed
12156 */
12157 static int __init
12158 lpfc_init(void)
12159 {
12160 int error = 0;
12161
12162 printk(LPFC_MODULE_DESC "\n");
12163 printk(LPFC_COPYRIGHT "\n");
12164
12165 error = misc_register(&lpfc_mgmt_dev);
12166 if (error)
12167 printk(KERN_ERR "Could not register lpfcmgmt device, "
12168 "misc_register returned with status %d\n", error);
12169
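	/* Hook virtual port create/delete into the FC transport template
	 * before attaching the transport templates.
	 */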
12170 lpfc_transport_functions.vport_create = lpfc_vport_create;
12171 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
12172 lpfc_transport_template =
12173 fc_attach_transport(&lpfc_transport_functions);
12174 if (lpfc_transport_template == NULL)
12175 return -ENOMEM;
12176 lpfc_vport_transport_template =
12177 fc_attach_transport(&lpfc_vport_transport_functions);
12178 if (lpfc_vport_transport_template == NULL) {
12179 fc_release_transport(lpfc_transport_template);
12180 return -ENOMEM;
12181 }
12182
12183 /* Initialize in case vector mapping is needed */
12184 lpfc_used_cpu = NULL;
12185 lpfc_present_cpu = num_present_cpus();
12186
12187 error = pci_register_driver(&lpfc_driver);
12188 if (error) {
12189 fc_release_transport(lpfc_transport_template);
12190 fc_release_transport(lpfc_vport_transport_template);
12191 }
12192
12193 return error;
12194 }
12195
12196 /**
12197 * lpfc_exit - lpfc module removal routine
12198 *
12199 * This routine is invoked when the lpfc module is removed from the kernel.
12200 * The special kernel macro module_exit() is used to indicate the role of
12201 * this routine to the kernel as the lpfc module exit point.
12202 */
12203 static void __exit
12204 lpfc_exit(void)
12205 {
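	/* Undo module initialization: deregister the management device and
	 * the PCI driver, release the FC transport templates, then free the
	 * BLKGRD debug dump buffers if they were allocated.
	 */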
12206 misc_deregister(&lpfc_mgmt_dev);
12207 pci_unregister_driver(&lpfc_driver);
12208 fc_release_transport(lpfc_transport_template);
12209 fc_release_transport(lpfc_vport_transport_template);
12210 if (_dump_buf_data) {
12211 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
12212 "_dump_buf_data at 0x%p\n",
12213 (1L << _dump_buf_data_order), _dump_buf_data);
12214 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
12215 }
12216
12217 if (_dump_buf_dif) {
12218 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
12219 "_dump_buf_dif at 0x%p\n",
12220 (1L << _dump_buf_dif_order), _dump_buf_dif);
12221 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
12222 }
12223 kfree(lpfc_used_cpu);
12224 idr_destroy(&lpfc_hba_index);
12225 }
12226
12227 module_init(lpfc_init);
12228 module_exit(lpfc_exit);
12229 MODULE_LICENSE("GPL");
12230 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
12231 MODULE_AUTHOR("Broadcom");
12232 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);