]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/lpfc/lpfc_init.c
scsi: lpfc: Vport creation is failing with "Link Down" error
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / lpfc / lpfc_init.c
CommitLineData
dea3101e
JB
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
c44ce173 3 * Fibre Channel Host Bus Adapters. *
d080abe0
JS
4 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
50611577 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
c44ce173 7 * EMULEX and SLI are trademarks of Emulex. *
d080abe0 8 * www.broadcom.com *
c44ce173 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea3101e
JB
10 * *
11 * This program is free software; you can redistribute it and/or *
c44ce173
JSEC
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
dea3101e
JB
22 *******************************************************************/
23
dea3101e
JB
24#include <linux/blkdev.h>
25#include <linux/delay.h>
26#include <linux/dma-mapping.h>
27#include <linux/idr.h>
28#include <linux/interrupt.h>
acf3368f 29#include <linux/module.h>
dea3101e
JB
30#include <linux/kthread.h>
31#include <linux/pci.h>
32#include <linux/spinlock.h>
92d7f7b0 33#include <linux/ctype.h>
0d878419 34#include <linux/aer.h>
5a0e3ad6 35#include <linux/slab.h>
52d52440 36#include <linux/firmware.h>
3ef6d24c 37#include <linux/miscdevice.h>
7bb03bbf 38#include <linux/percpu.h>
895427bd 39#include <linux/msi.h>
dea3101e 40
91886523 41#include <scsi/scsi.h>
dea3101e
JB
42#include <scsi/scsi_device.h>
43#include <scsi/scsi_host.h>
44#include <scsi/scsi_transport_fc.h>
86c67379
JS
45#include <scsi/scsi_tcq.h>
46#include <scsi/fc/fc_fs.h>
47
48#include <linux/nvme-fc-driver.h>
dea3101e 49
da0436e9 50#include "lpfc_hw4.h"
dea3101e
JB
51#include "lpfc_hw.h"
52#include "lpfc_sli.h"
da0436e9 53#include "lpfc_sli4.h"
ea2151b4 54#include "lpfc_nl.h"
dea3101e 55#include "lpfc_disc.h"
dea3101e 56#include "lpfc.h"
895427bd
JS
57#include "lpfc_scsi.h"
58#include "lpfc_nvme.h"
86c67379 59#include "lpfc_nvmet.h"
dea3101e
JB
60#include "lpfc_logmsg.h"
61#include "lpfc_crtn.h"
92d7f7b0 62#include "lpfc_vport.h"
dea3101e 63#include "lpfc_version.h"
12f44457 64#include "lpfc_ids.h"
dea3101e 65
81301a9b
JS
66char *_dump_buf_data;
67unsigned long _dump_buf_data_order;
68char *_dump_buf_dif;
69unsigned long _dump_buf_dif_order;
70spinlock_t _dump_buf_lock;
71
7bb03bbf 72/* Used when mapping IRQ vectors in a driver centric manner */
b246de17
JS
73uint16_t *lpfc_used_cpu;
74uint32_t lpfc_present_cpu;
7bb03bbf 75
dea3101e
JB
76static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
77static int lpfc_post_rcv_buf(struct lpfc_hba *);
5350d872 78static int lpfc_sli4_queue_verify(struct lpfc_hba *);
da0436e9
JS
79static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
80static int lpfc_setup_endian_order(struct lpfc_hba *);
da0436e9 81static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
8a9d2e80 82static void lpfc_free_els_sgl_list(struct lpfc_hba *);
f358dd0c 83static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
8a9d2e80 84static void lpfc_init_sgl_list(struct lpfc_hba *);
da0436e9
JS
85static int lpfc_init_active_sgl_array(struct lpfc_hba *);
86static void lpfc_free_active_sgl(struct lpfc_hba *);
87static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
88static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
89static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
90static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
91static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
618a5230
JS
92static void lpfc_sli4_disable_intr(struct lpfc_hba *);
93static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
1ba981fd 94static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
dea3101e
JB
95
96static struct scsi_transport_template *lpfc_transport_template = NULL;
92d7f7b0 97static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
dea3101e 98static DEFINE_IDR(lpfc_hba_index);
f358dd0c 99#define LPFC_NVMET_BUF_POST 254
dea3101e 100
e59058c4 101/**
3621a710 102 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
e59058c4
JS
103 * @phba: pointer to lpfc hba data structure.
104 *
105 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
106 * mailbox command. It retrieves the revision information from the HBA and
107 * collects the Vital Product Data (VPD) about the HBA for preparing the
108 * configuration of the HBA.
109 *
110 * Return codes:
111 * 0 - success.
112 * -ERESTART - requests the SLI layer to reset the HBA and try again.
113 * Any other value - indicates an error.
114 **/
dea3101e 115int
2e0fef85 116lpfc_config_port_prep(struct lpfc_hba *phba)
dea3101e
JB
117{
118 lpfc_vpd_t *vp = &phba->vpd;
119 int i = 0, rc;
120 LPFC_MBOXQ_t *pmb;
121 MAILBOX_t *mb;
122 char *lpfc_vpd_data = NULL;
123 uint16_t offset = 0;
124 static char licensed[56] =
125 "key unlock for use with gnu public licensed code only\0";
65a29c16 126 static int init_key = 1;
dea3101e
JB
127
128 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
129 if (!pmb) {
2e0fef85 130 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
131 return -ENOMEM;
132 }
133
04c68496 134 mb = &pmb->u.mb;
2e0fef85 135 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e
JB
136
137 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
65a29c16
JS
138 if (init_key) {
139 uint32_t *ptext = (uint32_t *) licensed;
dea3101e 140
65a29c16
JS
141 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
142 *ptext = cpu_to_be32(*ptext);
143 init_key = 0;
144 }
dea3101e
JB
145
146 lpfc_read_nv(phba, pmb);
147 memset((char*)mb->un.varRDnvp.rsvd3, 0,
148 sizeof (mb->un.varRDnvp.rsvd3));
149 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
150 sizeof (licensed));
151
152 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
153
154 if (rc != MBX_SUCCESS) {
ed957684 155 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
e8b62011 156 "0324 Config Port initialization "
dea3101e
JB
157 "error, mbxCmd x%x READ_NVPARM, "
158 "mbxStatus x%x\n",
dea3101e
JB
159 mb->mbxCommand, mb->mbxStatus);
160 mempool_free(pmb, phba->mbox_mem_pool);
161 return -ERESTART;
162 }
163 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
2e0fef85
JS
164 sizeof(phba->wwnn));
165 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
166 sizeof(phba->wwpn));
dea3101e
JB
167 }
168
92d7f7b0
JS
169 phba->sli3_options = 0x0;
170
dea3101e
JB
171 /* Setup and issue mailbox READ REV command */
172 lpfc_read_rev(phba, pmb);
173 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
174 if (rc != MBX_SUCCESS) {
ed957684 175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 176 "0439 Adapter failed to init, mbxCmd x%x "
dea3101e 177 "READ_REV, mbxStatus x%x\n",
dea3101e
JB
178 mb->mbxCommand, mb->mbxStatus);
179 mempool_free( pmb, phba->mbox_mem_pool);
180 return -ERESTART;
181 }
182
92d7f7b0 183
1de933f3
JSEC
184 /*
185 * The value of rr must be 1 since the driver set the cv field to 1.
186 * This setting requires the FW to set all revision fields.
dea3101e 187 */
1de933f3 188 if (mb->un.varRdRev.rr == 0) {
dea3101e 189 vp->rev.rBit = 0;
1de933f3 190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011
JS
191 "0440 Adapter failed to init, READ_REV has "
192 "missing revision information.\n");
dea3101e
JB
193 mempool_free(pmb, phba->mbox_mem_pool);
194 return -ERESTART;
dea3101e
JB
195 }
196
495a714c
JS
197 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
198 mempool_free(pmb, phba->mbox_mem_pool);
ed957684 199 return -EINVAL;
495a714c 200 }
ed957684 201
dea3101e 202 /* Save information as VPD data */
1de933f3 203 vp->rev.rBit = 1;
92d7f7b0 204 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
1de933f3
JSEC
205 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
206 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
207 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
208 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
dea3101e
JB
209 vp->rev.biuRev = mb->un.varRdRev.biuRev;
210 vp->rev.smRev = mb->un.varRdRev.smRev;
211 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
212 vp->rev.endecRev = mb->un.varRdRev.endecRev;
213 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
214 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
215 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
216 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
217 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
218 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
219
92d7f7b0
JS
220 /* If the sli feature level is less then 9, we must
221 * tear down all RPIs and VPIs on link down if NPIV
222 * is enabled.
223 */
224 if (vp->rev.feaLevelHigh < 9)
225 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
226
dea3101e
JB
227 if (lpfc_is_LC_HBA(phba->pcidev->device))
228 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
229 sizeof (phba->RandomData));
230
dea3101e 231 /* Get adapter VPD information */
dea3101e
JB
232 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
233 if (!lpfc_vpd_data)
d7c255b2 234 goto out_free_mbox;
dea3101e 235 do {
a0c87cbd 236 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
dea3101e
JB
237 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
238
239 if (rc != MBX_SUCCESS) {
240 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011 241 "0441 VPD not present on adapter, "
dea3101e 242 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
dea3101e 243 mb->mbxCommand, mb->mbxStatus);
74b72a59 244 mb->un.varDmp.word_cnt = 0;
dea3101e 245 }
04c68496
JS
246 /* dump mem may return a zero when finished or we got a
247 * mailbox error, either way we are done.
248 */
249 if (mb->un.varDmp.word_cnt == 0)
250 break;
74b72a59
JW
251 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
252 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
d7c255b2
JS
253 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
254 lpfc_vpd_data + offset,
92d7f7b0 255 mb->un.varDmp.word_cnt);
dea3101e 256 offset += mb->un.varDmp.word_cnt;
74b72a59
JW
257 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
258 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
dea3101e
JB
259
260 kfree(lpfc_vpd_data);
dea3101e
JB
261out_free_mbox:
262 mempool_free(pmb, phba->mbox_mem_pool);
263 return 0;
264}
265
e59058c4 266/**
3621a710 267 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
e59058c4
JS
268 * @phba: pointer to lpfc hba data structure.
269 * @pmboxq: pointer to the driver internal queue element for mailbox command.
270 *
271 * This is the completion handler for driver's configuring asynchronous event
272 * mailbox command to the device. If the mailbox command returns successfully,
273 * it will set internal async event support flag to 1; otherwise, it will
274 * set internal async event support flag to 0.
275 **/
57127f15
JS
276static void
277lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
278{
04c68496 279 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
57127f15
JS
280 phba->temp_sensor_support = 1;
281 else
282 phba->temp_sensor_support = 0;
283 mempool_free(pmboxq, phba->mbox_mem_pool);
284 return;
285}
286
97207482 287/**
3621a710 288 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
97207482
JS
289 * @phba: pointer to lpfc hba data structure.
290 * @pmboxq: pointer to the driver internal queue element for mailbox command.
291 *
292 * This is the completion handler for dump mailbox command for getting
293 * wake up parameters. When this command complete, the response contain
294 * Option rom version of the HBA. This function translate the version number
295 * into a human readable string and store it in OptionROMVersion.
296 **/
297static void
298lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
299{
300 struct prog_id *prg;
301 uint32_t prog_id_word;
302 char dist = ' ';
303 /* character array used for decoding dist type. */
304 char dist_char[] = "nabx";
305
04c68496 306 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
9f1e1b50 307 mempool_free(pmboxq, phba->mbox_mem_pool);
97207482 308 return;
9f1e1b50 309 }
97207482
JS
310
311 prg = (struct prog_id *) &prog_id_word;
312
313 /* word 7 contain option rom version */
04c68496 314 prog_id_word = pmboxq->u.mb.un.varWords[7];
97207482
JS
315
316 /* Decode the Option rom version word to a readable string */
317 if (prg->dist < 4)
318 dist = dist_char[prg->dist];
319
320 if ((prg->dist == 3) && (prg->num == 0))
a2fc4aef 321 snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
97207482
JS
322 prg->ver, prg->rev, prg->lev);
323 else
a2fc4aef 324 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
97207482
JS
325 prg->ver, prg->rev, prg->lev,
326 dist, prg->num);
9f1e1b50 327 mempool_free(pmboxq, phba->mbox_mem_pool);
97207482
JS
328 return;
329}
330
0558056c
JS
331/**
332 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
333 * cfg_soft_wwnn, cfg_soft_wwpn
334 * @vport: pointer to lpfc vport data structure.
335 *
336 *
337 * Return codes
338 * None.
339 **/
340void
341lpfc_update_vport_wwn(struct lpfc_vport *vport)
342{
aeb3c817
JS
343 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
344 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
345
0558056c
JS
346 /* If the soft name exists then update it using the service params */
347 if (vport->phba->cfg_soft_wwnn)
348 u64_to_wwn(vport->phba->cfg_soft_wwnn,
349 vport->fc_sparam.nodeName.u.wwn);
350 if (vport->phba->cfg_soft_wwpn)
351 u64_to_wwn(vport->phba->cfg_soft_wwpn,
352 vport->fc_sparam.portName.u.wwn);
353
354 /*
355 * If the name is empty or there exists a soft name
356 * then copy the service params name, otherwise use the fc name
357 */
358 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
359 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
360 sizeof(struct lpfc_name));
361 else
362 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
363 sizeof(struct lpfc_name));
364
aeb3c817
JS
365 /*
366 * If the port name has changed, then set the Param changes flag
367 * to unreg the login
368 */
369 if (vport->fc_portname.u.wwn[0] != 0 &&
370 memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
371 sizeof(struct lpfc_name)))
372 vport->vport_flag |= FAWWPN_PARAM_CHG;
373
374 if (vport->fc_portname.u.wwn[0] == 0 ||
375 vport->phba->cfg_soft_wwpn ||
376 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
377 vport->vport_flag & FAWWPN_SET) {
0558056c
JS
378 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
379 sizeof(struct lpfc_name));
aeb3c817
JS
380 vport->vport_flag &= ~FAWWPN_SET;
381 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
382 vport->vport_flag |= FAWWPN_SET;
383 }
0558056c
JS
384 else
385 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
386 sizeof(struct lpfc_name));
387}
388
e59058c4 389/**
3621a710 390 * lpfc_config_port_post - Perform lpfc initialization after config port
e59058c4
JS
391 * @phba: pointer to lpfc hba data structure.
392 *
393 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
394 * command call. It performs all internal resource and state setups on the
395 * port: post IOCB buffers, enable appropriate host interrupt attentions,
396 * ELS ring timers, etc.
397 *
398 * Return codes
399 * 0 - success.
400 * Any other value - error.
401 **/
dea3101e 402int
2e0fef85 403lpfc_config_port_post(struct lpfc_hba *phba)
dea3101e 404{
2e0fef85 405 struct lpfc_vport *vport = phba->pport;
a257bf90 406 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e
JB
407 LPFC_MBOXQ_t *pmb;
408 MAILBOX_t *mb;
409 struct lpfc_dmabuf *mp;
410 struct lpfc_sli *psli = &phba->sli;
411 uint32_t status, timeout;
2e0fef85
JS
412 int i, j;
413 int rc;
dea3101e 414
7af67051
JS
415 spin_lock_irq(&phba->hbalock);
416 /*
417 * If the Config port completed correctly the HBA is not
418 * over heated any more.
419 */
420 if (phba->over_temp_state == HBA_OVER_TEMP)
421 phba->over_temp_state = HBA_NORMAL_TEMP;
422 spin_unlock_irq(&phba->hbalock);
423
dea3101e
JB
424 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
425 if (!pmb) {
2e0fef85 426 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
427 return -ENOMEM;
428 }
04c68496 429 mb = &pmb->u.mb;
dea3101e 430
dea3101e 431 /* Get login parameters for NID. */
9f1177a3
JS
432 rc = lpfc_read_sparam(phba, pmb, 0);
433 if (rc) {
434 mempool_free(pmb, phba->mbox_mem_pool);
435 return -ENOMEM;
436 }
437
ed957684 438 pmb->vport = vport;
dea3101e 439 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
ed957684 440 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 441 "0448 Adapter failed init, mbxCmd x%x "
dea3101e 442 "READ_SPARM mbxStatus x%x\n",
dea3101e 443 mb->mbxCommand, mb->mbxStatus);
2e0fef85 444 phba->link_state = LPFC_HBA_ERROR;
dea3101e 445 mp = (struct lpfc_dmabuf *) pmb->context1;
9f1177a3 446 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e
JB
447 lpfc_mbuf_free(phba, mp->virt, mp->phys);
448 kfree(mp);
449 return -EIO;
450 }
451
452 mp = (struct lpfc_dmabuf *) pmb->context1;
453
2e0fef85 454 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
dea3101e
JB
455 lpfc_mbuf_free(phba, mp->virt, mp->phys);
456 kfree(mp);
457 pmb->context1 = NULL;
0558056c 458 lpfc_update_vport_wwn(vport);
a257bf90
JS
459
460 /* Update the fc_host data structures with new wwn. */
461 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
462 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
21e9a0a5 463 fc_host_max_npiv_vports(shost) = phba->max_vpi;
a257bf90 464
dea3101e
JB
465 /* If no serial number in VPD data, use low 6 bytes of WWNN */
466 /* This should be consolidated into parse_vpd ? - mr */
467 if (phba->SerialNumber[0] == 0) {
468 uint8_t *outptr;
469
2e0fef85 470 outptr = &vport->fc_nodename.u.s.IEEE[0];
dea3101e
JB
471 for (i = 0; i < 12; i++) {
472 status = *outptr++;
473 j = ((status & 0xf0) >> 4);
474 if (j <= 9)
475 phba->SerialNumber[i] =
476 (char)((uint8_t) 0x30 + (uint8_t) j);
477 else
478 phba->SerialNumber[i] =
479 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
480 i++;
481 j = (status & 0xf);
482 if (j <= 9)
483 phba->SerialNumber[i] =
484 (char)((uint8_t) 0x30 + (uint8_t) j);
485 else
486 phba->SerialNumber[i] =
487 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
488 }
489 }
490
dea3101e 491 lpfc_read_config(phba, pmb);
ed957684 492 pmb->vport = vport;
dea3101e 493 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
ed957684 494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 495 "0453 Adapter failed to init, mbxCmd x%x "
dea3101e 496 "READ_CONFIG, mbxStatus x%x\n",
dea3101e 497 mb->mbxCommand, mb->mbxStatus);
2e0fef85 498 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
499 mempool_free( pmb, phba->mbox_mem_pool);
500 return -EIO;
501 }
502
a0c87cbd
JS
503 /* Check if the port is disabled */
504 lpfc_sli_read_link_ste(phba);
505
dea3101e 506 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
572709e2
JS
507 i = (mb->un.varRdConfig.max_xri + 1);
508 if (phba->cfg_hba_queue_depth > i) {
509 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
510 "3359 HBA queue depth changed from %d to %d\n",
511 phba->cfg_hba_queue_depth, i);
512 phba->cfg_hba_queue_depth = i;
513 }
514
515 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
516 i = (mb->un.varRdConfig.max_xri >> 3);
517 if (phba->pport->cfg_lun_queue_depth > i) {
518 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
519 "3360 LUN queue depth changed from %d to %d\n",
520 phba->pport->cfg_lun_queue_depth, i);
521 phba->pport->cfg_lun_queue_depth = i;
522 }
dea3101e
JB
523
524 phba->lmt = mb->un.varRdConfig.lmt;
74b72a59
JW
525
526 /* Get the default values for Model Name and Description */
527 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
528
2e0fef85 529 phba->link_state = LPFC_LINK_DOWN;
dea3101e 530
0b727fea 531 /* Only process IOCBs on ELS ring till hba_state is READY */
895427bd
JS
532 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
533 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
534 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
535 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
dea3101e
JB
536
537 /* Post receive buffers for desired rings */
ed957684
JS
538 if (phba->sli_rev != 3)
539 lpfc_post_rcv_buf(phba);
dea3101e 540
9399627f
JS
541 /*
542 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
543 */
544 if (phba->intr_type == MSIX) {
545 rc = lpfc_config_msi(phba, pmb);
546 if (rc) {
547 mempool_free(pmb, phba->mbox_mem_pool);
548 return -EIO;
549 }
550 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
551 if (rc != MBX_SUCCESS) {
552 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
553 "0352 Config MSI mailbox command "
554 "failed, mbxCmd x%x, mbxStatus x%x\n",
04c68496
JS
555 pmb->u.mb.mbxCommand,
556 pmb->u.mb.mbxStatus);
9399627f
JS
557 mempool_free(pmb, phba->mbox_mem_pool);
558 return -EIO;
559 }
560 }
561
04c68496 562 spin_lock_irq(&phba->hbalock);
9399627f
JS
563 /* Initialize ERATT handling flag */
564 phba->hba_flag &= ~HBA_ERATT_HANDLED;
565
dea3101e 566 /* Enable appropriate host interrupts */
9940b97b
JS
567 if (lpfc_readl(phba->HCregaddr, &status)) {
568 spin_unlock_irq(&phba->hbalock);
569 return -EIO;
570 }
dea3101e
JB
571 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
572 if (psli->num_rings > 0)
573 status |= HC_R0INT_ENA;
574 if (psli->num_rings > 1)
575 status |= HC_R1INT_ENA;
576 if (psli->num_rings > 2)
577 status |= HC_R2INT_ENA;
578 if (psli->num_rings > 3)
579 status |= HC_R3INT_ENA;
580
875fbdfe
JSEC
581 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
582 (phba->cfg_poll & DISABLE_FCP_RING_INT))
9399627f 583 status &= ~(HC_R0INT_ENA);
875fbdfe 584
dea3101e
JB
585 writel(status, phba->HCregaddr);
586 readl(phba->HCregaddr); /* flush */
2e0fef85 587 spin_unlock_irq(&phba->hbalock);
dea3101e 588
9399627f
JS
589 /* Set up ring-0 (ELS) timer */
590 timeout = phba->fc_ratov * 2;
256ec0d0
JS
591 mod_timer(&vport->els_tmofunc,
592 jiffies + msecs_to_jiffies(1000 * timeout));
9399627f 593 /* Set up heart beat (HB) timer */
256ec0d0
JS
594 mod_timer(&phba->hb_tmofunc,
595 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
858c9f6c
JS
596 phba->hb_outstanding = 0;
597 phba->last_completion_time = jiffies;
9399627f 598 /* Set up error attention (ERATT) polling timer */
256ec0d0 599 mod_timer(&phba->eratt_poll,
65791f1f 600 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
dea3101e 601
a0c87cbd
JS
602 if (phba->hba_flag & LINK_DISABLED) {
603 lpfc_printf_log(phba,
604 KERN_ERR, LOG_INIT,
605 "2598 Adapter Link is disabled.\n");
606 lpfc_down_link(phba, pmb);
607 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
608 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
609 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
610 lpfc_printf_log(phba,
611 KERN_ERR, LOG_INIT,
612 "2599 Adapter failed to issue DOWN_LINK"
613 " mbox command rc 0x%x\n", rc);
614
615 mempool_free(pmb, phba->mbox_mem_pool);
616 return -EIO;
617 }
e40a02c1 618 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
026abb87
JS
619 mempool_free(pmb, phba->mbox_mem_pool);
620 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
621 if (rc)
622 return rc;
dea3101e
JB
623 }
624 /* MBOX buffer will be freed in mbox compl */
57127f15 625 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9f1177a3
JS
626 if (!pmb) {
627 phba->link_state = LPFC_HBA_ERROR;
628 return -ENOMEM;
629 }
630
57127f15
JS
631 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
632 pmb->mbox_cmpl = lpfc_config_async_cmpl;
633 pmb->vport = phba->pport;
634 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
dea3101e 635
57127f15
JS
636 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
637 lpfc_printf_log(phba,
638 KERN_ERR,
639 LOG_INIT,
640 "0456 Adapter failed to issue "
e4e74273 641 "ASYNCEVT_ENABLE mbox status x%x\n",
57127f15
JS
642 rc);
643 mempool_free(pmb, phba->mbox_mem_pool);
644 }
97207482
JS
645
646 /* Get Option rom version */
647 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9f1177a3
JS
648 if (!pmb) {
649 phba->link_state = LPFC_HBA_ERROR;
650 return -ENOMEM;
651 }
652
97207482
JS
653 lpfc_dump_wakeup_param(phba, pmb);
654 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
655 pmb->vport = phba->pport;
656 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
657
658 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
659 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
e4e74273 660 "to get Option ROM version status x%x\n", rc);
97207482
JS
661 mempool_free(pmb, phba->mbox_mem_pool);
662 }
663
d7c255b2 664 return 0;
ce8b3ce5
JS
665}
666
84d1b006
JS
667/**
668 * lpfc_hba_init_link - Initialize the FC link
669 * @phba: pointer to lpfc hba data structure.
6e7288d9 670 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
84d1b006
JS
671 *
672 * This routine will issue the INIT_LINK mailbox command call.
673 * It is available to other drivers through the lpfc_hba data
674 * structure for use as a delayed link up mechanism with the
675 * module parameter lpfc_suppress_link_up.
676 *
677 * Return code
678 * 0 - success
679 * Any other value - error
680 **/
e399b228 681static int
6e7288d9 682lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
1b51197d
JS
683{
684 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
685}
686
687/**
688 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
689 * @phba: pointer to lpfc hba data structure.
690 * @fc_topology: desired fc topology.
691 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
692 *
693 * This routine will issue the INIT_LINK mailbox command call.
694 * It is available to other drivers through the lpfc_hba data
695 * structure for use as a delayed link up mechanism with the
696 * module parameter lpfc_suppress_link_up.
697 *
698 * Return code
699 * 0 - success
700 * Any other value - error
701 **/
702int
703lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
704 uint32_t flag)
84d1b006
JS
705{
706 struct lpfc_vport *vport = phba->pport;
707 LPFC_MBOXQ_t *pmb;
708 MAILBOX_t *mb;
709 int rc;
710
711 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
712 if (!pmb) {
713 phba->link_state = LPFC_HBA_ERROR;
714 return -ENOMEM;
715 }
716 mb = &pmb->u.mb;
717 pmb->vport = vport;
718
026abb87
JS
719 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
720 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
721 !(phba->lmt & LMT_1Gb)) ||
722 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
723 !(phba->lmt & LMT_2Gb)) ||
724 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
725 !(phba->lmt & LMT_4Gb)) ||
726 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
727 !(phba->lmt & LMT_8Gb)) ||
728 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
729 !(phba->lmt & LMT_10Gb)) ||
730 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
d38dd52c
JS
731 !(phba->lmt & LMT_16Gb)) ||
732 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
733 !(phba->lmt & LMT_32Gb))) {
026abb87
JS
734 /* Reset link speed to auto */
735 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
736 "1302 Invalid speed for this board:%d "
737 "Reset link speed to auto.\n",
738 phba->cfg_link_speed);
739 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
740 }
1b51197d 741 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
84d1b006 742 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1b51197d
JS
743 if (phba->sli_rev < LPFC_SLI_REV4)
744 lpfc_set_loopback_flag(phba);
6e7288d9 745 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
76a95d75 746 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
84d1b006
JS
747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
748 "0498 Adapter failed to init, mbxCmd x%x "
749 "INIT_LINK, mbxStatus x%x\n",
750 mb->mbxCommand, mb->mbxStatus);
76a95d75
JS
751 if (phba->sli_rev <= LPFC_SLI_REV3) {
752 /* Clear all interrupt enable conditions */
753 writel(0, phba->HCregaddr);
754 readl(phba->HCregaddr); /* flush */
755 /* Clear all pending interrupts */
756 writel(0xffffffff, phba->HAregaddr);
757 readl(phba->HAregaddr); /* flush */
758 }
84d1b006 759 phba->link_state = LPFC_HBA_ERROR;
6e7288d9 760 if (rc != MBX_BUSY || flag == MBX_POLL)
84d1b006
JS
761 mempool_free(pmb, phba->mbox_mem_pool);
762 return -EIO;
763 }
e40a02c1 764 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
6e7288d9
JS
765 if (flag == MBX_POLL)
766 mempool_free(pmb, phba->mbox_mem_pool);
84d1b006
JS
767
768 return 0;
769}
770
771/**
772 * lpfc_hba_down_link - this routine downs the FC link
6e7288d9
JS
773 * @phba: pointer to lpfc hba data structure.
774 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
84d1b006
JS
775 *
776 * This routine will issue the DOWN_LINK mailbox command call.
777 * It is available to other drivers through the lpfc_hba data
778 * structure for use to stop the link.
779 *
780 * Return code
781 * 0 - success
782 * Any other value - error
783 **/
e399b228 784static int
6e7288d9 785lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
84d1b006
JS
786{
787 LPFC_MBOXQ_t *pmb;
788 int rc;
789
790 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
791 if (!pmb) {
792 phba->link_state = LPFC_HBA_ERROR;
793 return -ENOMEM;
794 }
795
796 lpfc_printf_log(phba,
797 KERN_ERR, LOG_INIT,
798 "0491 Adapter Link is disabled.\n");
799 lpfc_down_link(phba, pmb);
800 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6e7288d9 801 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
84d1b006
JS
802 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
803 lpfc_printf_log(phba,
804 KERN_ERR, LOG_INIT,
805 "2522 Adapter failed to issue DOWN_LINK"
806 " mbox command rc 0x%x\n", rc);
807
808 mempool_free(pmb, phba->mbox_mem_pool);
809 return -EIO;
810 }
6e7288d9
JS
811 if (flag == MBX_POLL)
812 mempool_free(pmb, phba->mbox_mem_pool);
813
84d1b006
JS
814 return 0;
815}
816
e59058c4 817/**
3621a710 818 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
e59058c4
JS
819 * @phba: pointer to lpfc HBA data structure.
820 *
821 * This routine will do LPFC uninitialization before the HBA is reset when
822 * bringing down the SLI Layer.
823 *
824 * Return codes
825 * 0 - success.
826 * Any other value - error.
827 **/
dea3101e 828int
2e0fef85 829lpfc_hba_down_prep(struct lpfc_hba *phba)
dea3101e 830{
1b32f6aa
JS
831 struct lpfc_vport **vports;
832 int i;
3772a991
JS
833
834 if (phba->sli_rev <= LPFC_SLI_REV3) {
835 /* Disable interrupts */
836 writel(0, phba->HCregaddr);
837 readl(phba->HCregaddr); /* flush */
838 }
dea3101e 839
1b32f6aa
JS
840 if (phba->pport->load_flag & FC_UNLOADING)
841 lpfc_cleanup_discovery_resources(phba->pport);
842 else {
843 vports = lpfc_create_vport_work_array(phba);
844 if (vports != NULL)
3772a991
JS
845 for (i = 0; i <= phba->max_vports &&
846 vports[i] != NULL; i++)
1b32f6aa
JS
847 lpfc_cleanup_discovery_resources(vports[i]);
848 lpfc_destroy_vport_work_array(phba, vports);
7f5f3d0d
JS
849 }
850 return 0;
dea3101e
JB
851}
852
68e814f5
JS
853/**
854 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
855 * rspiocb which got deferred
856 *
857 * @phba: pointer to lpfc HBA data structure.
858 *
859 * This routine will cleanup completed slow path events after HBA is reset
860 * when bringing down the SLI Layer.
861 *
862 *
863 * Return codes
864 * void.
865 **/
866static void
867lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
868{
869 struct lpfc_iocbq *rspiocbq;
870 struct hbq_dmabuf *dmabuf;
871 struct lpfc_cq_event *cq_event;
872
873 spin_lock_irq(&phba->hbalock);
874 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
875 spin_unlock_irq(&phba->hbalock);
876
877 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
878 /* Get the response iocb from the head of work queue */
879 spin_lock_irq(&phba->hbalock);
880 list_remove_head(&phba->sli4_hba.sp_queue_event,
881 cq_event, struct lpfc_cq_event, list);
882 spin_unlock_irq(&phba->hbalock);
883
884 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
885 case CQE_CODE_COMPL_WQE:
886 rspiocbq = container_of(cq_event, struct lpfc_iocbq,
887 cq_event);
888 lpfc_sli_release_iocbq(phba, rspiocbq);
889 break;
890 case CQE_CODE_RECEIVE:
891 case CQE_CODE_RECEIVE_V1:
892 dmabuf = container_of(cq_event, struct hbq_dmabuf,
893 cq_event);
894 lpfc_in_buf_free(phba, &dmabuf->dbuf);
895 }
896 }
897}
898
e59058c4 899/**
bcece5f5 900 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
e59058c4
JS
901 * @phba: pointer to lpfc HBA data structure.
902 *
bcece5f5
JS
903 * This routine will cleanup posted ELS buffers after the HBA is reset
904 * when bringing down the SLI Layer.
905 *
e59058c4
JS
906 *
907 * Return codes
bcece5f5 908 * void.
e59058c4 909 **/
bcece5f5
JS
910static void
911lpfc_hba_free_post_buf(struct lpfc_hba *phba)
41415862
JW
912{
913 struct lpfc_sli *psli = &phba->sli;
914 struct lpfc_sli_ring *pring;
915 struct lpfc_dmabuf *mp, *next_mp;
07eab624
JS
916 LIST_HEAD(buflist);
917 int count;
41415862 918
92d7f7b0
JS
919 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
920 lpfc_sli_hbqbuf_free_all(phba);
921 else {
922 /* Cleanup preposted buffers on the ELS ring */
895427bd 923 pring = &psli->sli3_ring[LPFC_ELS_RING];
07eab624
JS
924 spin_lock_irq(&phba->hbalock);
925 list_splice_init(&pring->postbufq, &buflist);
926 spin_unlock_irq(&phba->hbalock);
927
928 count = 0;
929 list_for_each_entry_safe(mp, next_mp, &buflist, list) {
92d7f7b0 930 list_del(&mp->list);
07eab624 931 count++;
92d7f7b0
JS
932 lpfc_mbuf_free(phba, mp->virt, mp->phys);
933 kfree(mp);
934 }
07eab624
JS
935
936 spin_lock_irq(&phba->hbalock);
937 pring->postbufq_cnt -= count;
bcece5f5 938 spin_unlock_irq(&phba->hbalock);
41415862 939 }
bcece5f5
JS
940}
941
942/**
943 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
944 * @phba: pointer to lpfc HBA data structure.
945 *
946 * This routine will cleanup the txcmplq after the HBA is reset when bringing
947 * down the SLI Layer.
948 *
949 * Return codes
950 * void
951 **/
952static void
953lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
954{
955 struct lpfc_sli *psli = &phba->sli;
895427bd 956 struct lpfc_queue *qp = NULL;
bcece5f5
JS
957 struct lpfc_sli_ring *pring;
958 LIST_HEAD(completions);
959 int i;
960
895427bd
JS
961 if (phba->sli_rev != LPFC_SLI_REV4) {
962 for (i = 0; i < psli->num_rings; i++) {
963 pring = &psli->sli3_ring[i];
bcece5f5 964 spin_lock_irq(&phba->hbalock);
895427bd
JS
965 /* At this point in time the HBA is either reset or DOA
966 * Nothing should be on txcmplq as it will
967 * NEVER complete.
968 */
969 list_splice_init(&pring->txcmplq, &completions);
970 pring->txcmplq_cnt = 0;
bcece5f5 971 spin_unlock_irq(&phba->hbalock);
09372820 972
895427bd
JS
973 lpfc_sli_abort_iocb_ring(phba, pring);
974 }
a257bf90 975 /* Cancel all the IOCBs from the completions list */
895427bd
JS
976 lpfc_sli_cancel_iocbs(phba, &completions,
977 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
978 return;
979 }
980 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
981 pring = qp->pring;
982 if (!pring)
983 continue;
984 spin_lock_irq(&pring->ring_lock);
985 list_splice_init(&pring->txcmplq, &completions);
986 pring->txcmplq_cnt = 0;
987 spin_unlock_irq(&pring->ring_lock);
41415862
JW
988 lpfc_sli_abort_iocb_ring(phba, pring);
989 }
895427bd
JS
990 /* Cancel all the IOCBs from the completions list */
991 lpfc_sli_cancel_iocbs(phba, &completions,
992 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
bcece5f5 993}
41415862 994
bcece5f5
JS
995/**
996 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
997 int i;
998 * @phba: pointer to lpfc HBA data structure.
999 *
1000 * This routine will do uninitialization after the HBA is reset when bring
1001 * down the SLI Layer.
1002 *
1003 * Return codes
1004 * 0 - success.
1005 * Any other value - error.
1006 **/
1007static int
1008lpfc_hba_down_post_s3(struct lpfc_hba *phba)
1009{
1010 lpfc_hba_free_post_buf(phba);
1011 lpfc_hba_clean_txcmplq(phba);
41415862
JW
1012 return 0;
1013}
5af5eee7 1014
da0436e9
JS
1015/**
1016 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
1017 * @phba: pointer to lpfc HBA data structure.
1018 *
1019 * This routine will do uninitialization after the HBA is reset when bring
1020 * down the SLI Layer.
1021 *
1022 * Return codes
af901ca1 1023 * 0 - success.
da0436e9
JS
1024 * Any other value - error.
1025 **/
1026static int
1027lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1028{
1029 struct lpfc_scsi_buf *psb, *psb_next;
86c67379 1030 struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
da0436e9 1031 LIST_HEAD(aborts);
895427bd 1032 LIST_HEAD(nvme_aborts);
86c67379 1033 LIST_HEAD(nvmet_aborts);
da0436e9 1034 unsigned long iflag = 0;
0f65ff68
JS
1035 struct lpfc_sglq *sglq_entry = NULL;
1036
895427bd
JS
1037
1038 lpfc_sli_hbqbuf_free_all(phba);
bcece5f5
JS
1039 lpfc_hba_clean_txcmplq(phba);
1040
da0436e9
JS
1041 /* At this point in time the HBA is either reset or DOA. Either
1042 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
895427bd 1043 * on the lpfc_els_sgl_list so that it can either be freed if the
da0436e9
JS
1044 * driver is unloading or reposted if the driver is restarting
1045 * the port.
1046 */
895427bd 1047 spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */
da0436e9 1048 /* scsl_buf_list */
895427bd 1049 /* sgl_list_lock required because worker thread uses this
da0436e9
JS
1050 * list.
1051 */
895427bd 1052 spin_lock(&phba->sli4_hba.sgl_list_lock);
0f65ff68
JS
1053 list_for_each_entry(sglq_entry,
1054 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
1055 sglq_entry->state = SGL_FREED;
1056
da0436e9 1057 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
895427bd
JS
1058 &phba->sli4_hba.lpfc_els_sgl_list);
1059
f358dd0c 1060
895427bd 1061 spin_unlock(&phba->sli4_hba.sgl_list_lock);
da0436e9
JS
1062 /* abts_scsi_buf_list_lock required because worker thread uses this
1063 * list.
1064 */
895427bd
JS
1065 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
1066 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
1067 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
1068 &aborts);
1069 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
1070 }
1071
1072 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1073 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1074 list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
1075 &nvme_aborts);
86c67379
JS
1076 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1077 &nvmet_aborts);
895427bd
JS
1078 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1079 }
1080
da0436e9
JS
1081 spin_unlock_irq(&phba->hbalock);
1082
1083 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
1084 psb->pCmd = NULL;
1085 psb->status = IOSTAT_SUCCESS;
1086 }
a40fc5f0
JS
1087 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1088 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
1089 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
68e814f5 1090
86c67379
JS
1091 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1092 list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
1093 psb->pCmd = NULL;
1094 psb->status = IOSTAT_SUCCESS;
1095 }
1096 spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
1097 list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
1098 spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
1099
1100 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
1101 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
6c621a22 1102 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
86c67379 1103 }
895427bd 1104 }
895427bd 1105
68e814f5 1106 lpfc_sli4_free_sp_events(phba);
da0436e9
JS
1107 return 0;
1108}
1109
1110/**
1111 * lpfc_hba_down_post - Wrapper func for hba down post routine
1112 * @phba: pointer to lpfc HBA data structure.
1113 *
1114 * This routine wraps the actual SLI3 or SLI4 routine for performing
1115 * uninitialization after the HBA is reset when bring down the SLI Layer.
1116 *
1117 * Return codes
af901ca1 1118 * 0 - success.
da0436e9
JS
1119 * Any other value - error.
1120 **/
1121int
1122lpfc_hba_down_post(struct lpfc_hba *phba)
1123{
1124 return (*phba->lpfc_hba_down_post)(phba);
1125}
41415862 1126
e59058c4 1127/**
3621a710 1128 * lpfc_hb_timeout - The HBA-timer timeout handler
e59058c4
JS
1129 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
1130 *
1131 * This is the HBA-timer timeout handler registered to the lpfc driver. When
1132 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
1133 * work-port-events bitmap and the worker thread is notified. This timeout
1134 * event will be used by the worker thread to invoke the actual timeout
1135 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
1136 * be performed in the timeout handler and the HBA timeout event bit shall
1137 * be cleared by the worker thread after it has taken the event bitmap out.
1138 **/
a6ababd2 1139static void
858c9f6c
JS
1140lpfc_hb_timeout(unsigned long ptr)
1141{
1142 struct lpfc_hba *phba;
5e9d9b82 1143 uint32_t tmo_posted;
858c9f6c
JS
1144 unsigned long iflag;
1145
1146 phba = (struct lpfc_hba *)ptr;
9399627f
JS
1147
1148 /* Check for heart beat timeout conditions */
858c9f6c 1149 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
5e9d9b82
JS
1150 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1151 if (!tmo_posted)
858c9f6c
JS
1152 phba->pport->work_port_events |= WORKER_HB_TMO;
1153 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1154
9399627f 1155 /* Tell the worker thread there is work to do */
5e9d9b82
JS
1156 if (!tmo_posted)
1157 lpfc_worker_wake_up(phba);
858c9f6c
JS
1158 return;
1159}
1160
19ca7609
JS
1161/**
1162 * lpfc_rrq_timeout - The RRQ-timer timeout handler
1163 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
1164 *
1165 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
1166 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
1167 * work-port-events bitmap and the worker thread is notified. This timeout
1168 * event will be used by the worker thread to invoke the actual timeout
1169 * handler routine, lpfc_rrq_handler. Any periodical operations will
1170 * be performed in the timeout handler and the RRQ timeout event bit shall
1171 * be cleared by the worker thread after it has taken the event bitmap out.
1172 **/
1173static void
1174lpfc_rrq_timeout(unsigned long ptr)
1175{
1176 struct lpfc_hba *phba;
19ca7609
JS
1177 unsigned long iflag;
1178
1179 phba = (struct lpfc_hba *)ptr;
1180 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
06918ac5
JS
1181 if (!(phba->pport->load_flag & FC_UNLOADING))
1182 phba->hba_flag |= HBA_RRQ_ACTIVE;
1183 else
1184 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
19ca7609 1185 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
06918ac5
JS
1186
1187 if (!(phba->pport->load_flag & FC_UNLOADING))
1188 lpfc_worker_wake_up(phba);
19ca7609
JS
1189}
1190
e59058c4 1191/**
3621a710 1192 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
e59058c4
JS
1193 * @phba: pointer to lpfc hba data structure.
1194 * @pmboxq: pointer to the driver internal queue element for mailbox command.
1195 *
1196 * This is the callback function to the lpfc heart-beat mailbox command.
1197 * If configured, the lpfc driver issues the heart-beat mailbox command to
1198 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
1199 * heart-beat mailbox command is issued, the driver shall set up heart-beat
1200 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
1201 * heart-beat outstanding state. Once the mailbox command comes back and
1202 * no error conditions detected, the heart-beat mailbox command timer is
1203 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1204 * state is cleared for the next heart-beat. If the timer expired with the
1205 * heart-beat outstanding state set, the driver will put the HBA offline.
1206 **/
858c9f6c
JS
1207static void
1208lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1209{
1210 unsigned long drvr_flag;
1211
1212 spin_lock_irqsave(&phba->hbalock, drvr_flag);
1213 phba->hb_outstanding = 0;
1214 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1215
9399627f 1216 /* Check and reset heart-beat timer is necessary */
858c9f6c
JS
1217 mempool_free(pmboxq, phba->mbox_mem_pool);
1218 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1219 !(phba->link_state == LPFC_HBA_ERROR) &&
51ef4c26 1220 !(phba->pport->load_flag & FC_UNLOADING))
858c9f6c 1221 mod_timer(&phba->hb_tmofunc,
256ec0d0
JS
1222 jiffies +
1223 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
858c9f6c
JS
1224 return;
1225}
1226
e59058c4 1227/**
3621a710 1228 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
e59058c4
JS
1229 * @phba: pointer to lpfc hba data structure.
1230 *
1231 * This is the actual HBA-timer timeout handler to be invoked by the worker
1232 * thread whenever the HBA timer fired and HBA-timeout event posted. This
1233 * handler performs any periodic operations needed for the device. If such
1234 * periodic event has already been attended to either in the interrupt handler
1235 * or by processing slow-ring or fast-ring events within the HBA-timer
1236 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1237 * the timer for the next timeout period. If lpfc heart-beat mailbox command
1238 * is configured and there is no heart-beat mailbox command outstanding, a
1239 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1240 * has been a heart-beat mailbox command outstanding, the HBA shall be put
1241 * to offline.
1242 **/
858c9f6c
JS
1243void
1244lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1245{
45ed1190 1246 struct lpfc_vport **vports;
858c9f6c 1247 LPFC_MBOXQ_t *pmboxq;
0ff10d46 1248 struct lpfc_dmabuf *buf_ptr;
45ed1190 1249 int retval, i;
858c9f6c 1250 struct lpfc_sli *psli = &phba->sli;
0ff10d46 1251 LIST_HEAD(completions);
0cf07f84
JS
1252 struct lpfc_queue *qp;
1253 unsigned long time_elapsed;
1254 uint32_t tick_cqe, max_cqe, val;
1255 uint64_t tot, data1, data2, data3;
1256 struct lpfc_register reg_data;
1257 void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
858c9f6c 1258
45ed1190
JS
1259 vports = lpfc_create_vport_work_array(phba);
1260 if (vports != NULL)
4258e98e 1261 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
45ed1190 1262 lpfc_rcv_seq_check_edtov(vports[i]);
4258e98e
JS
1263 lpfc_fdmi_num_disc_check(vports[i]);
1264 }
45ed1190
JS
1265 lpfc_destroy_vport_work_array(phba, vports);
1266
858c9f6c 1267 if ((phba->link_state == LPFC_HBA_ERROR) ||
51ef4c26 1268 (phba->pport->load_flag & FC_UNLOADING) ||
858c9f6c
JS
1269 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1270 return;
1271
0cf07f84
JS
1272 if (phba->cfg_auto_imax) {
1273 if (!phba->last_eqdelay_time) {
1274 phba->last_eqdelay_time = jiffies;
1275 goto skip_eqdelay;
1276 }
1277 time_elapsed = jiffies - phba->last_eqdelay_time;
1278 phba->last_eqdelay_time = jiffies;
1279
1280 tot = 0xffff;
1281 /* Check outstanding IO count */
1282 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1283 if (phba->nvmet_support) {
1284 spin_lock(&phba->sli4_hba.nvmet_io_lock);
1285 tot = phba->sli4_hba.nvmet_xri_cnt -
1286 phba->sli4_hba.nvmet_ctx_cnt;
1287 spin_unlock(&phba->sli4_hba.nvmet_io_lock);
1288 } else {
1289 tot = atomic_read(&phba->fc4NvmeIoCmpls);
1290 data1 = atomic_read(
1291 &phba->fc4NvmeInputRequests);
1292 data2 = atomic_read(
1293 &phba->fc4NvmeOutputRequests);
1294 data3 = atomic_read(
1295 &phba->fc4NvmeControlRequests);
1296 tot = (data1 + data2 + data3) - tot;
1297 }
1298 }
1299
1300 /* Interrupts per sec per EQ */
1301 val = phba->cfg_fcp_imax / phba->io_channel_irqs;
1302 tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
1303
1304 /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
1305 max_cqe = time_elapsed * tick_cqe;
1306
1307 for (i = 0; i < phba->io_channel_irqs; i++) {
1308 /* Fast-path EQ */
1309 qp = phba->sli4_hba.hba_eq[i];
1310 if (!qp)
1311 continue;
1312
1313 /* Use no EQ delay if we don't have many outstanding
1314 * IOs, or if we are only processing 1 CQE/ISR or less.
1315 * Otherwise, assume we can process up to lpfc_fcp_imax
1316 * interrupts per HBA.
1317 */
1318 if (tot < LPFC_NODELAY_MAX_IO ||
1319 qp->EQ_cqe_cnt <= max_cqe)
1320 val = 0;
1321 else
1322 val = phba->cfg_fcp_imax;
1323
1324 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
1325 /* Use EQ Delay Register method */
1326
1327 /* Convert for EQ Delay register */
1328 if (val) {
1329 /* First, interrupts per sec per EQ */
1330 val = phba->cfg_fcp_imax /
1331 phba->io_channel_irqs;
1332
1333 /* us delay between each interrupt */
1334 val = LPFC_SEC_TO_USEC / val;
1335 }
1336 if (val != qp->q_mode) {
1337 reg_data.word0 = 0;
1338 bf_set(lpfc_sliport_eqdelay_id,
1339 &reg_data, qp->queue_id);
1340 bf_set(lpfc_sliport_eqdelay_delay,
1341 &reg_data, val);
1342 writel(reg_data.word0, eqdreg);
1343 }
1344 } else {
1345 /* Use mbox command method */
1346 if (val != qp->q_mode)
1347 lpfc_modify_hba_eq_delay(phba, i,
1348 1, val);
1349 }
1350
1351 /*
1352 * val is cfg_fcp_imax or 0 for mbox delay or us delay
1353 * between interrupts for EQDR.
1354 */
1355 qp->q_mode = val;
1356 qp->EQ_cqe_cnt = 0;
1357 }
1358 }
1359
1360skip_eqdelay:
858c9f6c 1361 spin_lock_irq(&phba->pport->work_port_lock);
858c9f6c 1362
256ec0d0
JS
1363 if (time_after(phba->last_completion_time +
1364 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1365 jiffies)) {
858c9f6c
JS
1366 spin_unlock_irq(&phba->pport->work_port_lock);
1367 if (!phba->hb_outstanding)
1368 mod_timer(&phba->hb_tmofunc,
256ec0d0
JS
1369 jiffies +
1370 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
858c9f6c
JS
1371 else
1372 mod_timer(&phba->hb_tmofunc,
256ec0d0
JS
1373 jiffies +
1374 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
858c9f6c
JS
1375 return;
1376 }
1377 spin_unlock_irq(&phba->pport->work_port_lock);
1378
0ff10d46
JS
1379 if (phba->elsbuf_cnt &&
1380 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1381 spin_lock_irq(&phba->hbalock);
1382 list_splice_init(&phba->elsbuf, &completions);
1383 phba->elsbuf_cnt = 0;
1384 phba->elsbuf_prev_cnt = 0;
1385 spin_unlock_irq(&phba->hbalock);
1386
1387 while (!list_empty(&completions)) {
1388 list_remove_head(&completions, buf_ptr,
1389 struct lpfc_dmabuf, list);
1390 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1391 kfree(buf_ptr);
1392 }
1393 }
1394 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1395
858c9f6c 1396 /* If there is no heart beat outstanding, issue a heartbeat command */
13815c83
JS
1397 if (phba->cfg_enable_hba_heartbeat) {
1398 if (!phba->hb_outstanding) {
bc73905a
JS
1399 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1400 (list_empty(&psli->mboxq))) {
1401 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1402 GFP_KERNEL);
1403 if (!pmboxq) {
1404 mod_timer(&phba->hb_tmofunc,
1405 jiffies +
256ec0d0
JS
1406 msecs_to_jiffies(1000 *
1407 LPFC_HB_MBOX_INTERVAL));
bc73905a
JS
1408 return;
1409 }
1410
1411 lpfc_heart_beat(phba, pmboxq);
1412 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1413 pmboxq->vport = phba->pport;
1414 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1415 MBX_NOWAIT);
1416
1417 if (retval != MBX_BUSY &&
1418 retval != MBX_SUCCESS) {
1419 mempool_free(pmboxq,
1420 phba->mbox_mem_pool);
1421 mod_timer(&phba->hb_tmofunc,
1422 jiffies +
256ec0d0
JS
1423 msecs_to_jiffies(1000 *
1424 LPFC_HB_MBOX_INTERVAL));
bc73905a
JS
1425 return;
1426 }
1427 phba->skipped_hb = 0;
1428 phba->hb_outstanding = 1;
1429 } else if (time_before_eq(phba->last_completion_time,
1430 phba->skipped_hb)) {
1431 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1432 "2857 Last completion time not "
1433 " updated in %d ms\n",
1434 jiffies_to_msecs(jiffies
1435 - phba->last_completion_time));
1436 } else
1437 phba->skipped_hb = jiffies;
1438
858c9f6c 1439 mod_timer(&phba->hb_tmofunc,
256ec0d0
JS
1440 jiffies +
1441 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
858c9f6c 1442 return;
13815c83
JS
1443 } else {
1444 /*
1445 * If heart beat timeout called with hb_outstanding set
dcf2a4e0
JS
1446 * we need to give the hb mailbox cmd a chance to
1447 * complete or TMO.
13815c83 1448 */
dcf2a4e0
JS
1449 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1450 "0459 Adapter heartbeat still out"
1451 "standing:last compl time was %d ms.\n",
1452 jiffies_to_msecs(jiffies
1453 - phba->last_completion_time));
1454 mod_timer(&phba->hb_tmofunc,
256ec0d0
JS
1455 jiffies +
1456 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
858c9f6c 1457 }
4258e98e
JS
1458 } else {
1459 mod_timer(&phba->hb_tmofunc,
1460 jiffies +
1461 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
858c9f6c
JS
1462 }
1463}
1464
e59058c4 1465/**
3621a710 1466 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
e59058c4
JS
1467 * @phba: pointer to lpfc hba data structure.
1468 *
1469 * This routine is called to bring the HBA offline when HBA hardware error
1470 * other than Port Error 6 has been detected.
1471 **/
09372820
JS
1472static void
1473lpfc_offline_eratt(struct lpfc_hba *phba)
1474{
1475 struct lpfc_sli *psli = &phba->sli;
1476
1477 spin_lock_irq(&phba->hbalock);
f4b4c68f 1478 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
09372820 1479 spin_unlock_irq(&phba->hbalock);
618a5230 1480 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
09372820
JS
1481
1482 lpfc_offline(phba);
1483 lpfc_reset_barrier(phba);
f4b4c68f 1484 spin_lock_irq(&phba->hbalock);
09372820 1485 lpfc_sli_brdreset(phba);
f4b4c68f 1486 spin_unlock_irq(&phba->hbalock);
09372820
JS
1487 lpfc_hba_down_post(phba);
1488 lpfc_sli_brdready(phba, HS_MBRDY);
1489 lpfc_unblock_mgmt_io(phba);
1490 phba->link_state = LPFC_HBA_ERROR;
1491 return;
1492}
1493
da0436e9
JS
1494/**
1495 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1496 * @phba: pointer to lpfc hba data structure.
1497 *
1498 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1499 * other than Port Error 6 has been detected.
1500 **/
a88dbb6a 1501void
da0436e9
JS
1502lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1503{
946727dc
JS
1504 spin_lock_irq(&phba->hbalock);
1505 phba->link_state = LPFC_HBA_ERROR;
1506 spin_unlock_irq(&phba->hbalock);
1507
618a5230 1508 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
da0436e9 1509 lpfc_offline(phba);
da0436e9 1510 lpfc_hba_down_post(phba);
da0436e9 1511 lpfc_unblock_mgmt_io(phba);
da0436e9
JS
1512}
1513
a257bf90
JS
1514/**
1515 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1516 * @phba: pointer to lpfc hba data structure.
1517 *
1518 * This routine is invoked to handle the deferred HBA hardware error
1519 * conditions. This type of error is indicated by HBA by setting ER1
1520 * and another ER bit in the host status register. The driver will
1521 * wait until the ER1 bit clears before handling the error condition.
1522 **/
1523static void
1524lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1525{
1526 uint32_t old_host_status = phba->work_hs;
a257bf90
JS
1527 struct lpfc_sli *psli = &phba->sli;
1528
f4b4c68f
JS
1529 /* If the pci channel is offline, ignore possible errors,
1530 * since we cannot communicate with the pci card anyway.
1531 */
1532 if (pci_channel_offline(phba->pcidev)) {
1533 spin_lock_irq(&phba->hbalock);
1534 phba->hba_flag &= ~DEFER_ERATT;
1535 spin_unlock_irq(&phba->hbalock);
1536 return;
1537 }
1538
a257bf90
JS
1539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1540 "0479 Deferred Adapter Hardware Error "
1541 "Data: x%x x%x x%x\n",
1542 phba->work_hs,
1543 phba->work_status[0], phba->work_status[1]);
1544
1545 spin_lock_irq(&phba->hbalock);
f4b4c68f 1546 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
a257bf90
JS
1547 spin_unlock_irq(&phba->hbalock);
1548
1549
1550 /*
1551 * Firmware stops when it triggred erratt. That could cause the I/Os
1552 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
1553 * SCSI layer retry it after re-establishing link.
1554 */
db55fba8 1555 lpfc_sli_abort_fcp_rings(phba);
a257bf90
JS
1556
1557 /*
1558 * There was a firmware error. Take the hba offline and then
1559 * attempt to restart it.
1560 */
618a5230 1561 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
a257bf90
JS
1562 lpfc_offline(phba);
1563
1564 /* Wait for the ER1 bit to clear.*/
1565 while (phba->work_hs & HS_FFER1) {
1566 msleep(100);
9940b97b
JS
1567 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1568 phba->work_hs = UNPLUG_ERR ;
1569 break;
1570 }
a257bf90
JS
1571 /* If driver is unloading let the worker thread continue */
1572 if (phba->pport->load_flag & FC_UNLOADING) {
1573 phba->work_hs = 0;
1574 break;
1575 }
1576 }
1577
1578 /*
1579 * This is to ptrotect against a race condition in which
1580 * first write to the host attention register clear the
1581 * host status register.
1582 */
1583 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1584 phba->work_hs = old_host_status & ~HS_FFER1;
1585
3772a991 1586 spin_lock_irq(&phba->hbalock);
a257bf90 1587 phba->hba_flag &= ~DEFER_ERATT;
3772a991 1588 spin_unlock_irq(&phba->hbalock);
a257bf90
JS
1589 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1590 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1591}
1592
3772a991
JS
1593static void
1594lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1595{
1596 struct lpfc_board_event_header board_event;
1597 struct Scsi_Host *shost;
1598
1599 board_event.event_type = FC_REG_BOARD_EVENT;
1600 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1601 shost = lpfc_shost_from_vport(phba->pport);
1602 fc_host_post_vendor_event(shost, fc_get_event_number(),
1603 sizeof(board_event),
1604 (char *) &board_event,
1605 LPFC_NL_VENDOR_ID);
1606}
1607
e59058c4 1608/**
3772a991 1609 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
e59058c4
JS
1610 * @phba: pointer to lpfc hba data structure.
1611 *
1612 * This routine is invoked to handle the following HBA hardware error
1613 * conditions:
1614 * 1 - HBA error attention interrupt
1615 * 2 - DMA ring index out of range
1616 * 3 - Mailbox command came back as unknown
1617 **/
3772a991
JS
1618static void
1619lpfc_handle_eratt_s3(struct lpfc_hba *phba)
dea3101e 1620{
2e0fef85 1621 struct lpfc_vport *vport = phba->pport;
2e0fef85 1622 struct lpfc_sli *psli = &phba->sli;
d2873e4c 1623 uint32_t event_data;
57127f15
JS
1624 unsigned long temperature;
1625 struct temp_event temp_event_data;
92d7f7b0 1626 struct Scsi_Host *shost;
2e0fef85 1627
8d63f375 1628 /* If the pci channel is offline, ignore possible errors,
3772a991
JS
1629 * since we cannot communicate with the pci card anyway.
1630 */
1631 if (pci_channel_offline(phba->pcidev)) {
1632 spin_lock_irq(&phba->hbalock);
1633 phba->hba_flag &= ~DEFER_ERATT;
1634 spin_unlock_irq(&phba->hbalock);
8d63f375 1635 return;
3772a991
JS
1636 }
1637
13815c83
JS
1638 /* If resets are disabled then leave the HBA alone and return */
1639 if (!phba->cfg_enable_hba_reset)
1640 return;
dea3101e 1641
ea2151b4 1642 /* Send an internal error event to mgmt application */
3772a991 1643 lpfc_board_errevt_to_mgmt(phba);
ea2151b4 1644
a257bf90
JS
1645 if (phba->hba_flag & DEFER_ERATT)
1646 lpfc_handle_deferred_eratt(phba);
1647
dcf2a4e0
JS
1648 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1649 if (phba->work_hs & HS_FFER6)
1650 /* Re-establishing Link */
1651 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1652 "1301 Re-establishing Link "
1653 "Data: x%x x%x x%x\n",
1654 phba->work_hs, phba->work_status[0],
1655 phba->work_status[1]);
1656 if (phba->work_hs & HS_FFER8)
1657 /* Device Zeroization */
1658 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1659 "2861 Host Authentication device "
1660 "zeroization Data:x%x x%x x%x\n",
1661 phba->work_hs, phba->work_status[0],
1662 phba->work_status[1]);
58da1ffb 1663
92d7f7b0 1664 spin_lock_irq(&phba->hbalock);
f4b4c68f 1665 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
92d7f7b0 1666 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
1667
1668 /*
1669 * Firmware stops when it triggled erratt with HS_FFER6.
1670 * That could cause the I/Os dropped by the firmware.
1671 * Error iocb (I/O) on txcmplq and let the SCSI layer
1672 * retry it after re-establishing link.
1673 */
db55fba8 1674 lpfc_sli_abort_fcp_rings(phba);
dea3101e 1675
dea3101e
JB
1676 /*
1677 * There was a firmware error. Take the hba offline and then
1678 * attempt to restart it.
1679 */
618a5230 1680 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
dea3101e 1681 lpfc_offline(phba);
41415862 1682 lpfc_sli_brdrestart(phba);
dea3101e 1683 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
46fa311e 1684 lpfc_unblock_mgmt_io(phba);
dea3101e
JB
1685 return;
1686 }
46fa311e 1687 lpfc_unblock_mgmt_io(phba);
57127f15
JS
1688 } else if (phba->work_hs & HS_CRIT_TEMP) {
1689 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1690 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1691 temp_event_data.event_code = LPFC_CRIT_TEMP;
1692 temp_event_data.data = (uint32_t)temperature;
1693
1694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d7c255b2 1695 "0406 Adapter maximum temperature exceeded "
57127f15
JS
1696 "(%ld), taking this port offline "
1697 "Data: x%x x%x x%x\n",
1698 temperature, phba->work_hs,
1699 phba->work_status[0], phba->work_status[1]);
1700
1701 shost = lpfc_shost_from_vport(phba->pport);
1702 fc_host_post_vendor_event(shost, fc_get_event_number(),
1703 sizeof(temp_event_data),
1704 (char *) &temp_event_data,
1705 SCSI_NL_VID_TYPE_PCI
1706 | PCI_VENDOR_ID_EMULEX);
1707
7af67051 1708 spin_lock_irq(&phba->hbalock);
7af67051
JS
1709 phba->over_temp_state = HBA_OVER_TEMP;
1710 spin_unlock_irq(&phba->hbalock);
09372820 1711 lpfc_offline_eratt(phba);
57127f15 1712
dea3101e
JB
1713 } else {
1714 /* The if clause above forces this code path when the status
9399627f
JS
1715 * failure is a value other than FFER6. Do not call the offline
1716 * twice. This is the adapter hardware error path.
dea3101e
JB
1717 */
1718 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 1719 "0457 Adapter Hardware Error "
dea3101e 1720 "Data: x%x x%x x%x\n",
e8b62011 1721 phba->work_hs,
dea3101e
JB
1722 phba->work_status[0], phba->work_status[1]);
1723
d2873e4c 1724 event_data = FC_REG_DUMP_EVENT;
92d7f7b0 1725 shost = lpfc_shost_from_vport(vport);
2e0fef85 1726 fc_host_post_vendor_event(shost, fc_get_event_number(),
d2873e4c
JS
1727 sizeof(event_data), (char *) &event_data,
1728 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1729
09372820 1730 lpfc_offline_eratt(phba);
dea3101e 1731 }
9399627f 1732 return;
dea3101e
JB
1733}
1734
618a5230
JS
1735/**
1736 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1737 * @phba: pointer to lpfc hba data structure.
1738 * @mbx_action: flag for mailbox shutdown action.
1739 *
1740 * This routine is invoked to perform an SLI4 port PCI function reset in
1741 * response to port status register polling attention. It waits for port
1742 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1743 * During this process, interrupt vectors are freed and later requested
1744 * for handling possible port resource change.
1745 **/
1746static int
e10b2022
JS
1747lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1748 bool en_rn_msg)
618a5230
JS
1749{
1750 int rc;
1751 uint32_t intr_mode;
1752
65791f1f
JS
1753 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1754 LPFC_SLI_INTF_IF_TYPE_2) {
1755 /*
1756 * On error status condition, driver need to wait for port
1757 * ready before performing reset.
1758 */
1759 rc = lpfc_sli4_pdev_status_reg_wait(phba);
0e916ee7 1760 if (rc)
65791f1f
JS
1761 return rc;
1762 }
0e916ee7 1763
65791f1f
JS
1764 /* need reset: attempt for port recovery */
1765 if (en_rn_msg)
1766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1767 "2887 Reset Needed: Attempting Port "
1768 "Recovery...\n");
1769 lpfc_offline_prep(phba, mbx_action);
1770 lpfc_offline(phba);
1771 /* release interrupt for possible resource change */
1772 lpfc_sli4_disable_intr(phba);
1773 lpfc_sli_brdrestart(phba);
1774 /* request and enable interrupt */
1775 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1776 if (intr_mode == LPFC_INTR_ERROR) {
1777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1778 "3175 Failed to enable interrupt\n");
1779 return -EIO;
618a5230 1780 }
65791f1f
JS
1781 phba->intr_mode = intr_mode;
1782 rc = lpfc_online(phba);
1783 if (rc == 0)
1784 lpfc_unblock_mgmt_io(phba);
1785
618a5230
JS
1786 return rc;
1787}
1788
da0436e9
JS
1789/**
1790 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1791 * @phba: pointer to lpfc hba data structure.
1792 *
1793 * This routine is invoked to handle the SLI4 HBA hardware error attention
1794 * conditions.
1795 **/
1796static void
1797lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1798{
1799 struct lpfc_vport *vport = phba->pport;
1800 uint32_t event_data;
1801 struct Scsi_Host *shost;
2fcee4bf 1802 uint32_t if_type;
2e90f4b5
JS
1803 struct lpfc_register portstat_reg = {0};
1804 uint32_t reg_err1, reg_err2;
1805 uint32_t uerrlo_reg, uemasklo_reg;
65791f1f 1806 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
e10b2022 1807 bool en_rn_msg = true;
946727dc 1808 struct temp_event temp_event_data;
65791f1f
JS
1809 struct lpfc_register portsmphr_reg;
1810 int rc, i;
da0436e9
JS
1811
1812 /* If the pci channel is offline, ignore possible errors, since
1813 * we cannot communicate with the pci card anyway.
1814 */
1815 if (pci_channel_offline(phba->pcidev))
1816 return;
da0436e9 1817
65791f1f 1818 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
2fcee4bf
JS
1819 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1820 switch (if_type) {
1821 case LPFC_SLI_INTF_IF_TYPE_0:
2e90f4b5
JS
1822 pci_rd_rc1 = lpfc_readl(
1823 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1824 &uerrlo_reg);
1825 pci_rd_rc2 = lpfc_readl(
1826 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1827 &uemasklo_reg);
1828 /* consider PCI bus read error as pci_channel_offline */
1829 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1830 return;
65791f1f
JS
1831 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1832 lpfc_sli4_offline_eratt(phba);
1833 return;
1834 }
1835 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1836 "7623 Checking UE recoverable");
1837
1838 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1839 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1840 &portsmphr_reg.word0))
1841 continue;
1842
1843 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1844 &portsmphr_reg);
1845 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1846 LPFC_PORT_SEM_UE_RECOVERABLE)
1847 break;
1848 /*Sleep for 1Sec, before checking SEMAPHORE */
1849 msleep(1000);
1850 }
1851
1852 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1853 "4827 smphr_port_status x%x : Waited %dSec",
1854 smphr_port_status, i);
1855
1856 /* Recoverable UE, reset the HBA device */
1857 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1858 LPFC_PORT_SEM_UE_RECOVERABLE) {
1859 for (i = 0; i < 20; i++) {
1860 msleep(1000);
1861 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1862 &portsmphr_reg.word0) &&
1863 (LPFC_POST_STAGE_PORT_READY ==
1864 bf_get(lpfc_port_smphr_port_status,
1865 &portsmphr_reg))) {
1866 rc = lpfc_sli4_port_sta_fn_reset(phba,
1867 LPFC_MBX_NO_WAIT, en_rn_msg);
1868 if (rc == 0)
1869 return;
1870 lpfc_printf_log(phba,
1871 KERN_ERR, LOG_INIT,
1872 "4215 Failed to recover UE");
1873 break;
1874 }
1875 }
1876 }
1877 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1878 "7624 Firmware not ready: Failing UE recovery,"
1879 " waited %dSec", i);
2fcee4bf
JS
1880 lpfc_sli4_offline_eratt(phba);
1881 break;
946727dc 1882
2fcee4bf 1883 case LPFC_SLI_INTF_IF_TYPE_2:
2e90f4b5
JS
1884 pci_rd_rc1 = lpfc_readl(
1885 phba->sli4_hba.u.if_type2.STATUSregaddr,
1886 &portstat_reg.word0);
1887 /* consider PCI bus read error as pci_channel_offline */
6b5151fd
JS
1888 if (pci_rd_rc1 == -EIO) {
1889 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1890 "3151 PCI bus read access failure: x%x\n",
1891 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2e90f4b5 1892 return;
6b5151fd 1893 }
2e90f4b5
JS
1894 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1895 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2fcee4bf 1896 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2fcee4bf
JS
1897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1898 "2889 Port Overtemperature event, "
946727dc
JS
1899 "taking port offline Data: x%x x%x\n",
1900 reg_err1, reg_err2);
1901
310429ef 1902 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
946727dc
JS
1903 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1904 temp_event_data.event_code = LPFC_CRIT_TEMP;
1905 temp_event_data.data = 0xFFFFFFFF;
1906
1907 shost = lpfc_shost_from_vport(phba->pport);
1908 fc_host_post_vendor_event(shost, fc_get_event_number(),
1909 sizeof(temp_event_data),
1910 (char *)&temp_event_data,
1911 SCSI_NL_VID_TYPE_PCI
1912 | PCI_VENDOR_ID_EMULEX);
1913
2fcee4bf
JS
1914 spin_lock_irq(&phba->hbalock);
1915 phba->over_temp_state = HBA_OVER_TEMP;
1916 spin_unlock_irq(&phba->hbalock);
1917 lpfc_sli4_offline_eratt(phba);
946727dc 1918 return;
2fcee4bf 1919 }
2e90f4b5 1920 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
e10b2022 1921 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2e90f4b5 1922 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e10b2022
JS
1923 "3143 Port Down: Firmware Update "
1924 "Detected\n");
1925 en_rn_msg = false;
1926 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2e90f4b5
JS
1927 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1928 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1929 "3144 Port Down: Debug Dump\n");
1930 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1931 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1932 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1933 "3145 Port Down: Provisioning\n");
618a5230 1934
946727dc
JS
1935 /* If resets are disabled then leave the HBA alone and return */
1936 if (!phba->cfg_enable_hba_reset)
1937 return;
1938
618a5230 1939 /* Check port status register for function reset */
e10b2022
JS
1940 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1941 en_rn_msg);
618a5230
JS
1942 if (rc == 0) {
1943 /* don't report event on forced debug dump */
1944 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1945 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1946 return;
1947 else
1948 break;
2fcee4bf 1949 }
618a5230 1950 /* fall through for not able to recover */
6b5151fd
JS
1951 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1952 "3152 Unrecoverable error, bring the port "
1953 "offline\n");
2fcee4bf
JS
1954 lpfc_sli4_offline_eratt(phba);
1955 break;
1956 case LPFC_SLI_INTF_IF_TYPE_1:
1957 default:
1958 break;
1959 }
2e90f4b5
JS
1960 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1961 "3123 Report dump event to upper layer\n");
1962 /* Send an internal error event to mgmt application */
1963 lpfc_board_errevt_to_mgmt(phba);
1964
1965 event_data = FC_REG_DUMP_EVENT;
1966 shost = lpfc_shost_from_vport(vport);
1967 fc_host_post_vendor_event(shost, fc_get_event_number(),
1968 sizeof(event_data), (char *) &event_data,
1969 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
da0436e9
JS
1970}
1971
1972/**
1973 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1974 * @phba: pointer to lpfc HBA data structure.
1975 *
1976 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1977 * routine from the API jump table function pointer from the lpfc_hba struct.
1978 *
1979 * Return codes
af901ca1 1980 * 0 - success.
da0436e9
JS
1981 * Any other value - error.
1982 **/
1983void
1984lpfc_handle_eratt(struct lpfc_hba *phba)
1985{
1986 (*phba->lpfc_handle_eratt)(phba);
1987}
1988
e59058c4 1989/**
3621a710 1990 * lpfc_handle_latt - The HBA link event handler
e59058c4
JS
1991 * @phba: pointer to lpfc hba data structure.
1992 *
1993 * This routine is invoked from the worker thread to handle a HBA host
895427bd 1994 * attention link event. SLI3 only.
e59058c4 1995 **/
dea3101e 1996void
2e0fef85 1997lpfc_handle_latt(struct lpfc_hba *phba)
dea3101e 1998{
2e0fef85
JS
1999 struct lpfc_vport *vport = phba->pport;
2000 struct lpfc_sli *psli = &phba->sli;
dea3101e
JB
2001 LPFC_MBOXQ_t *pmb;
2002 volatile uint32_t control;
2003 struct lpfc_dmabuf *mp;
09372820 2004 int rc = 0;
dea3101e
JB
2005
2006 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
09372820
JS
2007 if (!pmb) {
2008 rc = 1;
dea3101e 2009 goto lpfc_handle_latt_err_exit;
09372820 2010 }
dea3101e
JB
2011
2012 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
09372820
JS
2013 if (!mp) {
2014 rc = 2;
dea3101e 2015 goto lpfc_handle_latt_free_pmb;
09372820 2016 }
dea3101e
JB
2017
2018 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
09372820
JS
2019 if (!mp->virt) {
2020 rc = 3;
dea3101e 2021 goto lpfc_handle_latt_free_mp;
09372820 2022 }
dea3101e 2023
6281bfe0 2024 /* Cleanup any outstanding ELS commands */
549e55cd 2025 lpfc_els_flush_all_cmd(phba);
dea3101e
JB
2026
2027 psli->slistat.link_event++;
76a95d75
JS
2028 lpfc_read_topology(phba, pmb, mp);
2029 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2e0fef85 2030 pmb->vport = vport;
0d2b6b83 2031 /* Block ELS IOCBs until we have processed this mbox command */
895427bd 2032 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
0b727fea 2033 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
09372820
JS
2034 if (rc == MBX_NOT_FINISHED) {
2035 rc = 4;
14691150 2036 goto lpfc_handle_latt_free_mbuf;
09372820 2037 }
dea3101e
JB
2038
2039 /* Clear Link Attention in HA REG */
2e0fef85 2040 spin_lock_irq(&phba->hbalock);
dea3101e
JB
2041 writel(HA_LATT, phba->HAregaddr);
2042 readl(phba->HAregaddr); /* flush */
2e0fef85 2043 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
2044
2045 return;
2046
14691150 2047lpfc_handle_latt_free_mbuf:
895427bd 2048 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
14691150 2049 lpfc_mbuf_free(phba, mp->virt, mp->phys);
dea3101e
JB
2050lpfc_handle_latt_free_mp:
2051 kfree(mp);
2052lpfc_handle_latt_free_pmb:
1dcb58e5 2053 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e
JB
2054lpfc_handle_latt_err_exit:
2055 /* Enable Link attention interrupts */
2e0fef85 2056 spin_lock_irq(&phba->hbalock);
dea3101e
JB
2057 psli->sli_flag |= LPFC_PROCESS_LA;
2058 control = readl(phba->HCregaddr);
2059 control |= HC_LAINT_ENA;
2060 writel(control, phba->HCregaddr);
2061 readl(phba->HCregaddr); /* flush */
2062
2063 /* Clear Link Attention in HA REG */
2064 writel(HA_LATT, phba->HAregaddr);
2065 readl(phba->HAregaddr); /* flush */
2e0fef85 2066 spin_unlock_irq(&phba->hbalock);
dea3101e 2067 lpfc_linkdown(phba);
2e0fef85 2068 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2069
09372820
JS
2070 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2071 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
dea3101e
JB
2072
2073 return;
2074}
2075
e59058c4 2076/**
3621a710 2077 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
e59058c4
JS
2078 * @phba: pointer to lpfc hba data structure.
2079 * @vpd: pointer to the vital product data.
2080 * @len: length of the vital product data in bytes.
2081 *
2082 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2083 * an array of characters. In this routine, the ModelName, ProgramType, and
2084 * ModelDesc, etc. fields of the phba data structure will be populated.
2085 *
2086 * Return codes
2087 * 0 - pointer to the VPD passed in is NULL
2088 * 1 - success
2089 **/
3772a991 2090int
2e0fef85 2091lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
dea3101e
JB
2092{
2093 uint8_t lenlo, lenhi;
07da60c1 2094 int Length;
dea3101e
JB
2095 int i, j;
2096 int finished = 0;
2097 int index = 0;
2098
2099 if (!vpd)
2100 return 0;
2101
2102 /* Vital Product */
ed957684 2103 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011 2104 "0455 Vital Product Data: x%x x%x x%x x%x\n",
dea3101e
JB
2105 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2106 (uint32_t) vpd[3]);
74b72a59 2107 while (!finished && (index < (len - 4))) {
dea3101e
JB
2108 switch (vpd[index]) {
2109 case 0x82:
74b72a59 2110 case 0x91:
dea3101e
JB
2111 index += 1;
2112 lenlo = vpd[index];
2113 index += 1;
2114 lenhi = vpd[index];
2115 index += 1;
2116 i = ((((unsigned short)lenhi) << 8) + lenlo);
2117 index += i;
2118 break;
2119 case 0x90:
2120 index += 1;
2121 lenlo = vpd[index];
2122 index += 1;
2123 lenhi = vpd[index];
2124 index += 1;
2125 Length = ((((unsigned short)lenhi) << 8) + lenlo);
74b72a59
JW
2126 if (Length > len - index)
2127 Length = len - index;
dea3101e
JB
2128 while (Length > 0) {
2129 /* Look for Serial Number */
2130 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2131 index += 2;
2132 i = vpd[index];
2133 index += 1;
2134 j = 0;
2135 Length -= (3+i);
2136 while(i--) {
2137 phba->SerialNumber[j++] = vpd[index++];
2138 if (j == 31)
2139 break;
2140 }
2141 phba->SerialNumber[j] = 0;
2142 continue;
2143 }
2144 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2145 phba->vpd_flag |= VPD_MODEL_DESC;
2146 index += 2;
2147 i = vpd[index];
2148 index += 1;
2149 j = 0;
2150 Length -= (3+i);
2151 while(i--) {
2152 phba->ModelDesc[j++] = vpd[index++];
2153 if (j == 255)
2154 break;
2155 }
2156 phba->ModelDesc[j] = 0;
2157 continue;
2158 }
2159 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2160 phba->vpd_flag |= VPD_MODEL_NAME;
2161 index += 2;
2162 i = vpd[index];
2163 index += 1;
2164 j = 0;
2165 Length -= (3+i);
2166 while(i--) {
2167 phba->ModelName[j++] = vpd[index++];
2168 if (j == 79)
2169 break;
2170 }
2171 phba->ModelName[j] = 0;
2172 continue;
2173 }
2174 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2175 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2176 index += 2;
2177 i = vpd[index];
2178 index += 1;
2179 j = 0;
2180 Length -= (3+i);
2181 while(i--) {
2182 phba->ProgramType[j++] = vpd[index++];
2183 if (j == 255)
2184 break;
2185 }
2186 phba->ProgramType[j] = 0;
2187 continue;
2188 }
2189 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2190 phba->vpd_flag |= VPD_PORT;
2191 index += 2;
2192 i = vpd[index];
2193 index += 1;
2194 j = 0;
2195 Length -= (3+i);
2196 while(i--) {
cd1c8301
JS
2197 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2198 (phba->sli4_hba.pport_name_sta ==
2199 LPFC_SLI4_PPNAME_GET)) {
2200 j++;
2201 index++;
2202 } else
2203 phba->Port[j++] = vpd[index++];
2204 if (j == 19)
2205 break;
dea3101e 2206 }
cd1c8301
JS
2207 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2208 (phba->sli4_hba.pport_name_sta ==
2209 LPFC_SLI4_PPNAME_NON))
2210 phba->Port[j] = 0;
dea3101e
JB
2211 continue;
2212 }
2213 else {
2214 index += 2;
2215 i = vpd[index];
2216 index += 1;
2217 index += i;
2218 Length -= (3 + i);
2219 }
2220 }
2221 finished = 0;
2222 break;
2223 case 0x78:
2224 finished = 1;
2225 break;
2226 default:
2227 index ++;
2228 break;
2229 }
74b72a59 2230 }
dea3101e
JB
2231
2232 return(1);
2233}
2234
e59058c4 2235/**
3621a710 2236 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
e59058c4
JS
2237 * @phba: pointer to lpfc hba data structure.
2238 * @mdp: pointer to the data structure to hold the derived model name.
2239 * @descp: pointer to the data structure to hold the derived description.
2240 *
2241 * This routine retrieves HBA's description based on its registered PCI device
2242 * ID. The @descp passed into this function points to an array of 256 chars. It
2243 * shall be returned with the model name, maximum speed, and the host bus type.
2244 * The @mdp passed into this function points to an array of 80 chars. When the
2245 * function returns, the @mdp will be filled with the model name.
2246 **/
dea3101e 2247static void
2e0fef85 2248lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
dea3101e
JB
2249{
2250 lpfc_vpd_t *vp;
fefcb2b6 2251 uint16_t dev_id = phba->pcidev->device;
74b72a59 2252 int max_speed;
84774a4d 2253 int GE = 0;
da0436e9 2254 int oneConnect = 0; /* default is not a oneConnect */
74b72a59 2255 struct {
a747c9ce
JS
2256 char *name;
2257 char *bus;
2258 char *function;
2259 } m = {"<Unknown>", "", ""};
74b72a59
JW
2260
2261 if (mdp && mdp[0] != '\0'
2262 && descp && descp[0] != '\0')
2263 return;
2264
d38dd52c
JS
2265 if (phba->lmt & LMT_32Gb)
2266 max_speed = 32;
2267 else if (phba->lmt & LMT_16Gb)
c0c11512
JS
2268 max_speed = 16;
2269 else if (phba->lmt & LMT_10Gb)
74b72a59
JW
2270 max_speed = 10;
2271 else if (phba->lmt & LMT_8Gb)
2272 max_speed = 8;
2273 else if (phba->lmt & LMT_4Gb)
2274 max_speed = 4;
2275 else if (phba->lmt & LMT_2Gb)
2276 max_speed = 2;
4169d868 2277 else if (phba->lmt & LMT_1Gb)
74b72a59 2278 max_speed = 1;
4169d868
JS
2279 else
2280 max_speed = 0;
dea3101e
JB
2281
2282 vp = &phba->vpd;
dea3101e 2283
e4adb204 2284 switch (dev_id) {
06325e74 2285 case PCI_DEVICE_ID_FIREFLY:
12222f4f
JS
2286 m = (typeof(m)){"LP6000", "PCI",
2287 "Obsolete, Unsupported Fibre Channel Adapter"};
06325e74 2288 break;
dea3101e
JB
2289 case PCI_DEVICE_ID_SUPERFLY:
2290 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
12222f4f 2291 m = (typeof(m)){"LP7000", "PCI", ""};
dea3101e 2292 else
12222f4f
JS
2293 m = (typeof(m)){"LP7000E", "PCI", ""};
2294 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
dea3101e
JB
2295 break;
2296 case PCI_DEVICE_ID_DRAGONFLY:
a747c9ce 2297 m = (typeof(m)){"LP8000", "PCI",
12222f4f 2298 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e
JB
2299 break;
2300 case PCI_DEVICE_ID_CENTAUR:
2301 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
12222f4f 2302 m = (typeof(m)){"LP9002", "PCI", ""};
dea3101e 2303 else
12222f4f
JS
2304 m = (typeof(m)){"LP9000", "PCI", ""};
2305 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
dea3101e
JB
2306 break;
2307 case PCI_DEVICE_ID_RFLY:
a747c9ce 2308 m = (typeof(m)){"LP952", "PCI",
12222f4f 2309 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e
JB
2310 break;
2311 case PCI_DEVICE_ID_PEGASUS:
a747c9ce 2312 m = (typeof(m)){"LP9802", "PCI-X",
12222f4f 2313 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e
JB
2314 break;
2315 case PCI_DEVICE_ID_THOR:
a747c9ce 2316 m = (typeof(m)){"LP10000", "PCI-X",
12222f4f 2317 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e
JB
2318 break;
2319 case PCI_DEVICE_ID_VIPER:
a747c9ce 2320 m = (typeof(m)){"LPX1000", "PCI-X",
12222f4f 2321 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e
JB
2322 break;
2323 case PCI_DEVICE_ID_PFLY:
a747c9ce 2324 m = (typeof(m)){"LP982", "PCI-X",
12222f4f 2325 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e
JB
2326 break;
2327 case PCI_DEVICE_ID_TFLY:
a747c9ce 2328 m = (typeof(m)){"LP1050", "PCI-X",
12222f4f 2329 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e
JB
2330 break;
2331 case PCI_DEVICE_ID_HELIOS:
a747c9ce 2332 m = (typeof(m)){"LP11000", "PCI-X2",
12222f4f 2333 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2334 break;
e4adb204 2335 case PCI_DEVICE_ID_HELIOS_SCSP:
a747c9ce 2336 m = (typeof(m)){"LP11000-SP", "PCI-X2",
12222f4f 2337 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2338 break;
2339 case PCI_DEVICE_ID_HELIOS_DCSP:
a747c9ce 2340 m = (typeof(m)){"LP11002-SP", "PCI-X2",
12222f4f 2341 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2342 break;
2343 case PCI_DEVICE_ID_NEPTUNE:
12222f4f
JS
2344 m = (typeof(m)){"LPe1000", "PCIe",
2345 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2346 break;
2347 case PCI_DEVICE_ID_NEPTUNE_SCSP:
12222f4f
JS
2348 m = (typeof(m)){"LPe1000-SP", "PCIe",
2349 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2350 break;
2351 case PCI_DEVICE_ID_NEPTUNE_DCSP:
12222f4f
JS
2352 m = (typeof(m)){"LPe1002-SP", "PCIe",
2353 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204 2354 break;
dea3101e 2355 case PCI_DEVICE_ID_BMID:
a747c9ce 2356 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
dea3101e
JB
2357 break;
2358 case PCI_DEVICE_ID_BSMB:
12222f4f
JS
2359 m = (typeof(m)){"LP111", "PCI-X2",
2360 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e
JB
2361 break;
2362 case PCI_DEVICE_ID_ZEPHYR:
a747c9ce 2363 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
dea3101e 2364 break;
e4adb204 2365 case PCI_DEVICE_ID_ZEPHYR_SCSP:
a747c9ce 2366 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
e4adb204
JSEC
2367 break;
2368 case PCI_DEVICE_ID_ZEPHYR_DCSP:
a747c9ce 2369 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
a257bf90 2370 GE = 1;
e4adb204 2371 break;
dea3101e 2372 case PCI_DEVICE_ID_ZMID:
a747c9ce 2373 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
dea3101e
JB
2374 break;
2375 case PCI_DEVICE_ID_ZSMB:
a747c9ce 2376 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
dea3101e
JB
2377 break;
2378 case PCI_DEVICE_ID_LP101:
12222f4f
JS
2379 m = (typeof(m)){"LP101", "PCI-X",
2380 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e
JB
2381 break;
2382 case PCI_DEVICE_ID_LP10000S:
12222f4f
JS
2383 m = (typeof(m)){"LP10000-S", "PCI",
2384 "Obsolete, Unsupported Fibre Channel Adapter"};
06325e74 2385 break;
e4adb204 2386 case PCI_DEVICE_ID_LP11000S:
12222f4f
JS
2387 m = (typeof(m)){"LP11000-S", "PCI-X2",
2388 "Obsolete, Unsupported Fibre Channel Adapter"};
18a3b596 2389 break;
e4adb204 2390 case PCI_DEVICE_ID_LPE11000S:
12222f4f
JS
2391 m = (typeof(m)){"LPe11000-S", "PCIe",
2392 "Obsolete, Unsupported Fibre Channel Adapter"};
5cc36b3c 2393 break;
b87eab38 2394 case PCI_DEVICE_ID_SAT:
a747c9ce 2395 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2396 break;
2397 case PCI_DEVICE_ID_SAT_MID:
a747c9ce 2398 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2399 break;
2400 case PCI_DEVICE_ID_SAT_SMB:
a747c9ce 2401 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2402 break;
2403 case PCI_DEVICE_ID_SAT_DCSP:
a747c9ce 2404 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2405 break;
2406 case PCI_DEVICE_ID_SAT_SCSP:
a747c9ce 2407 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2408 break;
2409 case PCI_DEVICE_ID_SAT_S:
a747c9ce 2410 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
b87eab38 2411 break;
84774a4d 2412 case PCI_DEVICE_ID_HORNET:
12222f4f
JS
2413 m = (typeof(m)){"LP21000", "PCIe",
2414 "Obsolete, Unsupported FCoE Adapter"};
84774a4d
JS
2415 GE = 1;
2416 break;
2417 case PCI_DEVICE_ID_PROTEUS_VF:
a747c9ce 2418 m = (typeof(m)){"LPev12000", "PCIe IOV",
12222f4f 2419 "Obsolete, Unsupported Fibre Channel Adapter"};
84774a4d
JS
2420 break;
2421 case PCI_DEVICE_ID_PROTEUS_PF:
a747c9ce 2422 m = (typeof(m)){"LPev12000", "PCIe IOV",
12222f4f 2423 "Obsolete, Unsupported Fibre Channel Adapter"};
84774a4d
JS
2424 break;
2425 case PCI_DEVICE_ID_PROTEUS_S:
a747c9ce 2426 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
12222f4f 2427 "Obsolete, Unsupported Fibre Channel Adapter"};
84774a4d 2428 break;
da0436e9
JS
2429 case PCI_DEVICE_ID_TIGERSHARK:
2430 oneConnect = 1;
a747c9ce 2431 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
da0436e9 2432 break;
a747c9ce 2433 case PCI_DEVICE_ID_TOMCAT:
6669f9bb 2434 oneConnect = 1;
a747c9ce
JS
2435 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2436 break;
2437 case PCI_DEVICE_ID_FALCON:
2438 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2439 "EmulexSecure Fibre"};
6669f9bb 2440 break;
98fc5dd9
JS
2441 case PCI_DEVICE_ID_BALIUS:
2442 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
12222f4f 2443 "Obsolete, Unsupported Fibre Channel Adapter"};
98fc5dd9 2444 break;
085c647c 2445 case PCI_DEVICE_ID_LANCER_FC:
c0c11512 2446 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
085c647c 2447 break;
12222f4f
JS
2448 case PCI_DEVICE_ID_LANCER_FC_VF:
2449 m = (typeof(m)){"LPe16000", "PCIe",
2450 "Obsolete, Unsupported Fibre Channel Adapter"};
2451 break;
085c647c
JS
2452 case PCI_DEVICE_ID_LANCER_FCOE:
2453 oneConnect = 1;
079b5c91 2454 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
085c647c 2455 break;
12222f4f
JS
2456 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2457 oneConnect = 1;
2458 m = (typeof(m)){"OCe15100", "PCIe",
2459 "Obsolete, Unsupported FCoE"};
2460 break;
d38dd52c
JS
2461 case PCI_DEVICE_ID_LANCER_G6_FC:
2462 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2463 break;
f8cafd38
JS
2464 case PCI_DEVICE_ID_SKYHAWK:
2465 case PCI_DEVICE_ID_SKYHAWK_VF:
2466 oneConnect = 1;
2467 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2468 break;
5cc36b3c 2469 default:
a747c9ce 2470 m = (typeof(m)){"Unknown", "", ""};
e4adb204 2471 break;
dea3101e 2472 }
74b72a59
JW
2473
2474 if (mdp && mdp[0] == '\0')
2475 snprintf(mdp, 79,"%s", m.name);
c0c11512
JS
2476 /*
2477 * oneConnect hba requires special processing, they are all initiators
da0436e9
JS
2478 * and we put the port number on the end
2479 */
2480 if (descp && descp[0] == '\0') {
2481 if (oneConnect)
2482 snprintf(descp, 255,
4169d868 2483 "Emulex OneConnect %s, %s Initiator %s",
a747c9ce 2484 m.name, m.function,
da0436e9 2485 phba->Port);
4169d868
JS
2486 else if (max_speed == 0)
2487 snprintf(descp, 255,
290237d2 2488 "Emulex %s %s %s",
4169d868 2489 m.name, m.bus, m.function);
da0436e9
JS
2490 else
2491 snprintf(descp, 255,
2492 "Emulex %s %d%s %s %s",
a747c9ce
JS
2493 m.name, max_speed, (GE) ? "GE" : "Gb",
2494 m.bus, m.function);
da0436e9 2495 }
dea3101e
JB
2496}
2497
e59058c4 2498/**
3621a710 2499 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
e59058c4
JS
2500 * @phba: pointer to lpfc hba data structure.
2501 * @pring: pointer to a IOCB ring.
2502 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2503 *
2504 * This routine posts a given number of IOCBs with the associated DMA buffer
2505 * descriptors specified by the cnt argument to the given IOCB ring.
2506 *
2507 * Return codes
2508 * The number of IOCBs NOT able to be posted to the IOCB ring.
2509 **/
dea3101e 2510int
495a714c 2511lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
dea3101e
JB
2512{
2513 IOCB_t *icmd;
0bd4ca25 2514 struct lpfc_iocbq *iocb;
dea3101e
JB
2515 struct lpfc_dmabuf *mp1, *mp2;
2516
2517 cnt += pring->missbufcnt;
2518
2519 /* While there are buffers to post */
2520 while (cnt > 0) {
2521 /* Allocate buffer for command iocb */
0bd4ca25 2522 iocb = lpfc_sli_get_iocbq(phba);
dea3101e
JB
2523 if (iocb == NULL) {
2524 pring->missbufcnt = cnt;
2525 return cnt;
2526 }
dea3101e
JB
2527 icmd = &iocb->iocb;
2528
2529 /* 2 buffers can be posted per command */
2530 /* Allocate buffer to post */
2531 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2532 if (mp1)
98c9ea5c
JS
2533 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2534 if (!mp1 || !mp1->virt) {
c9475cb0 2535 kfree(mp1);
604a3e30 2536 lpfc_sli_release_iocbq(phba, iocb);
dea3101e
JB
2537 pring->missbufcnt = cnt;
2538 return cnt;
2539 }
2540
2541 INIT_LIST_HEAD(&mp1->list);
2542 /* Allocate buffer to post */
2543 if (cnt > 1) {
2544 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2545 if (mp2)
2546 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2547 &mp2->phys);
98c9ea5c 2548 if (!mp2 || !mp2->virt) {
c9475cb0 2549 kfree(mp2);
dea3101e
JB
2550 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2551 kfree(mp1);
604a3e30 2552 lpfc_sli_release_iocbq(phba, iocb);
dea3101e
JB
2553 pring->missbufcnt = cnt;
2554 return cnt;
2555 }
2556
2557 INIT_LIST_HEAD(&mp2->list);
2558 } else {
2559 mp2 = NULL;
2560 }
2561
2562 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2563 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2564 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2565 icmd->ulpBdeCount = 1;
2566 cnt--;
2567 if (mp2) {
2568 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2569 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2570 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2571 cnt--;
2572 icmd->ulpBdeCount = 2;
2573 }
2574
2575 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2576 icmd->ulpLe = 1;
2577
3772a991
JS
2578 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2579 IOCB_ERROR) {
dea3101e
JB
2580 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2581 kfree(mp1);
2582 cnt++;
2583 if (mp2) {
2584 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2585 kfree(mp2);
2586 cnt++;
2587 }
604a3e30 2588 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2589 pring->missbufcnt = cnt;
dea3101e
JB
2590 return cnt;
2591 }
dea3101e 2592 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
92d7f7b0 2593 if (mp2)
dea3101e 2594 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
dea3101e
JB
2595 }
2596 pring->missbufcnt = 0;
2597 return 0;
2598}
2599
e59058c4 2600/**
3621a710 2601 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
e59058c4
JS
2602 * @phba: pointer to lpfc hba data structure.
2603 *
2604 * This routine posts initial receive IOCB buffers to the ELS ring. The
2605 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
895427bd 2606 * set to 64 IOCBs. SLI3 only.
e59058c4
JS
2607 *
2608 * Return codes
2609 * 0 - success (currently always success)
2610 **/
dea3101e 2611static int
2e0fef85 2612lpfc_post_rcv_buf(struct lpfc_hba *phba)
dea3101e
JB
2613{
2614 struct lpfc_sli *psli = &phba->sli;
2615
2616 /* Ring 0, ELS / CT buffers */
895427bd 2617 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
dea3101e
JB
2618 /* Ring 2 - FCP no buffers needed */
2619
2620 return 0;
2621}
2622
2623#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2624
e59058c4 2625/**
3621a710 2626 * lpfc_sha_init - Set up initial array of hash table entries
e59058c4
JS
2627 * @HashResultPointer: pointer to an array as hash table.
2628 *
2629 * This routine sets up the initial values to the array of hash table entries
2630 * for the LC HBAs.
2631 **/
dea3101e
JB
2632static void
2633lpfc_sha_init(uint32_t * HashResultPointer)
2634{
2635 HashResultPointer[0] = 0x67452301;
2636 HashResultPointer[1] = 0xEFCDAB89;
2637 HashResultPointer[2] = 0x98BADCFE;
2638 HashResultPointer[3] = 0x10325476;
2639 HashResultPointer[4] = 0xC3D2E1F0;
2640}
2641
e59058c4 2642/**
3621a710 2643 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
e59058c4
JS
2644 * @HashResultPointer: pointer to an initial/result hash table.
2645 * @HashWorkingPointer: pointer to an working hash table.
2646 *
2647 * This routine iterates an initial hash table pointed by @HashResultPointer
2648 * with the values from the working hash table pointeed by @HashWorkingPointer.
2649 * The results are putting back to the initial hash table, returned through
2650 * the @HashResultPointer as the result hash table.
2651 **/
dea3101e
JB
2652static void
2653lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2654{
2655 int t;
2656 uint32_t TEMP;
2657 uint32_t A, B, C, D, E;
2658 t = 16;
2659 do {
2660 HashWorkingPointer[t] =
2661 S(1,
2662 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2663 8] ^
2664 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2665 } while (++t <= 79);
2666 t = 0;
2667 A = HashResultPointer[0];
2668 B = HashResultPointer[1];
2669 C = HashResultPointer[2];
2670 D = HashResultPointer[3];
2671 E = HashResultPointer[4];
2672
2673 do {
2674 if (t < 20) {
2675 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2676 } else if (t < 40) {
2677 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2678 } else if (t < 60) {
2679 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2680 } else {
2681 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2682 }
2683 TEMP += S(5, A) + E + HashWorkingPointer[t];
2684 E = D;
2685 D = C;
2686 C = S(30, B);
2687 B = A;
2688 A = TEMP;
2689 } while (++t <= 79);
2690
2691 HashResultPointer[0] += A;
2692 HashResultPointer[1] += B;
2693 HashResultPointer[2] += C;
2694 HashResultPointer[3] += D;
2695 HashResultPointer[4] += E;
2696
2697}
2698
e59058c4 2699/**
3621a710 2700 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
e59058c4
JS
2701 * @RandomChallenge: pointer to the entry of host challenge random number array.
2702 * @HashWorking: pointer to the entry of the working hash array.
2703 *
2704 * This routine calculates the working hash array referred by @HashWorking
2705 * from the challenge random numbers associated with the host, referred by
2706 * @RandomChallenge. The result is put into the entry of the working hash
2707 * array and returned by reference through @HashWorking.
2708 **/
dea3101e
JB
2709static void
2710lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2711{
2712 *HashWorking = (*RandomChallenge ^ *HashWorking);
2713}
2714
e59058c4 2715/**
3621a710 2716 * lpfc_hba_init - Perform special handling for LC HBA initialization
e59058c4
JS
2717 * @phba: pointer to lpfc hba data structure.
2718 * @hbainit: pointer to an array of unsigned 32-bit integers.
2719 *
2720 * This routine performs the special handling for LC HBA initialization.
2721 **/
dea3101e
JB
2722void
2723lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2724{
2725 int t;
2726 uint32_t *HashWorking;
2e0fef85 2727 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
dea3101e 2728
bbfbbbc1 2729 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
dea3101e
JB
2730 if (!HashWorking)
2731 return;
2732
dea3101e
JB
2733 HashWorking[0] = HashWorking[78] = *pwwnn++;
2734 HashWorking[1] = HashWorking[79] = *pwwnn;
2735
2736 for (t = 0; t < 7; t++)
2737 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2738
2739 lpfc_sha_init(hbainit);
2740 lpfc_sha_iterate(hbainit, HashWorking);
2741 kfree(HashWorking);
2742}
2743
e59058c4 2744/**
3621a710 2745 * lpfc_cleanup - Performs vport cleanups before deleting a vport
e59058c4
JS
2746 * @vport: pointer to a virtual N_Port data structure.
2747 *
2748 * This routine performs the necessary cleanups before deleting the @vport.
2749 * It invokes the discovery state machine to perform necessary state
2750 * transitions and to release the ndlps associated with the @vport. Note,
2751 * the physical port is treated as @vport 0.
2752 **/
87af33fe 2753void
2e0fef85 2754lpfc_cleanup(struct lpfc_vport *vport)
dea3101e 2755{
87af33fe 2756 struct lpfc_hba *phba = vport->phba;
dea3101e 2757 struct lpfc_nodelist *ndlp, *next_ndlp;
a8adb832 2758 int i = 0;
dea3101e 2759
87af33fe
JS
2760 if (phba->link_state > LPFC_LINK_DOWN)
2761 lpfc_port_link_failure(vport);
2762
2763 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
2764 if (!NLP_CHK_NODE_ACT(ndlp)) {
2765 ndlp = lpfc_enable_node(vport, ndlp,
2766 NLP_STE_UNUSED_NODE);
2767 if (!ndlp)
2768 continue;
2769 spin_lock_irq(&phba->ndlp_lock);
2770 NLP_SET_FREE_REQ(ndlp);
2771 spin_unlock_irq(&phba->ndlp_lock);
2772 /* Trigger the release of the ndlp memory */
2773 lpfc_nlp_put(ndlp);
2774 continue;
2775 }
2776 spin_lock_irq(&phba->ndlp_lock);
2777 if (NLP_CHK_FREE_REQ(ndlp)) {
2778 /* The ndlp should not be in memory free mode already */
2779 spin_unlock_irq(&phba->ndlp_lock);
2780 continue;
2781 } else
2782 /* Indicate request for freeing ndlp memory */
2783 NLP_SET_FREE_REQ(ndlp);
2784 spin_unlock_irq(&phba->ndlp_lock);
2785
58da1ffb
JS
2786 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2787 ndlp->nlp_DID == Fabric_DID) {
2788 /* Just free up ndlp with Fabric_DID for vports */
2789 lpfc_nlp_put(ndlp);
2790 continue;
2791 }
2792
eff4a01b
JS
2793 /* take care of nodes in unused state before the state
2794 * machine taking action.
2795 */
2796 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2797 lpfc_nlp_put(ndlp);
2798 continue;
2799 }
2800
87af33fe
JS
2801 if (ndlp->nlp_type & NLP_FABRIC)
2802 lpfc_disc_state_machine(vport, ndlp, NULL,
2803 NLP_EVT_DEVICE_RECOVERY);
e47c9093 2804
a0f2d3ef
JS
2805 if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
2806 /* Remove the NVME transport reference now and
2807 * continue to remove the node.
2808 */
2809 lpfc_nlp_put(ndlp);
2810 }
2811
87af33fe
JS
2812 lpfc_disc_state_machine(vport, ndlp, NULL,
2813 NLP_EVT_DEVICE_RM);
2814 }
2815
a8adb832
JS
2816 /* At this point, ALL ndlp's should be gone
2817 * because of the previous NLP_EVT_DEVICE_RM.
2818 * Lets wait for this to happen, if needed.
2819 */
87af33fe 2820 while (!list_empty(&vport->fc_nodes)) {
a8adb832 2821 if (i++ > 3000) {
87af33fe 2822 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
a8adb832 2823 "0233 Nodelist not empty\n");
e47c9093
JS
2824 list_for_each_entry_safe(ndlp, next_ndlp,
2825 &vport->fc_nodes, nlp_listp) {
2826 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2827 LOG_NODE,
d7c255b2 2828 "0282 did:x%x ndlp:x%p "
e47c9093
JS
2829 "usgmap:x%x refcnt:%d\n",
2830 ndlp->nlp_DID, (void *)ndlp,
2831 ndlp->nlp_usg_map,
2c935bc5 2832 kref_read(&ndlp->kref));
e47c9093 2833 }
a8adb832 2834 break;
87af33fe 2835 }
a8adb832
JS
2836
2837 /* Wait for any activity on ndlps to settle */
2838 msleep(10);
87af33fe 2839 }
1151e3ec 2840 lpfc_cleanup_vports_rrqs(vport, NULL);
dea3101e
JB
2841}
2842
e59058c4 2843/**
3621a710 2844 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
e59058c4
JS
2845 * @vport: pointer to a virtual N_Port data structure.
2846 *
2847 * This routine stops all the timers associated with a @vport. This function
2848 * is invoked before disabling or deleting a @vport. Note that the physical
2849 * port is treated as @vport 0.
2850 **/
92d7f7b0
JS
2851void
2852lpfc_stop_vport_timers(struct lpfc_vport *vport)
dea3101e 2853{
92d7f7b0 2854 del_timer_sync(&vport->els_tmofunc);
92494144 2855 del_timer_sync(&vport->delayed_disc_tmo);
92d7f7b0
JS
2856 lpfc_can_disctmo(vport);
2857 return;
dea3101e
JB
2858}
2859
ecfd03c6
JS
2860/**
2861 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2862 * @phba: pointer to lpfc hba data structure.
2863 *
2864 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2865 * caller of this routine should already hold the host lock.
2866 **/
2867void
2868__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2869{
5ac6b303
JS
2870 /* Clear pending FCF rediscovery wait flag */
2871 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2872
ecfd03c6
JS
2873 /* Now, try to stop the timer */
2874 del_timer(&phba->fcf.redisc_wait);
2875}
2876
2877/**
2878 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2879 * @phba: pointer to lpfc hba data structure.
2880 *
2881 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2882 * checks whether the FCF rediscovery wait timer is pending with the host
2883 * lock held before proceeding with disabling the timer and clearing the
2884 * wait timer pendig flag.
2885 **/
2886void
2887lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2888{
2889 spin_lock_irq(&phba->hbalock);
2890 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2891 /* FCF rediscovery timer already fired or stopped */
2892 spin_unlock_irq(&phba->hbalock);
2893 return;
2894 }
2895 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
5ac6b303
JS
2896 /* Clear failover in progress flags */
2897 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
ecfd03c6
JS
2898 spin_unlock_irq(&phba->hbalock);
2899}
2900
e59058c4 2901/**
3772a991 2902 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
e59058c4
JS
2903 * @phba: pointer to lpfc hba data structure.
2904 *
2905 * This routine stops all the timers associated with a HBA. This function is
2906 * invoked before either putting a HBA offline or unloading the driver.
2907 **/
3772a991
JS
2908void
2909lpfc_stop_hba_timers(struct lpfc_hba *phba)
dea3101e 2910{
51ef4c26 2911 lpfc_stop_vport_timers(phba->pport);
2e0fef85 2912 del_timer_sync(&phba->sli.mbox_tmo);
92d7f7b0 2913 del_timer_sync(&phba->fabric_block_timer);
9399627f 2914 del_timer_sync(&phba->eratt_poll);
3772a991 2915 del_timer_sync(&phba->hb_tmofunc);
1151e3ec
JS
2916 if (phba->sli_rev == LPFC_SLI_REV4) {
2917 del_timer_sync(&phba->rrq_tmr);
2918 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2919 }
3772a991
JS
2920 phba->hb_outstanding = 0;
2921
2922 switch (phba->pci_dev_grp) {
2923 case LPFC_PCI_DEV_LP:
2924 /* Stop any LightPulse device specific driver timers */
2925 del_timer_sync(&phba->fcp_poll_timer);
2926 break;
2927 case LPFC_PCI_DEV_OC:
2928 /* Stop any OneConnect device sepcific driver timers */
ecfd03c6 2929 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3772a991
JS
2930 break;
2931 default:
2932 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2933 "0297 Invalid device group (x%x)\n",
2934 phba->pci_dev_grp);
2935 break;
2936 }
2e0fef85 2937 return;
dea3101e
JB
2938}
2939
e59058c4 2940/**
3621a710 2941 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
e59058c4
JS
2942 * @phba: pointer to lpfc hba data structure.
2943 *
2944 * This routine marks a HBA's management interface as blocked. Once the HBA's
2945 * management interface is marked as blocked, all the user space access to
2946 * the HBA, whether they are from sysfs interface or libdfc interface will
2947 * all be blocked. The HBA is set to block the management interface when the
2948 * driver prepares the HBA interface for online or offline.
2949 **/
a6ababd2 2950static void
618a5230 2951lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
a6ababd2
AB
2952{
2953 unsigned long iflag;
6e7288d9
JS
2954 uint8_t actcmd = MBX_HEARTBEAT;
2955 unsigned long timeout;
2956
a6ababd2
AB
2957 spin_lock_irqsave(&phba->hbalock, iflag);
2958 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
618a5230
JS
2959 spin_unlock_irqrestore(&phba->hbalock, iflag);
2960 if (mbx_action == LPFC_MBX_NO_WAIT)
2961 return;
2962 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2963 spin_lock_irqsave(&phba->hbalock, iflag);
a183a15f 2964 if (phba->sli.mbox_active) {
6e7288d9 2965 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
a183a15f
JS
2966 /* Determine how long we might wait for the active mailbox
2967 * command to be gracefully completed by firmware.
2968 */
2969 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2970 phba->sli.mbox_active) * 1000) + jiffies;
2971 }
a6ababd2 2972 spin_unlock_irqrestore(&phba->hbalock, iflag);
a183a15f 2973
6e7288d9
JS
2974 /* Wait for the outstnading mailbox command to complete */
2975 while (phba->sli.mbox_active) {
2976 /* Check active mailbox complete status every 2ms */
2977 msleep(2);
2978 if (time_after(jiffies, timeout)) {
2979 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2980 "2813 Mgmt IO is Blocked %x "
2981 "- mbox cmd %x still active\n",
2982 phba->sli.sli_flag, actcmd);
2983 break;
2984 }
2985 }
a6ababd2
AB
2986}
2987
6b5151fd
JS
2988/**
2989 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2990 * @phba: pointer to lpfc hba data structure.
2991 *
2992 * Allocate RPIs for all active remote nodes. This is needed whenever
2993 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2994 * is to fixup the temporary rpi assignments.
2995 **/
2996void
2997lpfc_sli4_node_prep(struct lpfc_hba *phba)
2998{
2999 struct lpfc_nodelist *ndlp, *next_ndlp;
3000 struct lpfc_vport **vports;
9d3d340d
JS
3001 int i, rpi;
3002 unsigned long flags;
6b5151fd
JS
3003
3004 if (phba->sli_rev != LPFC_SLI_REV4)
3005 return;
3006
3007 vports = lpfc_create_vport_work_array(phba);
9d3d340d
JS
3008 if (vports == NULL)
3009 return;
6b5151fd 3010
9d3d340d
JS
3011 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3012 if (vports[i]->load_flag & FC_UNLOADING)
3013 continue;
3014
3015 list_for_each_entry_safe(ndlp, next_ndlp,
3016 &vports[i]->fc_nodes,
3017 nlp_listp) {
3018 if (!NLP_CHK_NODE_ACT(ndlp))
3019 continue;
3020 rpi = lpfc_sli4_alloc_rpi(phba);
3021 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3022 spin_lock_irqsave(&phba->ndlp_lock, flags);
3023 NLP_CLR_NODE_ACT(ndlp);
3024 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3025 continue;
6b5151fd 3026 }
9d3d340d
JS
3027 ndlp->nlp_rpi = rpi;
3028 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3029 "0009 rpi:%x DID:%x "
3030 "flg:%x map:%x %p\n", ndlp->nlp_rpi,
3031 ndlp->nlp_DID, ndlp->nlp_flag,
3032 ndlp->nlp_usg_map, ndlp);
6b5151fd
JS
3033 }
3034 }
3035 lpfc_destroy_vport_work_array(phba, vports);
3036}
3037
e59058c4 3038/**
3621a710 3039 * lpfc_online - Initialize and bring a HBA online
e59058c4
JS
3040 * @phba: pointer to lpfc hba data structure.
3041 *
3042 * This routine initializes the HBA and brings a HBA online. During this
3043 * process, the management interface is blocked to prevent user space access
3044 * to the HBA interfering with the driver initialization.
3045 *
3046 * Return codes
3047 * 0 - successful
3048 * 1 - failed
3049 **/
dea3101e 3050int
2e0fef85 3051lpfc_online(struct lpfc_hba *phba)
dea3101e 3052{
372bd282 3053 struct lpfc_vport *vport;
549e55cd
JS
3054 struct lpfc_vport **vports;
3055 int i;
16a3a208 3056 bool vpis_cleared = false;
2e0fef85 3057
dea3101e
JB
3058 if (!phba)
3059 return 0;
372bd282 3060 vport = phba->pport;
dea3101e 3061
2e0fef85 3062 if (!(vport->fc_flag & FC_OFFLINE_MODE))
dea3101e
JB
3063 return 0;
3064
ed957684 3065 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
e8b62011 3066 "0458 Bring Adapter online\n");
dea3101e 3067
618a5230 3068 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
46fa311e 3069
da0436e9
JS
3070 if (phba->sli_rev == LPFC_SLI_REV4) {
3071 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3072 lpfc_unblock_mgmt_io(phba);
3073 return 1;
3074 }
16a3a208
JS
3075 spin_lock_irq(&phba->hbalock);
3076 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3077 vpis_cleared = true;
3078 spin_unlock_irq(&phba->hbalock);
da0436e9 3079 } else {
895427bd 3080 lpfc_sli_queue_init(phba);
da0436e9
JS
3081 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3082 lpfc_unblock_mgmt_io(phba);
3083 return 1;
3084 }
46fa311e 3085 }
dea3101e 3086
549e55cd 3087 vports = lpfc_create_vport_work_array(phba);
aeb6641f 3088 if (vports != NULL) {
da0436e9 3089 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd
JS
3090 struct Scsi_Host *shost;
3091 shost = lpfc_shost_from_vport(vports[i]);
3092 spin_lock_irq(shost->host_lock);
3093 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3094 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3095 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
16a3a208 3096 if (phba->sli_rev == LPFC_SLI_REV4) {
1c6834a7 3097 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
16a3a208
JS
3098 if ((vpis_cleared) &&
3099 (vports[i]->port_type !=
3100 LPFC_PHYSICAL_PORT))
3101 vports[i]->vpi = 0;
3102 }
549e55cd
JS
3103 spin_unlock_irq(shost->host_lock);
3104 }
aeb6641f
AB
3105 }
3106 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 3107
46fa311e 3108 lpfc_unblock_mgmt_io(phba);
dea3101e
JB
3109 return 0;
3110}
3111
e59058c4 3112/**
3621a710 3113 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
e59058c4
JS
3114 * @phba: pointer to lpfc hba data structure.
3115 *
3116 * This routine marks a HBA's management interface as not blocked. Once the
3117 * HBA's management interface is marked as not blocked, all the user space
3118 * access to the HBA, whether they are from sysfs interface or libdfc
3119 * interface will be allowed. The HBA is set to block the management interface
3120 * when the driver prepares the HBA interface for online or offline and then
3121 * set to unblock the management interface afterwards.
3122 **/
46fa311e
JS
3123void
3124lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3125{
3126 unsigned long iflag;
3127
2e0fef85
JS
3128 spin_lock_irqsave(&phba->hbalock, iflag);
3129 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3130 spin_unlock_irqrestore(&phba->hbalock, iflag);
46fa311e
JS
3131}
3132
e59058c4 3133/**
3621a710 3134 * lpfc_offline_prep - Prepare a HBA to be brought offline
e59058c4
JS
3135 * @phba: pointer to lpfc hba data structure.
3136 *
3137 * This routine is invoked to prepare a HBA to be brought offline. It performs
3138 * unregistration login to all the nodes on all vports and flushes the mailbox
3139 * queue to make it ready to be brought offline.
3140 **/
46fa311e 3141void
618a5230 3142lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
46fa311e 3143{
2e0fef85 3144 struct lpfc_vport *vport = phba->pport;
46fa311e 3145 struct lpfc_nodelist *ndlp, *next_ndlp;
87af33fe 3146 struct lpfc_vport **vports;
72100cc4 3147 struct Scsi_Host *shost;
87af33fe 3148 int i;
dea3101e 3149
2e0fef85 3150 if (vport->fc_flag & FC_OFFLINE_MODE)
46fa311e 3151 return;
dea3101e 3152
618a5230 3153 lpfc_block_mgmt_io(phba, mbx_action);
dea3101e
JB
3154
3155 lpfc_linkdown(phba);
3156
87af33fe
JS
3157 /* Issue an unreg_login to all nodes on all vports */
3158 vports = lpfc_create_vport_work_array(phba);
3159 if (vports != NULL) {
da0436e9 3160 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
a8adb832
JS
3161 if (vports[i]->load_flag & FC_UNLOADING)
3162 continue;
72100cc4
JS
3163 shost = lpfc_shost_from_vport(vports[i]);
3164 spin_lock_irq(shost->host_lock);
c868595d 3165 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
695a814e
JS
3166 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3167 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
72100cc4 3168 spin_unlock_irq(shost->host_lock);
695a814e 3169
87af33fe
JS
3170 shost = lpfc_shost_from_vport(vports[i]);
3171 list_for_each_entry_safe(ndlp, next_ndlp,
3172 &vports[i]->fc_nodes,
3173 nlp_listp) {
e47c9093
JS
3174 if (!NLP_CHK_NODE_ACT(ndlp))
3175 continue;
87af33fe
JS
3176 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3177 continue;
3178 if (ndlp->nlp_type & NLP_FABRIC) {
3179 lpfc_disc_state_machine(vports[i], ndlp,
3180 NULL, NLP_EVT_DEVICE_RECOVERY);
3181 lpfc_disc_state_machine(vports[i], ndlp,
3182 NULL, NLP_EVT_DEVICE_RM);
3183 }
3184 spin_lock_irq(shost->host_lock);
3185 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
401ee0c1 3186 spin_unlock_irq(shost->host_lock);
6b5151fd
JS
3187 /*
3188 * Whenever an SLI4 port goes offline, free the
401ee0c1
JS
3189 * RPI. Get a new RPI when the adapter port
3190 * comes back online.
6b5151fd 3191 */
be6bb941
JS
3192 if (phba->sli_rev == LPFC_SLI_REV4) {
3193 lpfc_printf_vlog(ndlp->vport,
3194 KERN_INFO, LOG_NODE,
3195 "0011 lpfc_offline: "
3196 "ndlp:x%p did %x "
3197 "usgmap:x%x rpi:%x\n",
3198 ndlp, ndlp->nlp_DID,
3199 ndlp->nlp_usg_map,
3200 ndlp->nlp_rpi);
3201
6b5151fd 3202 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
be6bb941 3203 }
87af33fe
JS
3204 lpfc_unreg_rpi(vports[i], ndlp);
3205 }
3206 }
3207 }
09372820 3208 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 3209
618a5230 3210 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
46fa311e
JS
3211}
3212
e59058c4 3213/**
3621a710 3214 * lpfc_offline - Bring a HBA offline
e59058c4
JS
3215 * @phba: pointer to lpfc hba data structure.
3216 *
3217 * This routine actually brings a HBA offline. It stops all the timers
3218 * associated with the HBA, brings down the SLI layer, and eventually
3219 * marks the HBA as in offline state for the upper layer protocol.
3220 **/
46fa311e 3221void
2e0fef85 3222lpfc_offline(struct lpfc_hba *phba)
46fa311e 3223{
549e55cd
JS
3224 struct Scsi_Host *shost;
3225 struct lpfc_vport **vports;
3226 int i;
46fa311e 3227
549e55cd 3228 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
46fa311e 3229 return;
688a8863 3230
da0436e9
JS
3231 /* stop port and all timers associated with this hba */
3232 lpfc_stop_port(phba);
51ef4c26
JS
3233 vports = lpfc_create_vport_work_array(phba);
3234 if (vports != NULL)
da0436e9 3235 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
51ef4c26 3236 lpfc_stop_vport_timers(vports[i]);
09372820 3237 lpfc_destroy_vport_work_array(phba, vports);
92d7f7b0 3238 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
e8b62011 3239 "0460 Bring Adapter offline\n");
dea3101e
JB
3240 /* Bring down the SLI Layer and cleanup. The HBA is offline
3241 now. */
3242 lpfc_sli_hba_down(phba);
92d7f7b0 3243 spin_lock_irq(&phba->hbalock);
7054a606 3244 phba->work_ha = 0;
92d7f7b0 3245 spin_unlock_irq(&phba->hbalock);
549e55cd
JS
3246 vports = lpfc_create_vport_work_array(phba);
3247 if (vports != NULL)
da0436e9 3248 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd 3249 shost = lpfc_shost_from_vport(vports[i]);
549e55cd
JS
3250 spin_lock_irq(shost->host_lock);
3251 vports[i]->work_port_events = 0;
3252 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3253 spin_unlock_irq(shost->host_lock);
3254 }
09372820 3255 lpfc_destroy_vport_work_array(phba, vports);
dea3101e
JB
3256}
3257
e59058c4 3258/**
3621a710 3259 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
e59058c4
JS
3260 * @phba: pointer to lpfc hba data structure.
3261 *
3262 * This routine is to free all the SCSI buffers and IOCBs from the driver
3263 * list back to kernel. It is called from lpfc_pci_remove_one to free
3264 * the internal resources before the device is removed from the system.
e59058c4 3265 **/
8a9d2e80 3266static void
2e0fef85 3267lpfc_scsi_free(struct lpfc_hba *phba)
dea3101e
JB
3268{
3269 struct lpfc_scsi_buf *sb, *sb_next;
dea3101e 3270
895427bd
JS
3271 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3272 return;
3273
2e0fef85 3274 spin_lock_irq(&phba->hbalock);
a40fc5f0 3275
dea3101e 3276 /* Release all the lpfc_scsi_bufs maintained by this host. */
a40fc5f0
JS
3277
3278 spin_lock(&phba->scsi_buf_list_put_lock);
3279 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3280 list) {
dea3101e 3281 list_del(&sb->list);
895427bd 3282 pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
92d7f7b0 3283 sb->dma_handle);
dea3101e
JB
3284 kfree(sb);
3285 phba->total_scsi_bufs--;
3286 }
a40fc5f0
JS
3287 spin_unlock(&phba->scsi_buf_list_put_lock);
3288
3289 spin_lock(&phba->scsi_buf_list_get_lock);
3290 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3291 list) {
dea3101e 3292 list_del(&sb->list);
895427bd 3293 pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
92d7f7b0 3294 sb->dma_handle);
dea3101e
JB
3295 kfree(sb);
3296 phba->total_scsi_bufs--;
3297 }
a40fc5f0 3298 spin_unlock(&phba->scsi_buf_list_get_lock);
2e0fef85 3299 spin_unlock_irq(&phba->hbalock);
8a9d2e80 3300}
895427bd
JS
3301/**
3302 * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
3303 * @phba: pointer to lpfc hba data structure.
3304 *
3305 * This routine is to free all the NVME buffers and IOCBs from the driver
3306 * list back to kernel. It is called from lpfc_pci_remove_one to free
3307 * the internal resources before the device is removed from the system.
3308 **/
3309static void
3310lpfc_nvme_free(struct lpfc_hba *phba)
3311{
3312 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
895427bd
JS
3313
3314 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3315 return;
3316
3317 spin_lock_irq(&phba->hbalock);
3318
3319 /* Release all the lpfc_nvme_bufs maintained by this host. */
3320 spin_lock(&phba->nvme_buf_list_put_lock);
3321 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3322 &phba->lpfc_nvme_buf_list_put, list) {
3323 list_del(&lpfc_ncmd->list);
3324 pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
3325 lpfc_ncmd->dma_handle);
3326 kfree(lpfc_ncmd);
3327 phba->total_nvme_bufs--;
3328 }
3329 spin_unlock(&phba->nvme_buf_list_put_lock);
3330
3331 spin_lock(&phba->nvme_buf_list_get_lock);
3332 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3333 &phba->lpfc_nvme_buf_list_get, list) {
3334 list_del(&lpfc_ncmd->list);
3335 pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
3336 lpfc_ncmd->dma_handle);
3337 kfree(lpfc_ncmd);
3338 phba->total_nvme_bufs--;
3339 }
3340 spin_unlock(&phba->nvme_buf_list_get_lock);
895427bd
JS
3341 spin_unlock_irq(&phba->hbalock);
3342}
8a9d2e80 3343/**
895427bd 3344 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
8a9d2e80
JS
3345 * @phba: pointer to lpfc hba data structure.
3346 *
3347 * This routine first calculates the sizes of the current els and allocated
3348 * scsi sgl lists, and then goes through all sgls to updates the physical
3349 * XRIs assigned due to port function reset. During port initialization, the
3350 * current els and allocated scsi sgl lists are 0s.
3351 *
3352 * Return codes
3353 * 0 - successful (for now, it always returns 0)
3354 **/
3355int
895427bd 3356lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
8a9d2e80
JS
3357{
3358 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
895427bd 3359 uint16_t i, lxri, xri_cnt, els_xri_cnt;
8a9d2e80 3360 LIST_HEAD(els_sgl_list);
8a9d2e80
JS
3361 int rc;
3362
3363 /*
3364 * update on pci function's els xri-sgl list
3365 */
3366 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
895427bd 3367
8a9d2e80
JS
3368 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3369 /* els xri-sgl expanded */
3370 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3371 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3372 "3157 ELS xri-sgl count increased from "
3373 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3374 els_xri_cnt);
3375 /* allocate the additional els sgls */
3376 for (i = 0; i < xri_cnt; i++) {
3377 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3378 GFP_KERNEL);
3379 if (sglq_entry == NULL) {
3380 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3381 "2562 Failure to allocate an "
3382 "ELS sgl entry:%d\n", i);
3383 rc = -ENOMEM;
3384 goto out_free_mem;
3385 }
3386 sglq_entry->buff_type = GEN_BUFF_TYPE;
3387 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3388 &sglq_entry->phys);
3389 if (sglq_entry->virt == NULL) {
3390 kfree(sglq_entry);
3391 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3392 "2563 Failure to allocate an "
3393 "ELS mbuf:%d\n", i);
3394 rc = -ENOMEM;
3395 goto out_free_mem;
3396 }
3397 sglq_entry->sgl = sglq_entry->virt;
3398 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3399 sglq_entry->state = SGL_FREED;
3400 list_add_tail(&sglq_entry->list, &els_sgl_list);
3401 }
38c20673 3402 spin_lock_irq(&phba->hbalock);
895427bd
JS
3403 spin_lock(&phba->sli4_hba.sgl_list_lock);
3404 list_splice_init(&els_sgl_list,
3405 &phba->sli4_hba.lpfc_els_sgl_list);
3406 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 3407 spin_unlock_irq(&phba->hbalock);
8a9d2e80
JS
3408 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3409 /* els xri-sgl shrinked */
3410 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3411 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3412 "3158 ELS xri-sgl count decreased from "
3413 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3414 els_xri_cnt);
3415 spin_lock_irq(&phba->hbalock);
895427bd
JS
3416 spin_lock(&phba->sli4_hba.sgl_list_lock);
3417 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3418 &els_sgl_list);
8a9d2e80
JS
3419 /* release extra els sgls from list */
3420 for (i = 0; i < xri_cnt; i++) {
3421 list_remove_head(&els_sgl_list,
3422 sglq_entry, struct lpfc_sglq, list);
3423 if (sglq_entry) {
895427bd
JS
3424 __lpfc_mbuf_free(phba, sglq_entry->virt,
3425 sglq_entry->phys);
8a9d2e80
JS
3426 kfree(sglq_entry);
3427 }
3428 }
895427bd
JS
3429 list_splice_init(&els_sgl_list,
3430 &phba->sli4_hba.lpfc_els_sgl_list);
3431 spin_unlock(&phba->sli4_hba.sgl_list_lock);
8a9d2e80
JS
3432 spin_unlock_irq(&phba->hbalock);
3433 } else
3434 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3435 "3163 ELS xri-sgl count unchanged: %d\n",
3436 els_xri_cnt);
3437 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3438
3439 /* update xris to els sgls on the list */
3440 sglq_entry = NULL;
3441 sglq_entry_next = NULL;
3442 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
895427bd 3443 &phba->sli4_hba.lpfc_els_sgl_list, list) {
8a9d2e80
JS
3444 lxri = lpfc_sli4_next_xritag(phba);
3445 if (lxri == NO_XRI) {
3446 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3447 "2400 Failed to allocate xri for "
3448 "ELS sgl\n");
3449 rc = -ENOMEM;
3450 goto out_free_mem;
3451 }
3452 sglq_entry->sli4_lxritag = lxri;
3453 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3454 }
895427bd
JS
3455 return 0;
3456
3457out_free_mem:
3458 lpfc_free_els_sgl_list(phba);
3459 return rc;
3460}
3461
f358dd0c
JS
3462/**
3463 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3464 * @phba: pointer to lpfc hba data structure.
3465 *
3466 * This routine first calculates the sizes of the current els and allocated
3467 * scsi sgl lists, and then goes through all sgls to updates the physical
3468 * XRIs assigned due to port function reset. During port initialization, the
3469 * current els and allocated scsi sgl lists are 0s.
3470 *
3471 * Return codes
3472 * 0 - successful (for now, it always returns 0)
3473 **/
3474int
3475lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3476{
3477 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3478 uint16_t i, lxri, xri_cnt, els_xri_cnt;
6c621a22 3479 uint16_t nvmet_xri_cnt;
f358dd0c
JS
3480 LIST_HEAD(nvmet_sgl_list);
3481 int rc;
3482
3483 /*
3484 * update on pci function's nvmet xri-sgl list
3485 */
3486 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
61f3d4bf 3487
6c621a22
JS
3488 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3489 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
f358dd0c
JS
3490
3491 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3492 /* els xri-sgl expanded */
3493 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3494 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3495 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3496 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3497 /* allocate the additional nvmet sgls */
3498 for (i = 0; i < xri_cnt; i++) {
3499 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3500 GFP_KERNEL);
3501 if (sglq_entry == NULL) {
3502 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3503 "6303 Failure to allocate an "
3504 "NVMET sgl entry:%d\n", i);
3505 rc = -ENOMEM;
3506 goto out_free_mem;
3507 }
3508 sglq_entry->buff_type = NVMET_BUFF_TYPE;
3509 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3510 &sglq_entry->phys);
3511 if (sglq_entry->virt == NULL) {
3512 kfree(sglq_entry);
3513 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3514 "6304 Failure to allocate an "
3515 "NVMET buf:%d\n", i);
3516 rc = -ENOMEM;
3517 goto out_free_mem;
3518 }
3519 sglq_entry->sgl = sglq_entry->virt;
3520 memset(sglq_entry->sgl, 0,
3521 phba->cfg_sg_dma_buf_size);
3522 sglq_entry->state = SGL_FREED;
3523 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3524 }
3525 spin_lock_irq(&phba->hbalock);
3526 spin_lock(&phba->sli4_hba.sgl_list_lock);
3527 list_splice_init(&nvmet_sgl_list,
3528 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3529 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3530 spin_unlock_irq(&phba->hbalock);
3531 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3532 /* nvmet xri-sgl shrunk */
3533 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3534 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3535 "6305 NVMET xri-sgl count decreased from "
3536 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3537 nvmet_xri_cnt);
3538 spin_lock_irq(&phba->hbalock);
3539 spin_lock(&phba->sli4_hba.sgl_list_lock);
3540 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3541 &nvmet_sgl_list);
3542 /* release extra nvmet sgls from list */
3543 for (i = 0; i < xri_cnt; i++) {
3544 list_remove_head(&nvmet_sgl_list,
3545 sglq_entry, struct lpfc_sglq, list);
3546 if (sglq_entry) {
3547 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3548 sglq_entry->phys);
3549 kfree(sglq_entry);
3550 }
3551 }
3552 list_splice_init(&nvmet_sgl_list,
3553 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3554 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3555 spin_unlock_irq(&phba->hbalock);
3556 } else
3557 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3558 "6306 NVMET xri-sgl count unchanged: %d\n",
3559 nvmet_xri_cnt);
3560 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3561
3562 /* update xris to nvmet sgls on the list */
3563 sglq_entry = NULL;
3564 sglq_entry_next = NULL;
3565 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3566 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3567 lxri = lpfc_sli4_next_xritag(phba);
3568 if (lxri == NO_XRI) {
3569 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3570 "6307 Failed to allocate xri for "
3571 "NVMET sgl\n");
3572 rc = -ENOMEM;
3573 goto out_free_mem;
3574 }
3575 sglq_entry->sli4_lxritag = lxri;
3576 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3577 }
3578 return 0;
3579
3580out_free_mem:
3581 lpfc_free_nvmet_sgl_list(phba);
3582 return rc;
3583}
3584
895427bd
JS
3585/**
3586 * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
3587 * @phba: pointer to lpfc hba data structure.
3588 *
3589 * This routine first calculates the sizes of the current els and allocated
3590 * scsi sgl lists, and then goes through all sgls to updates the physical
3591 * XRIs assigned due to port function reset. During port initialization, the
3592 * current els and allocated scsi sgl lists are 0s.
3593 *
3594 * Return codes
3595 * 0 - successful (for now, it always returns 0)
3596 **/
3597int
3598lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
3599{
3600 struct lpfc_scsi_buf *psb, *psb_next;
3601 uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
3602 LIST_HEAD(scsi_sgl_list);
3603 int rc;
8a9d2e80
JS
3604
3605 /*
895427bd 3606 * update on pci function's els xri-sgl list
8a9d2e80 3607 */
895427bd 3608 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
8a9d2e80
JS
3609 phba->total_scsi_bufs = 0;
3610
895427bd
JS
3611 /*
3612 * update on pci function's allocated scsi xri-sgl list
3613 */
8a9d2e80
JS
3614 /* maximum number of xris available for scsi buffers */
3615 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
3616 els_xri_cnt;
3617
895427bd
JS
3618 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3619 return 0;
3620
3621 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3622 phba->sli4_hba.scsi_xri_max = /* Split them up */
3623 (phba->sli4_hba.scsi_xri_max *
3624 phba->cfg_xri_split) / 100;
8a9d2e80 3625
a40fc5f0 3626 spin_lock_irq(&phba->scsi_buf_list_get_lock);
164cecd1 3627 spin_lock(&phba->scsi_buf_list_put_lock);
a40fc5f0
JS
3628 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3629 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
164cecd1 3630 spin_unlock(&phba->scsi_buf_list_put_lock);
a40fc5f0 3631 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
8a9d2e80 3632
e8c0a779
JS
3633 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3634 "6060 Current allocated SCSI xri-sgl count:%d, "
3635 "maximum SCSI xri count:%d (split:%d)\n",
3636 phba->sli4_hba.scsi_xri_cnt,
3637 phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);
3638
8a9d2e80
JS
3639 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
3640 /* max scsi xri shrinked below the allocated scsi buffers */
3641 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
3642 phba->sli4_hba.scsi_xri_max;
3643 /* release the extra allocated scsi buffers */
3644 for (i = 0; i < scsi_xri_cnt; i++) {
3645 list_remove_head(&scsi_sgl_list, psb,
3646 struct lpfc_scsi_buf, list);
a2fc4aef 3647 if (psb) {
895427bd 3648 pci_pool_free(phba->lpfc_sg_dma_buf_pool,
a2fc4aef
JS
3649 psb->data, psb->dma_handle);
3650 kfree(psb);
3651 }
8a9d2e80 3652 }
a40fc5f0 3653 spin_lock_irq(&phba->scsi_buf_list_get_lock);
8a9d2e80 3654 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
a40fc5f0 3655 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
8a9d2e80
JS
3656 }
3657
3658 /* update xris associated to remaining allocated scsi buffers */
3659 psb = NULL;
3660 psb_next = NULL;
3661 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
3662 lxri = lpfc_sli4_next_xritag(phba);
3663 if (lxri == NO_XRI) {
3664 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3665 "2560 Failed to allocate xri for "
3666 "scsi buffer\n");
3667 rc = -ENOMEM;
3668 goto out_free_mem;
3669 }
3670 psb->cur_iocbq.sli4_lxritag = lxri;
3671 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3672 }
a40fc5f0 3673 spin_lock_irq(&phba->scsi_buf_list_get_lock);
164cecd1 3674 spin_lock(&phba->scsi_buf_list_put_lock);
a40fc5f0
JS
3675 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3676 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
164cecd1 3677 spin_unlock(&phba->scsi_buf_list_put_lock);
a40fc5f0 3678 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
dea3101e 3679 return 0;
8a9d2e80
JS
3680
3681out_free_mem:
8a9d2e80
JS
3682 lpfc_scsi_free(phba);
3683 return rc;
dea3101e
JB
3684}
3685
96418b5e
JS
3686static uint64_t
3687lpfc_get_wwpn(struct lpfc_hba *phba)
3688{
3689 uint64_t wwn;
3690 int rc;
3691 LPFC_MBOXQ_t *mboxq;
3692 MAILBOX_t *mb;
3693
96418b5e
JS
3694 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
3695 GFP_KERNEL);
3696 if (!mboxq)
3697 return (uint64_t)-1;
3698
3699 /* First get WWN of HBA instance */
3700 lpfc_read_nv(phba, mboxq);
3701 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3702 if (rc != MBX_SUCCESS) {
3703 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3704 "6019 Mailbox failed , mbxCmd x%x "
3705 "READ_NV, mbxStatus x%x\n",
3706 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
3707 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
3708 mempool_free(mboxq, phba->mbox_mem_pool);
3709 return (uint64_t) -1;
3710 }
3711 mb = &mboxq->u.mb;
3712 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
3713 /* wwn is WWPN of HBA instance */
3714 mempool_free(mboxq, phba->mbox_mem_pool);
3715 if (phba->sli_rev == LPFC_SLI_REV4)
3716 return be64_to_cpu(wwn);
3717 else
3718 return (((wwn & 0xffffffff00000000) >> 32) |
3719 ((wwn & 0x00000000ffffffff) << 32));
3720
3721}
3722
895427bd
JS
3723/**
3724 * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
3725 * @phba: pointer to lpfc hba data structure.
3726 *
3727 * This routine first calculates the sizes of the current els and allocated
3728 * scsi sgl lists, and then goes through all sgls to updates the physical
3729 * XRIs assigned due to port function reset. During port initialization, the
3730 * current els and allocated scsi sgl lists are 0s.
3731 *
3732 * Return codes
3733 * 0 - successful (for now, it always returns 0)
3734 **/
3735int
3736lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
3737{
3738 struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
3739 uint16_t i, lxri, els_xri_cnt;
3740 uint16_t nvme_xri_cnt, nvme_xri_max;
3741 LIST_HEAD(nvme_sgl_list);
3742 int rc;
3743
3744 phba->total_nvme_bufs = 0;
3745
3746 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3747 return 0;
3748 /*
3749 * update on pci function's allocated nvme xri-sgl list
3750 */
3751
3752 /* maximum number of xris available for nvme buffers */
3753 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3754 nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3755 phba->sli4_hba.nvme_xri_max = nvme_xri_max;
3756 phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
3757
3758 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3759 "6074 Current allocated NVME xri-sgl count:%d, "
3760 "maximum NVME xri count:%d\n",
3761 phba->sli4_hba.nvme_xri_cnt,
3762 phba->sli4_hba.nvme_xri_max);
3763
3764 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3765 spin_lock(&phba->nvme_buf_list_put_lock);
3766 list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
3767 list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
3768 spin_unlock(&phba->nvme_buf_list_put_lock);
3769 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3770
3771 if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
3772 /* max nvme xri shrunk below the allocated nvme buffers */
3773 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3774 nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
3775 phba->sli4_hba.nvme_xri_max;
3776 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3777 /* release the extra allocated nvme buffers */
3778 for (i = 0; i < nvme_xri_cnt; i++) {
3779 list_remove_head(&nvme_sgl_list, lpfc_ncmd,
3780 struct lpfc_nvme_buf, list);
3781 if (lpfc_ncmd) {
3782 pci_pool_free(phba->lpfc_sg_dma_buf_pool,
3783 lpfc_ncmd->data,
3784 lpfc_ncmd->dma_handle);
3785 kfree(lpfc_ncmd);
3786 }
3787 }
3788 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3789 phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
3790 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3791 }
3792
3793 /* update xris associated to remaining allocated nvme buffers */
3794 lpfc_ncmd = NULL;
3795 lpfc_ncmd_next = NULL;
3796 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3797 &nvme_sgl_list, list) {
3798 lxri = lpfc_sli4_next_xritag(phba);
3799 if (lxri == NO_XRI) {
3800 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3801 "6075 Failed to allocate xri for "
3802 "nvme buffer\n");
3803 rc = -ENOMEM;
3804 goto out_free_mem;
3805 }
3806 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
3807 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3808 }
3809 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3810 spin_lock(&phba->nvme_buf_list_put_lock);
3811 list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
3812 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
3813 spin_unlock(&phba->nvme_buf_list_put_lock);
3814 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3815 return 0;
3816
3817out_free_mem:
3818 lpfc_nvme_free(phba);
3819 return rc;
3820}
3821
e59058c4 3822/**
3621a710 3823 * lpfc_create_port - Create an FC port
e59058c4
JS
3824 * @phba: pointer to lpfc hba data structure.
3825 * @instance: a unique integer ID to this FC port.
3826 * @dev: pointer to the device data structure.
3827 *
3828 * This routine creates a FC port for the upper layer protocol. The FC port
3829 * can be created on top of either a physical port or a virtual port provided
3830 * by the HBA. This routine also allocates a SCSI host data structure (shost)
3831 * and associates the FC port created before adding the shost into the SCSI
3832 * layer.
3833 *
3834 * Return codes
3835 * @vport - pointer to the virtual N_Port data structure.
3836 * NULL - port create failed.
3837 **/
2e0fef85 3838struct lpfc_vport *
3de2a653 3839lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
47a8617c 3840{
2e0fef85 3841 struct lpfc_vport *vport;
895427bd 3842 struct Scsi_Host *shost = NULL;
2e0fef85 3843 int error = 0;
96418b5e
JS
3844 int i;
3845 uint64_t wwn;
3846 bool use_no_reset_hba = false;
56bc8028 3847 int rc;
96418b5e 3848
56bc8028
JS
3849 if (lpfc_no_hba_reset_cnt) {
3850 if (phba->sli_rev < LPFC_SLI_REV4 &&
3851 dev == &phba->pcidev->dev) {
3852 /* Reset the port first */
3853 lpfc_sli_brdrestart(phba);
3854 rc = lpfc_sli_chipset_init(phba);
3855 if (rc)
3856 return NULL;
3857 }
3858 wwn = lpfc_get_wwpn(phba);
3859 }
96418b5e
JS
3860
3861 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
3862 if (wwn == lpfc_no_hba_reset[i]) {
3863 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3864 "6020 Setting use_no_reset port=%llx\n",
3865 wwn);
3866 use_no_reset_hba = true;
3867 break;
3868 }
3869 }
47a8617c 3870
895427bd
JS
3871 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
3872 if (dev != &phba->pcidev->dev) {
3873 shost = scsi_host_alloc(&lpfc_vport_template,
3874 sizeof(struct lpfc_vport));
3875 } else {
96418b5e 3876 if (!use_no_reset_hba)
895427bd
JS
3877 shost = scsi_host_alloc(&lpfc_template,
3878 sizeof(struct lpfc_vport));
3879 else
96418b5e 3880 shost = scsi_host_alloc(&lpfc_template_no_hr,
895427bd
JS
3881 sizeof(struct lpfc_vport));
3882 }
3883 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
3884 shost = scsi_host_alloc(&lpfc_template_nvme,
ea4142f6
JS
3885 sizeof(struct lpfc_vport));
3886 }
2e0fef85
JS
3887 if (!shost)
3888 goto out;
47a8617c 3889
2e0fef85
JS
3890 vport = (struct lpfc_vport *) shost->hostdata;
3891 vport->phba = phba;
2e0fef85 3892 vport->load_flag |= FC_LOADING;
92d7f7b0 3893 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7f5f3d0d 3894 vport->fc_rscn_flush = 0;
3de2a653 3895 lpfc_get_vport_cfgparam(vport);
895427bd 3896
2e0fef85
JS
3897 shost->unique_id = instance;
3898 shost->max_id = LPFC_MAX_TARGET;
3de2a653 3899 shost->max_lun = vport->cfg_max_luns;
2e0fef85
JS
3900 shost->this_id = -1;
3901 shost->max_cmd_len = 16;
8b0dff14 3902 shost->nr_hw_queues = phba->cfg_fcp_io_channel;
da0436e9 3903 if (phba->sli_rev == LPFC_SLI_REV4) {
28baac74 3904 shost->dma_boundary =
cb5172ea 3905 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
da0436e9
JS
3906 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
3907 }
81301a9b 3908
47a8617c 3909 /*
2e0fef85
JS
3910 * Set initial can_queue value since 0 is no longer supported and
3911 * scsi_add_host will fail. This will be adjusted later based on the
3912 * max xri value determined in hba setup.
47a8617c 3913 */
2e0fef85 3914 shost->can_queue = phba->cfg_hba_queue_depth - 10;
3de2a653 3915 if (dev != &phba->pcidev->dev) {
92d7f7b0
JS
3916 shost->transportt = lpfc_vport_transport_template;
3917 vport->port_type = LPFC_NPIV_PORT;
3918 } else {
3919 shost->transportt = lpfc_transport_template;
3920 vport->port_type = LPFC_PHYSICAL_PORT;
3921 }
47a8617c 3922
2e0fef85
JS
3923 /* Initialize all internally managed lists. */
3924 INIT_LIST_HEAD(&vport->fc_nodes);
da0436e9 3925 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2e0fef85 3926 spin_lock_init(&vport->work_port_lock);
47a8617c 3927
33cc559a
TJ
3928 setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
3929 (unsigned long)vport);
47a8617c 3930
33cc559a
TJ
3931 setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
3932 (unsigned long)vport);
92494144 3933
33cc559a
TJ
3934 setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
3935 (unsigned long)vport);
92494144 3936
d139b9bd 3937 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2e0fef85
JS
3938 if (error)
3939 goto out_put_shost;
47a8617c 3940
549e55cd 3941 spin_lock_irq(&phba->hbalock);
2e0fef85 3942 list_add_tail(&vport->listentry, &phba->port_list);
549e55cd 3943 spin_unlock_irq(&phba->hbalock);
2e0fef85 3944 return vport;
47a8617c 3945
2e0fef85
JS
3946out_put_shost:
3947 scsi_host_put(shost);
3948out:
3949 return NULL;
47a8617c
JS
3950}
3951
e59058c4 3952/**
3621a710 3953 * destroy_port - destroy an FC port
e59058c4
JS
3954 * @vport: pointer to an lpfc virtual N_Port data structure.
3955 *
3956 * This routine destroys a FC port from the upper layer protocol. All the
3957 * resources associated with the port are released.
3958 **/
2e0fef85
JS
3959void
3960destroy_port(struct lpfc_vport *vport)
47a8617c 3961{
92d7f7b0
JS
3962 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3963 struct lpfc_hba *phba = vport->phba;
47a8617c 3964
858c9f6c 3965 lpfc_debugfs_terminate(vport);
92d7f7b0
JS
3966 fc_remove_host(shost);
3967 scsi_remove_host(shost);
47a8617c 3968
92d7f7b0
JS
3969 spin_lock_irq(&phba->hbalock);
3970 list_del_init(&vport->listentry);
3971 spin_unlock_irq(&phba->hbalock);
47a8617c 3972
92d7f7b0 3973 lpfc_cleanup(vport);
47a8617c 3974 return;
47a8617c
JS
3975}
3976
e59058c4 3977/**
3621a710 3978 * lpfc_get_instance - Get a unique integer ID
e59058c4
JS
3979 *
3980 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
3981 * uses the kernel idr facility to perform the task.
3982 *
3983 * Return codes:
3984 * instance - a unique integer ID allocated as the new instance.
3985 * -1 - lpfc get instance failed.
3986 **/
92d7f7b0
JS
3987int
3988lpfc_get_instance(void)
3989{
ab516036
TH
3990 int ret;
3991
3992 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
3993 return ret < 0 ? -1 : ret;
47a8617c
JS
3994}
3995
e59058c4 3996/**
3621a710 3997 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
e59058c4
JS
3998 * @shost: pointer to SCSI host data structure.
3999 * @time: elapsed time of the scan in jiffies.
4000 *
4001 * This routine is called by the SCSI layer with a SCSI host to determine
4002 * whether the scan host is finished.
4003 *
4004 * Note: there is no scan_start function as adapter initialization will have
4005 * asynchronously kicked off the link initialization.
4006 *
4007 * Return codes
4008 * 0 - SCSI host scan is not over yet.
4009 * 1 - SCSI host scan is over.
4010 **/
47a8617c
JS
4011int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4012{
2e0fef85
JS
4013 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4014 struct lpfc_hba *phba = vport->phba;
858c9f6c 4015 int stat = 0;
47a8617c 4016
858c9f6c
JS
4017 spin_lock_irq(shost->host_lock);
4018
51ef4c26 4019 if (vport->load_flag & FC_UNLOADING) {
858c9f6c
JS
4020 stat = 1;
4021 goto finished;
4022 }
256ec0d0 4023 if (time >= msecs_to_jiffies(30 * 1000)) {
2e0fef85 4024 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
4025 "0461 Scanning longer than 30 "
4026 "seconds. Continuing initialization\n");
858c9f6c 4027 stat = 1;
47a8617c 4028 goto finished;
2e0fef85 4029 }
256ec0d0
JS
4030 if (time >= msecs_to_jiffies(15 * 1000) &&
4031 phba->link_state <= LPFC_LINK_DOWN) {
2e0fef85 4032 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
4033 "0465 Link down longer than 15 "
4034 "seconds. Continuing initialization\n");
858c9f6c 4035 stat = 1;
47a8617c 4036 goto finished;
2e0fef85 4037 }
47a8617c 4038
2e0fef85 4039 if (vport->port_state != LPFC_VPORT_READY)
858c9f6c 4040 goto finished;
2e0fef85 4041 if (vport->num_disc_nodes || vport->fc_prli_sent)
858c9f6c 4042 goto finished;
256ec0d0 4043 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
858c9f6c 4044 goto finished;
2e0fef85 4045 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
858c9f6c
JS
4046 goto finished;
4047
4048 stat = 1;
47a8617c
JS
4049
4050finished:
858c9f6c
JS
4051 spin_unlock_irq(shost->host_lock);
4052 return stat;
92d7f7b0 4053}
47a8617c 4054
e59058c4 4055/**
3621a710 4056 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
e59058c4
JS
4057 * @shost: pointer to SCSI host data structure.
4058 *
4059 * This routine initializes a given SCSI host attributes on a FC port. The
4060 * SCSI host can be either on top of a physical port or a virtual port.
4061 **/
92d7f7b0
JS
4062void lpfc_host_attrib_init(struct Scsi_Host *shost)
4063{
4064 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4065 struct lpfc_hba *phba = vport->phba;
47a8617c 4066 /*
2e0fef85 4067 * Set fixed host attributes. Must done after lpfc_sli_hba_setup().
47a8617c
JS
4068 */
4069
2e0fef85
JS
4070 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4071 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
47a8617c
JS
4072 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4073
4074 memset(fc_host_supported_fc4s(shost), 0,
2e0fef85 4075 sizeof(fc_host_supported_fc4s(shost)));
47a8617c
JS
4076 fc_host_supported_fc4s(shost)[2] = 1;
4077 fc_host_supported_fc4s(shost)[7] = 1;
4078
92d7f7b0
JS
4079 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4080 sizeof fc_host_symbolic_name(shost));
47a8617c
JS
4081
4082 fc_host_supported_speeds(shost) = 0;
d38dd52c
JS
4083 if (phba->lmt & LMT_32Gb)
4084 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
88a2cfbb
JS
4085 if (phba->lmt & LMT_16Gb)
4086 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
47a8617c
JS
4087 if (phba->lmt & LMT_10Gb)
4088 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
a8adb832
JS
4089 if (phba->lmt & LMT_8Gb)
4090 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
47a8617c
JS
4091 if (phba->lmt & LMT_4Gb)
4092 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4093 if (phba->lmt & LMT_2Gb)
4094 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4095 if (phba->lmt & LMT_1Gb)
4096 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4097
4098 fc_host_maxframe_size(shost) =
2e0fef85
JS
4099 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4100 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
47a8617c 4101
0af5d708
MC
4102 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4103
47a8617c
JS
4104 /* This value is also unchanging */
4105 memset(fc_host_active_fc4s(shost), 0,
2e0fef85 4106 sizeof(fc_host_active_fc4s(shost)));
47a8617c
JS
4107 fc_host_active_fc4s(shost)[2] = 1;
4108 fc_host_active_fc4s(shost)[7] = 1;
4109
92d7f7b0 4110 fc_host_max_npiv_vports(shost) = phba->max_vpi;
47a8617c 4111 spin_lock_irq(shost->host_lock);
51ef4c26 4112 vport->load_flag &= ~FC_LOADING;
47a8617c 4113 spin_unlock_irq(shost->host_lock);
47a8617c 4114}
dea3101e 4115
e59058c4 4116/**
da0436e9 4117 * lpfc_stop_port_s3 - Stop SLI3 device port
e59058c4
JS
4118 * @phba: pointer to lpfc hba data structure.
4119 *
da0436e9
JS
4120 * This routine is invoked to stop an SLI3 device port, it stops the device
4121 * from generating interrupts and stops the device driver's timers for the
4122 * device.
e59058c4 4123 **/
da0436e9
JS
4124static void
4125lpfc_stop_port_s3(struct lpfc_hba *phba)
db2378e0 4126{
da0436e9
JS
4127 /* Clear all interrupt enable conditions */
4128 writel(0, phba->HCregaddr);
4129 readl(phba->HCregaddr); /* flush */
4130 /* Clear all pending interrupts */
4131 writel(0xffffffff, phba->HAregaddr);
4132 readl(phba->HAregaddr); /* flush */
db2378e0 4133
da0436e9
JS
4134 /* Reset some HBA SLI setup states */
4135 lpfc_stop_hba_timers(phba);
4136 phba->pport->work_port_events = 0;
4137}
db2378e0 4138
da0436e9
JS
4139/**
4140 * lpfc_stop_port_s4 - Stop SLI4 device port
4141 * @phba: pointer to lpfc hba data structure.
4142 *
4143 * This routine is invoked to stop an SLI4 device port, it stops the device
4144 * from generating interrupts and stops the device driver's timers for the
4145 * device.
4146 **/
4147static void
4148lpfc_stop_port_s4(struct lpfc_hba *phba)
4149{
4150 /* Reset some HBA SLI4 setup states */
4151 lpfc_stop_hba_timers(phba);
4152 phba->pport->work_port_events = 0;
4153 phba->sli4_hba.intr_enable = 0;
da0436e9 4154}
9399627f 4155
da0436e9
JS
4156/**
4157 * lpfc_stop_port - Wrapper function for stopping hba port
4158 * @phba: Pointer to HBA context object.
4159 *
4160 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
4161 * the API jump table function pointer from the lpfc_hba struct.
4162 **/
4163void
4164lpfc_stop_port(struct lpfc_hba *phba)
4165{
4166 phba->lpfc_stop_port(phba);
4167}
db2378e0 4168
ecfd03c6
JS
4169/**
4170 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4171 * @phba: Pointer to hba for which this call is being executed.
4172 *
4173 * This routine starts the timer waiting for the FCF rediscovery to complete.
4174 **/
4175void
4176lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4177{
4178 unsigned long fcf_redisc_wait_tmo =
4179 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4180 /* Start fcf rediscovery wait period timer */
4181 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4182 spin_lock_irq(&phba->hbalock);
4183 /* Allow action to new fcf asynchronous event */
4184 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4185 /* Mark the FCF rediscovery pending state */
4186 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4187 spin_unlock_irq(&phba->hbalock);
4188}
4189
4190/**
4191 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4192 * @ptr: Map to lpfc_hba data structure pointer.
4193 *
4194 * This routine is invoked when waiting for FCF table rediscover has been
4195 * timed out. If new FCF record(s) has (have) been discovered during the
4196 * wait period, a new FCF event shall be added to the FCOE async event
4197 * list, and then worker thread shall be waked up for processing from the
4198 * worker thread context.
4199 **/
e399b228 4200static void
ecfd03c6
JS
4201lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
4202{
4203 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
4204
4205 /* Don't send FCF rediscovery event if timer cancelled */
4206 spin_lock_irq(&phba->hbalock);
4207 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4208 spin_unlock_irq(&phba->hbalock);
4209 return;
4210 }
4211 /* Clear FCF rediscovery timer pending flag */
4212 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4213 /* FCF rediscovery event to worker thread */
4214 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4215 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 4216 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 4217 "2776 FCF rediscover quiescent timer expired\n");
ecfd03c6
JS
4218 /* wake up worker thread */
4219 lpfc_worker_wake_up(phba);
4220}
4221
e59058c4 4222/**
da0436e9 4223 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
e59058c4 4224 * @phba: pointer to lpfc hba data structure.
da0436e9 4225 * @acqe_link: pointer to the async link completion queue entry.
e59058c4 4226 *
da0436e9
JS
4227 * This routine is to parse the SLI4 link-attention link fault code and
4228 * translate it into the base driver's read link attention mailbox command
4229 * status.
4230 *
4231 * Return: Link-attention status in terms of base driver's coding.
e59058c4 4232 **/
da0436e9
JS
4233static uint16_t
4234lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4235 struct lpfc_acqe_link *acqe_link)
db2378e0 4236{
da0436e9 4237 uint16_t latt_fault;
9399627f 4238
da0436e9
JS
4239 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4240 case LPFC_ASYNC_LINK_FAULT_NONE:
4241 case LPFC_ASYNC_LINK_FAULT_LOCAL:
4242 case LPFC_ASYNC_LINK_FAULT_REMOTE:
4243 latt_fault = 0;
4244 break;
4245 default:
4246 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4247 "0398 Invalid link fault code: x%x\n",
4248 bf_get(lpfc_acqe_link_fault, acqe_link));
4249 latt_fault = MBXERR_ERROR;
4250 break;
4251 }
4252 return latt_fault;
db2378e0
JS
4253}
4254
5b75da2f 4255/**
da0436e9 4256 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5b75da2f 4257 * @phba: pointer to lpfc hba data structure.
da0436e9 4258 * @acqe_link: pointer to the async link completion queue entry.
5b75da2f 4259 *
da0436e9
JS
4260 * This routine is to parse the SLI4 link attention type and translate it
4261 * into the base driver's link attention type coding.
5b75da2f 4262 *
da0436e9
JS
4263 * Return: Link attention type in terms of base driver's coding.
4264 **/
4265static uint8_t
4266lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4267 struct lpfc_acqe_link *acqe_link)
5b75da2f 4268{
da0436e9 4269 uint8_t att_type;
5b75da2f 4270
da0436e9
JS
4271 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4272 case LPFC_ASYNC_LINK_STATUS_DOWN:
4273 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
76a95d75 4274 att_type = LPFC_ATT_LINK_DOWN;
da0436e9
JS
4275 break;
4276 case LPFC_ASYNC_LINK_STATUS_UP:
4277 /* Ignore physical link up events - wait for logical link up */
76a95d75 4278 att_type = LPFC_ATT_RESERVED;
da0436e9
JS
4279 break;
4280 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
76a95d75 4281 att_type = LPFC_ATT_LINK_UP;
da0436e9
JS
4282 break;
4283 default:
4284 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4285 "0399 Invalid link attention type: x%x\n",
4286 bf_get(lpfc_acqe_link_status, acqe_link));
76a95d75 4287 att_type = LPFC_ATT_RESERVED;
da0436e9 4288 break;
5b75da2f 4289 }
da0436e9 4290 return att_type;
5b75da2f
JS
4291}
4292
8b68cd52
JS
4293/**
4294 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
4295 * @phba: pointer to lpfc hba data structure.
4296 *
4297 * This routine is to get an SLI3 FC port's link speed in Mbps.
4298 *
4299 * Return: link speed in terms of Mbps.
4300 **/
4301uint32_t
4302lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4303{
4304 uint32_t link_speed;
4305
4306 if (!lpfc_is_link_up(phba))
4307 return 0;
4308
a085e87c
JS
4309 if (phba->sli_rev <= LPFC_SLI_REV3) {
4310 switch (phba->fc_linkspeed) {
4311 case LPFC_LINK_SPEED_1GHZ:
4312 link_speed = 1000;
4313 break;
4314 case LPFC_LINK_SPEED_2GHZ:
4315 link_speed = 2000;
4316 break;
4317 case LPFC_LINK_SPEED_4GHZ:
4318 link_speed = 4000;
4319 break;
4320 case LPFC_LINK_SPEED_8GHZ:
4321 link_speed = 8000;
4322 break;
4323 case LPFC_LINK_SPEED_10GHZ:
4324 link_speed = 10000;
4325 break;
4326 case LPFC_LINK_SPEED_16GHZ:
4327 link_speed = 16000;
4328 break;
4329 default:
4330 link_speed = 0;
4331 }
4332 } else {
4333 if (phba->sli4_hba.link_state.logical_speed)
4334 link_speed =
4335 phba->sli4_hba.link_state.logical_speed;
4336 else
4337 link_speed = phba->sli4_hba.link_state.speed;
8b68cd52
JS
4338 }
4339 return link_speed;
4340}
4341
4342/**
4343 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
4344 * @phba: pointer to lpfc hba data structure.
4345 * @evt_code: asynchronous event code.
4346 * @speed_code: asynchronous event link speed code.
4347 *
4348 * This routine is to parse the giving SLI4 async event link speed code into
4349 * value of Mbps for the link speed.
4350 *
4351 * Return: link speed in terms of Mbps.
4352 **/
4353static uint32_t
4354lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
4355 uint8_t speed_code)
4356{
4357 uint32_t port_speed;
4358
4359 switch (evt_code) {
4360 case LPFC_TRAILER_CODE_LINK:
4361 switch (speed_code) {
26d830ec 4362 case LPFC_ASYNC_LINK_SPEED_ZERO:
8b68cd52
JS
4363 port_speed = 0;
4364 break;
26d830ec 4365 case LPFC_ASYNC_LINK_SPEED_10MBPS:
8b68cd52
JS
4366 port_speed = 10;
4367 break;
26d830ec 4368 case LPFC_ASYNC_LINK_SPEED_100MBPS:
8b68cd52
JS
4369 port_speed = 100;
4370 break;
26d830ec 4371 case LPFC_ASYNC_LINK_SPEED_1GBPS:
8b68cd52
JS
4372 port_speed = 1000;
4373 break;
26d830ec 4374 case LPFC_ASYNC_LINK_SPEED_10GBPS:
8b68cd52
JS
4375 port_speed = 10000;
4376 break;
26d830ec
JS
4377 case LPFC_ASYNC_LINK_SPEED_20GBPS:
4378 port_speed = 20000;
4379 break;
4380 case LPFC_ASYNC_LINK_SPEED_25GBPS:
4381 port_speed = 25000;
4382 break;
4383 case LPFC_ASYNC_LINK_SPEED_40GBPS:
4384 port_speed = 40000;
4385 break;
8b68cd52
JS
4386 default:
4387 port_speed = 0;
4388 }
4389 break;
4390 case LPFC_TRAILER_CODE_FC:
4391 switch (speed_code) {
26d830ec 4392 case LPFC_FC_LA_SPEED_UNKNOWN:
8b68cd52
JS
4393 port_speed = 0;
4394 break;
26d830ec 4395 case LPFC_FC_LA_SPEED_1G:
8b68cd52
JS
4396 port_speed = 1000;
4397 break;
26d830ec 4398 case LPFC_FC_LA_SPEED_2G:
8b68cd52
JS
4399 port_speed = 2000;
4400 break;
26d830ec 4401 case LPFC_FC_LA_SPEED_4G:
8b68cd52
JS
4402 port_speed = 4000;
4403 break;
26d830ec 4404 case LPFC_FC_LA_SPEED_8G:
8b68cd52
JS
4405 port_speed = 8000;
4406 break;
26d830ec 4407 case LPFC_FC_LA_SPEED_10G:
8b68cd52
JS
4408 port_speed = 10000;
4409 break;
26d830ec 4410 case LPFC_FC_LA_SPEED_16G:
8b68cd52
JS
4411 port_speed = 16000;
4412 break;
d38dd52c
JS
4413 case LPFC_FC_LA_SPEED_32G:
4414 port_speed = 32000;
4415 break;
8b68cd52
JS
4416 default:
4417 port_speed = 0;
4418 }
4419 break;
4420 default:
4421 port_speed = 0;
4422 }
4423 return port_speed;
4424}
4425
da0436e9 4426/**
70f3c073 4427 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
da0436e9
JS
4428 * @phba: pointer to lpfc hba data structure.
4429 * @acqe_link: pointer to the async link completion queue entry.
4430 *
70f3c073 4431 * This routine is to handle the SLI4 asynchronous FCoE link event.
da0436e9
JS
4432 **/
4433static void
4434lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
4435 struct lpfc_acqe_link *acqe_link)
4436{
4437 struct lpfc_dmabuf *mp;
4438 LPFC_MBOXQ_t *pmb;
4439 MAILBOX_t *mb;
76a95d75 4440 struct lpfc_mbx_read_top *la;
da0436e9 4441 uint8_t att_type;
76a95d75 4442 int rc;
da0436e9
JS
4443
4444 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
76a95d75 4445 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
da0436e9 4446 return;
32b9793f 4447 phba->fcoe_eventtag = acqe_link->event_tag;
da0436e9
JS
4448 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4449 if (!pmb) {
4450 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4451 "0395 The mboxq allocation failed\n");
4452 return;
4453 }
4454 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4455 if (!mp) {
4456 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4457 "0396 The lpfc_dmabuf allocation failed\n");
4458 goto out_free_pmb;
4459 }
4460 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4461 if (!mp->virt) {
4462 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4463 "0397 The mbuf allocation failed\n");
4464 goto out_free_dmabuf;
4465 }
4466
4467 /* Cleanup any outstanding ELS commands */
4468 lpfc_els_flush_all_cmd(phba);
4469
4470 /* Block ELS IOCBs until we have done process link event */
895427bd 4471 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
da0436e9
JS
4472
4473 /* Update link event statistics */
4474 phba->sli.slistat.link_event++;
4475
76a95d75
JS
4476 /* Create lpfc_handle_latt mailbox command from link ACQE */
4477 lpfc_read_topology(phba, pmb, mp);
4478 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
da0436e9
JS
4479 pmb->vport = phba->pport;
4480
da0436e9
JS
4481 /* Keep the link status for extra SLI4 state machine reference */
4482 phba->sli4_hba.link_state.speed =
8b68cd52
JS
4483 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
4484 bf_get(lpfc_acqe_link_speed, acqe_link));
da0436e9
JS
4485 phba->sli4_hba.link_state.duplex =
4486 bf_get(lpfc_acqe_link_duplex, acqe_link);
4487 phba->sli4_hba.link_state.status =
4488 bf_get(lpfc_acqe_link_status, acqe_link);
70f3c073
JS
4489 phba->sli4_hba.link_state.type =
4490 bf_get(lpfc_acqe_link_type, acqe_link);
4491 phba->sli4_hba.link_state.number =
4492 bf_get(lpfc_acqe_link_number, acqe_link);
da0436e9
JS
4493 phba->sli4_hba.link_state.fault =
4494 bf_get(lpfc_acqe_link_fault, acqe_link);
65467b6b 4495 phba->sli4_hba.link_state.logical_speed =
8b68cd52
JS
4496 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
4497
70f3c073 4498 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
c31098ce
JS
4499 "2900 Async FC/FCoE Link event - Speed:%dGBit "
4500 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
4501 "Logical speed:%dMbps Fault:%d\n",
70f3c073
JS
4502 phba->sli4_hba.link_state.speed,
4503 phba->sli4_hba.link_state.topology,
4504 phba->sli4_hba.link_state.status,
4505 phba->sli4_hba.link_state.type,
4506 phba->sli4_hba.link_state.number,
8b68cd52 4507 phba->sli4_hba.link_state.logical_speed,
70f3c073 4508 phba->sli4_hba.link_state.fault);
76a95d75
JS
4509 /*
4510 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
4511 * topology info. Note: Optional for non FC-AL ports.
4512 */
4513 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
4514 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4515 if (rc == MBX_NOT_FINISHED)
4516 goto out_free_dmabuf;
4517 return;
4518 }
4519 /*
4520 * For FCoE Mode: fill in all the topology information we need and call
4521 * the READ_TOPOLOGY completion routine to continue without actually
4522 * sending the READ_TOPOLOGY mailbox command to the port.
4523 */
4524 /* Parse and translate status field */
4525 mb = &pmb->u.mb;
4526 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
4527
4528 /* Parse and translate link attention fields */
4529 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4530 la->eventTag = acqe_link->event_tag;
4531 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4532 bf_set(lpfc_mbx_read_top_link_spd, la,
a085e87c 4533 (bf_get(lpfc_acqe_link_speed, acqe_link)));
76a95d75
JS
4534
4535 /* Fake the the following irrelvant fields */
4536 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4537 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4538 bf_set(lpfc_mbx_read_top_il, la, 0);
4539 bf_set(lpfc_mbx_read_top_pb, la, 0);
4540 bf_set(lpfc_mbx_read_top_fa, la, 0);
4541 bf_set(lpfc_mbx_read_top_mm, la, 0);
da0436e9
JS
4542
4543 /* Invoke the lpfc_handle_latt mailbox command callback function */
76a95d75 4544 lpfc_mbx_cmpl_read_topology(phba, pmb);
da0436e9 4545
5b75da2f 4546 return;
da0436e9
JS
4547
4548out_free_dmabuf:
4549 kfree(mp);
4550out_free_pmb:
4551 mempool_free(pmb, phba->mbox_mem_pool);
4552}
4553
70f3c073
JS
4554/**
4555 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
4556 * @phba: pointer to lpfc hba data structure.
4557 * @acqe_fc: pointer to the async fc completion queue entry.
4558 *
4559 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
4560 * that the event was received and then issue a read_topology mailbox command so
4561 * that the rest of the driver will treat it the same as SLI3.
4562 **/
4563static void
4564lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
4565{
4566 struct lpfc_dmabuf *mp;
4567 LPFC_MBOXQ_t *pmb;
7bdedb34
JS
4568 MAILBOX_t *mb;
4569 struct lpfc_mbx_read_top *la;
70f3c073
JS
4570 int rc;
4571
4572 if (bf_get(lpfc_trailer_type, acqe_fc) !=
4573 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
4574 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4575 "2895 Non FC link Event detected.(%d)\n",
4576 bf_get(lpfc_trailer_type, acqe_fc));
4577 return;
4578 }
4579 /* Keep the link status for extra SLI4 state machine reference */
4580 phba->sli4_hba.link_state.speed =
8b68cd52
JS
4581 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
4582 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
70f3c073
JS
4583 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
4584 phba->sli4_hba.link_state.topology =
4585 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
4586 phba->sli4_hba.link_state.status =
4587 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
4588 phba->sli4_hba.link_state.type =
4589 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
4590 phba->sli4_hba.link_state.number =
4591 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
4592 phba->sli4_hba.link_state.fault =
4593 bf_get(lpfc_acqe_link_fault, acqe_fc);
4594 phba->sli4_hba.link_state.logical_speed =
8b68cd52 4595 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
70f3c073
JS
4596 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4597 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
4598 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
4599 "%dMbps Fault:%d\n",
4600 phba->sli4_hba.link_state.speed,
4601 phba->sli4_hba.link_state.topology,
4602 phba->sli4_hba.link_state.status,
4603 phba->sli4_hba.link_state.type,
4604 phba->sli4_hba.link_state.number,
8b68cd52 4605 phba->sli4_hba.link_state.logical_speed,
70f3c073
JS
4606 phba->sli4_hba.link_state.fault);
4607 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4608 if (!pmb) {
4609 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4610 "2897 The mboxq allocation failed\n");
4611 return;
4612 }
4613 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4614 if (!mp) {
4615 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4616 "2898 The lpfc_dmabuf allocation failed\n");
4617 goto out_free_pmb;
4618 }
4619 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4620 if (!mp->virt) {
4621 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4622 "2899 The mbuf allocation failed\n");
4623 goto out_free_dmabuf;
4624 }
4625
4626 /* Cleanup any outstanding ELS commands */
4627 lpfc_els_flush_all_cmd(phba);
4628
4629 /* Block ELS IOCBs until we have done process link event */
895427bd 4630 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
70f3c073
JS
4631
4632 /* Update link event statistics */
4633 phba->sli.slistat.link_event++;
4634
4635 /* Create lpfc_handle_latt mailbox command from link ACQE */
4636 lpfc_read_topology(phba, pmb, mp);
4637 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4638 pmb->vport = phba->pport;
4639
7bdedb34 4640 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
ae9e28f3
JS
4641 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
4642
4643 switch (phba->sli4_hba.link_state.status) {
4644 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
4645 phba->link_flag |= LS_MDS_LINK_DOWN;
4646 break;
4647 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
4648 phba->link_flag |= LS_MDS_LOOPBACK;
4649 break;
4650 default:
4651 break;
4652 }
4653
7bdedb34
JS
4654 /* Parse and translate status field */
4655 mb = &pmb->u.mb;
4656 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
4657 (void *)acqe_fc);
4658
4659 /* Parse and translate link attention fields */
4660 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
4661 la->eventTag = acqe_fc->event_tag;
7bdedb34 4662
aeb3c817
JS
4663 if (phba->sli4_hba.link_state.status ==
4664 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
4665 bf_set(lpfc_mbx_read_top_att_type, la,
4666 LPFC_FC_LA_TYPE_UNEXP_WWPN);
4667 } else {
4668 bf_set(lpfc_mbx_read_top_att_type, la,
4669 LPFC_FC_LA_TYPE_LINK_DOWN);
4670 }
7bdedb34
JS
4671 /* Invoke the mailbox command callback function */
4672 lpfc_mbx_cmpl_read_topology(phba, pmb);
4673
4674 return;
4675 }
4676
70f3c073
JS
4677 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4678 if (rc == MBX_NOT_FINISHED)
4679 goto out_free_dmabuf;
4680 return;
4681
4682out_free_dmabuf:
4683 kfree(mp);
4684out_free_pmb:
4685 mempool_free(pmb, phba->mbox_mem_pool);
4686}
4687
4688/**
4689 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
4690 * @phba: pointer to lpfc hba data structure.
4691 * @acqe_fc: pointer to the async SLI completion queue entry.
4692 *
4693 * This routine is to handle the SLI4 asynchronous SLI events.
4694 **/
4695static void
4696lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
4697{
4b8bae08 4698 char port_name;
8c1312e1 4699 char message[128];
4b8bae08 4700 uint8_t status;
946727dc 4701 uint8_t evt_type;
448193b5 4702 uint8_t operational = 0;
946727dc 4703 struct temp_event temp_event_data;
4b8bae08 4704 struct lpfc_acqe_misconfigured_event *misconfigured;
946727dc
JS
4705 struct Scsi_Host *shost;
4706
4707 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
4b8bae08 4708
448193b5
JS
4709 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4710 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
4711 "x%08x SLI Event Type:%d\n",
4712 acqe_sli->event_data1, acqe_sli->event_data2,
4713 evt_type);
4b8bae08
JS
4714
4715 port_name = phba->Port[0];
4716 if (port_name == 0x00)
4717 port_name = '?'; /* get port name is empty */
4718
946727dc
JS
4719 switch (evt_type) {
4720 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
4721 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
4722 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
4723 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
4724
4725 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4726 "3190 Over Temperature:%d Celsius- Port Name %c\n",
4727 acqe_sli->event_data1, port_name);
4728
310429ef 4729 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
946727dc
JS
4730 shost = lpfc_shost_from_vport(phba->pport);
4731 fc_host_post_vendor_event(shost, fc_get_event_number(),
4732 sizeof(temp_event_data),
4733 (char *)&temp_event_data,
4734 SCSI_NL_VID_TYPE_PCI
4735 | PCI_VENDOR_ID_EMULEX);
4736 break;
4737 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
4738 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
4739 temp_event_data.event_code = LPFC_NORMAL_TEMP;
4740 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
4741
4742 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4743 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
4744 acqe_sli->event_data1, port_name);
4745
4746 shost = lpfc_shost_from_vport(phba->pport);
4747 fc_host_post_vendor_event(shost, fc_get_event_number(),
4748 sizeof(temp_event_data),
4749 (char *)&temp_event_data,
4750 SCSI_NL_VID_TYPE_PCI
4751 | PCI_VENDOR_ID_EMULEX);
4752 break;
4753 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
4754 misconfigured = (struct lpfc_acqe_misconfigured_event *)
4b8bae08
JS
4755 &acqe_sli->event_data1;
4756
946727dc
JS
4757 /* fetch the status for this port */
4758 switch (phba->sli4_hba.lnk_info.lnk_no) {
4759 case LPFC_LINK_NUMBER_0:
448193b5
JS
4760 status = bf_get(lpfc_sli_misconfigured_port0_state,
4761 &misconfigured->theEvent);
4762 operational = bf_get(lpfc_sli_misconfigured_port0_op,
4b8bae08 4763 &misconfigured->theEvent);
946727dc
JS
4764 break;
4765 case LPFC_LINK_NUMBER_1:
448193b5
JS
4766 status = bf_get(lpfc_sli_misconfigured_port1_state,
4767 &misconfigured->theEvent);
4768 operational = bf_get(lpfc_sli_misconfigured_port1_op,
4b8bae08 4769 &misconfigured->theEvent);
946727dc
JS
4770 break;
4771 case LPFC_LINK_NUMBER_2:
448193b5
JS
4772 status = bf_get(lpfc_sli_misconfigured_port2_state,
4773 &misconfigured->theEvent);
4774 operational = bf_get(lpfc_sli_misconfigured_port2_op,
4b8bae08 4775 &misconfigured->theEvent);
946727dc
JS
4776 break;
4777 case LPFC_LINK_NUMBER_3:
448193b5
JS
4778 status = bf_get(lpfc_sli_misconfigured_port3_state,
4779 &misconfigured->theEvent);
4780 operational = bf_get(lpfc_sli_misconfigured_port3_op,
4b8bae08 4781 &misconfigured->theEvent);
946727dc
JS
4782 break;
4783 default:
448193b5
JS
4784 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4785 "3296 "
4786 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
4787 "event: Invalid link %d",
4788 phba->sli4_hba.lnk_info.lnk_no);
4789 return;
946727dc 4790 }
4b8bae08 4791
448193b5
JS
4792 /* Skip if optic state unchanged */
4793 if (phba->sli4_hba.lnk_info.optic_state == status)
4794 return;
4795
946727dc
JS
4796 switch (status) {
4797 case LPFC_SLI_EVENT_STATUS_VALID:
448193b5
JS
4798 sprintf(message, "Physical Link is functional");
4799 break;
946727dc
JS
4800 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
4801 sprintf(message, "Optics faulted/incorrectly "
4802 "installed/not installed - Reseat optics, "
4803 "if issue not resolved, replace.");
4804 break;
4805 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
4806 sprintf(message,
4807 "Optics of two types installed - Remove one "
4808 "optic or install matching pair of optics.");
4809 break;
4810 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
4811 sprintf(message, "Incompatible optics - Replace with "
292098be 4812 "compatible optics for card to function.");
946727dc 4813 break;
448193b5
JS
4814 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
4815 sprintf(message, "Unqualified optics - Replace with "
4816 "Avago optics for Warranty and Technical "
4817 "Support - Link is%s operational",
2ea259ee 4818 (operational) ? " not" : "");
448193b5
JS
4819 break;
4820 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
4821 sprintf(message, "Uncertified optics - Replace with "
4822 "Avago-certified optics to enable link "
4823 "operation - Link is%s operational",
2ea259ee 4824 (operational) ? " not" : "");
448193b5 4825 break;
946727dc
JS
4826 default:
4827 /* firmware is reporting a status we don't know about */
4828 sprintf(message, "Unknown event status x%02x", status);
4829 break;
4830 }
448193b5 4831 phba->sli4_hba.lnk_info.optic_state = status;
946727dc 4832 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
448193b5 4833 "3176 Port Name %c %s\n", port_name, message);
946727dc
JS
4834 break;
4835 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
4836 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4837 "3192 Remote DPort Test Initiated - "
4838 "Event Data1:x%08x Event Data2: x%08x\n",
4839 acqe_sli->event_data1, acqe_sli->event_data2);
4b8bae08
JS
4840 break;
4841 default:
946727dc
JS
4842 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4843 "3193 Async SLI event - Event Data1:x%08x Event Data2:"
4844 "x%08x SLI Event Type:%d\n",
4845 acqe_sli->event_data1, acqe_sli->event_data2,
4846 evt_type);
4b8bae08
JS
4847 break;
4848 }
70f3c073
JS
4849}
4850
fc2b989b
JS
4851/**
4852 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
4853 * @vport: pointer to vport data structure.
4854 *
4855 * This routine is to perform Clear Virtual Link (CVL) on a vport in
4856 * response to a CVL event.
4857 *
4858 * Return the pointer to the ndlp with the vport if successful, otherwise
4859 * return NULL.
4860 **/
4861static struct lpfc_nodelist *
4862lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
4863{
4864 struct lpfc_nodelist *ndlp;
4865 struct Scsi_Host *shost;
4866 struct lpfc_hba *phba;
4867
4868 if (!vport)
4869 return NULL;
fc2b989b
JS
4870 phba = vport->phba;
4871 if (!phba)
4872 return NULL;
78730cfe
JS
4873 ndlp = lpfc_findnode_did(vport, Fabric_DID);
4874 if (!ndlp) {
4875 /* Cannot find existing Fabric ndlp, so allocate a new one */
9d3d340d 4876 ndlp = lpfc_nlp_init(vport, Fabric_DID);
78730cfe
JS
4877 if (!ndlp)
4878 return 0;
78730cfe
JS
4879 /* Set the node type */
4880 ndlp->nlp_type |= NLP_FABRIC;
4881 /* Put ndlp onto node list */
4882 lpfc_enqueue_node(vport, ndlp);
4883 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4884 /* re-setup ndlp without removing from node list */
4885 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4886 if (!ndlp)
4887 return 0;
4888 }
63e801ce
JS
4889 if ((phba->pport->port_state < LPFC_FLOGI) &&
4890 (phba->pport->port_state != LPFC_VPORT_FAILED))
fc2b989b
JS
4891 return NULL;
4892 /* If virtual link is not yet instantiated ignore CVL */
63e801ce
JS
4893 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
4894 && (vport->port_state != LPFC_VPORT_FAILED))
fc2b989b
JS
4895 return NULL;
4896 shost = lpfc_shost_from_vport(vport);
4897 if (!shost)
4898 return NULL;
4899 lpfc_linkdown_port(vport);
4900 lpfc_cleanup_pending_mbox(vport);
4901 spin_lock_irq(shost->host_lock);
4902 vport->fc_flag |= FC_VPORT_CVL_RCVD;
4903 spin_unlock_irq(shost->host_lock);
4904
4905 return ndlp;
4906}
4907
4908/**
4909 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4910 * @vport: pointer to lpfc hba data structure.
4911 *
4912 * This routine is to perform Clear Virtual Link (CVL) on all vports in
4913 * response to a FCF dead event.
4914 **/
4915static void
4916lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4917{
4918 struct lpfc_vport **vports;
4919 int i;
4920
4921 vports = lpfc_create_vport_work_array(phba);
4922 if (vports)
4923 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4924 lpfc_sli4_perform_vport_cvl(vports[i]);
4925 lpfc_destroy_vport_work_array(phba, vports);
4926}
4927
da0436e9 4928/**
76a95d75 4929 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
da0436e9
JS
4930 * @phba: pointer to lpfc hba data structure.
4931 * @acqe_link: pointer to the async fcoe completion queue entry.
4932 *
4933 * This routine is to handle the SLI4 asynchronous fcoe event.
4934 **/
4935static void
76a95d75 4936lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
70f3c073 4937 struct lpfc_acqe_fip *acqe_fip)
da0436e9 4938{
70f3c073 4939 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
da0436e9 4940 int rc;
6669f9bb
JS
4941 struct lpfc_vport *vport;
4942 struct lpfc_nodelist *ndlp;
4943 struct Scsi_Host *shost;
695a814e
JS
4944 int active_vlink_present;
4945 struct lpfc_vport **vports;
4946 int i;
da0436e9 4947
70f3c073
JS
4948 phba->fc_eventTag = acqe_fip->event_tag;
4949 phba->fcoe_eventtag = acqe_fip->event_tag;
da0436e9 4950 switch (event_type) {
70f3c073
JS
4951 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
4952 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
4953 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
999d813f
JS
4954 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4955 LOG_DISCOVERY,
a93ff37a
JS
4956 "2546 New FCF event, evt_tag:x%x, "
4957 "index:x%x\n",
70f3c073
JS
4958 acqe_fip->event_tag,
4959 acqe_fip->index);
999d813f
JS
4960 else
4961 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
4962 LOG_DISCOVERY,
a93ff37a
JS
4963 "2788 FCF param modified event, "
4964 "evt_tag:x%x, index:x%x\n",
70f3c073
JS
4965 acqe_fip->event_tag,
4966 acqe_fip->index);
38b92ef8 4967 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
0c9ab6f5
JS
4968 /*
4969 * During period of FCF discovery, read the FCF
4970 * table record indexed by the event to update
a93ff37a 4971 * FCF roundrobin failover eligible FCF bmask.
0c9ab6f5
JS
4972 */
4973 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4974 LOG_DISCOVERY,
a93ff37a
JS
4975 "2779 Read FCF (x%x) for updating "
4976 "roundrobin FCF failover bmask\n",
70f3c073
JS
4977 acqe_fip->index);
4978 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
0c9ab6f5 4979 }
38b92ef8
JS
4980
4981 /* If the FCF discovery is in progress, do nothing. */
3804dc84 4982 spin_lock_irq(&phba->hbalock);
a93ff37a 4983 if (phba->hba_flag & FCF_TS_INPROG) {
38b92ef8
JS
4984 spin_unlock_irq(&phba->hbalock);
4985 break;
4986 }
4987 /* If fast FCF failover rescan event is pending, do nothing */
4988 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
4989 spin_unlock_irq(&phba->hbalock);
4990 break;
4991 }
4992
c2b9712e
JS
4993 /* If the FCF has been in discovered state, do nothing. */
4994 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3804dc84
JS
4995 spin_unlock_irq(&phba->hbalock);
4996 break;
4997 }
4998 spin_unlock_irq(&phba->hbalock);
38b92ef8 4999
0c9ab6f5
JS
5000 /* Otherwise, scan the entire FCF table and re-discover SAN */
5001 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
a93ff37a
JS
5002 "2770 Start FCF table scan per async FCF "
5003 "event, evt_tag:x%x, index:x%x\n",
70f3c073 5004 acqe_fip->event_tag, acqe_fip->index);
0c9ab6f5
JS
5005 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
5006 LPFC_FCOE_FCF_GET_FIRST);
da0436e9 5007 if (rc)
0c9ab6f5
JS
5008 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5009 "2547 Issue FCF scan read FCF mailbox "
a93ff37a 5010 "command failed (x%x)\n", rc);
da0436e9
JS
5011 break;
5012
70f3c073 5013 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
da0436e9 5014 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e4e74273 5015 "2548 FCF Table full count 0x%x tag 0x%x\n",
70f3c073
JS
5016 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
5017 acqe_fip->event_tag);
da0436e9
JS
5018 break;
5019
70f3c073 5020 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
80c17849 5021 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
0c9ab6f5 5022 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
a93ff37a 5023 "2549 FCF (x%x) disconnected from network, "
70f3c073 5024 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
38b92ef8
JS
5025 /*
5026 * If we are in the middle of FCF failover process, clear
5027 * the corresponding FCF bit in the roundrobin bitmap.
da0436e9 5028 */
fc2b989b 5029 spin_lock_irq(&phba->hbalock);
a1cadfef
JS
5030 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
5031 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
fc2b989b 5032 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 5033 /* Update FLOGI FCF failover eligible FCF bmask */
70f3c073 5034 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
fc2b989b
JS
5035 break;
5036 }
38b92ef8
JS
5037 spin_unlock_irq(&phba->hbalock);
5038
5039 /* If the event is not for currently used fcf do nothing */
70f3c073 5040 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
38b92ef8
JS
5041 break;
5042
5043 /*
5044 * Otherwise, request the port to rediscover the entire FCF
5045 * table for a fast recovery from case that the current FCF
5046 * is no longer valid as we are not in the middle of FCF
5047 * failover process already.
5048 */
c2b9712e
JS
5049 spin_lock_irq(&phba->hbalock);
5050 /* Mark the fast failover process in progress */
5051 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
5052 spin_unlock_irq(&phba->hbalock);
5053
5054 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5055 "2771 Start FCF fast failover process due to "
5056 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
5057 "\n", acqe_fip->event_tag, acqe_fip->index);
5058 rc = lpfc_sli4_redisc_fcf_table(phba);
5059 if (rc) {
5060 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5061 LOG_DISCOVERY,
5062 "2772 Issue FCF rediscover mabilbox "
5063 "command failed, fail through to FCF "
5064 "dead event\n");
5065 spin_lock_irq(&phba->hbalock);
5066 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
5067 spin_unlock_irq(&phba->hbalock);
5068 /*
5069 * Last resort will fail over by treating this
5070 * as a link down to FCF registration.
5071 */
5072 lpfc_sli4_fcf_dead_failthrough(phba);
5073 } else {
5074 /* Reset FCF roundrobin bmask for new discovery */
5075 lpfc_sli4_clear_fcf_rr_bmask(phba);
5076 /*
5077 * Handling fast FCF failover to a DEAD FCF event is
5078 * considered equalivant to receiving CVL to all vports.
5079 */
5080 lpfc_sli4_perform_all_vport_cvl(phba);
5081 }
da0436e9 5082 break;
70f3c073 5083 case LPFC_FIP_EVENT_TYPE_CVL:
80c17849 5084 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
0c9ab6f5 5085 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
6669f9bb 5086 "2718 Clear Virtual Link Received for VPI 0x%x"
70f3c073 5087 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6d368e53 5088
6669f9bb 5089 vport = lpfc_find_vport_by_vpid(phba,
5248a749 5090 acqe_fip->index);
fc2b989b 5091 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6669f9bb
JS
5092 if (!ndlp)
5093 break;
695a814e
JS
5094 active_vlink_present = 0;
5095
5096 vports = lpfc_create_vport_work_array(phba);
5097 if (vports) {
5098 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5099 i++) {
5100 if ((!(vports[i]->fc_flag &
5101 FC_VPORT_CVL_RCVD)) &&
5102 (vports[i]->port_state > LPFC_FDISC)) {
5103 active_vlink_present = 1;
5104 break;
5105 }
5106 }
5107 lpfc_destroy_vport_work_array(phba, vports);
5108 }
5109
cc82355a
JS
5110 /*
5111 * Don't re-instantiate if vport is marked for deletion.
5112 * If we are here first then vport_delete is going to wait
5113 * for discovery to complete.
5114 */
5115 if (!(vport->load_flag & FC_UNLOADING) &&
5116 active_vlink_present) {
695a814e
JS
5117 /*
5118 * If there are other active VLinks present,
5119 * re-instantiate the Vlink using FDISC.
5120 */
256ec0d0
JS
5121 mod_timer(&ndlp->nlp_delayfunc,
5122 jiffies + msecs_to_jiffies(1000));
fc2b989b 5123 shost = lpfc_shost_from_vport(vport);
6669f9bb
JS
5124 spin_lock_irq(shost->host_lock);
5125 ndlp->nlp_flag |= NLP_DELAY_TMO;
5126 spin_unlock_irq(shost->host_lock);
695a814e
JS
5127 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
5128 vport->port_state = LPFC_FDISC;
5129 } else {
ecfd03c6
JS
5130 /*
5131 * Otherwise, we request port to rediscover
5132 * the entire FCF table for a fast recovery
5133 * from possible case that the current FCF
0c9ab6f5
JS
5134 * is no longer valid if we are not already
5135 * in the FCF failover process.
ecfd03c6 5136 */
fc2b989b 5137 spin_lock_irq(&phba->hbalock);
0c9ab6f5 5138 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
fc2b989b
JS
5139 spin_unlock_irq(&phba->hbalock);
5140 break;
5141 }
5142 /* Mark the fast failover process in progress */
0c9ab6f5 5143 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
fc2b989b 5144 spin_unlock_irq(&phba->hbalock);
0c9ab6f5
JS
5145 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5146 LOG_DISCOVERY,
a93ff37a 5147 "2773 Start FCF failover per CVL, "
70f3c073 5148 "evt_tag:x%x\n", acqe_fip->event_tag);
ecfd03c6 5149 rc = lpfc_sli4_redisc_fcf_table(phba);
fc2b989b 5150 if (rc) {
0c9ab6f5
JS
5151 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5152 LOG_DISCOVERY,
5153 "2774 Issue FCF rediscover "
5154 "mabilbox command failed, "
5155 "through to CVL event\n");
fc2b989b 5156 spin_lock_irq(&phba->hbalock);
0c9ab6f5 5157 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
fc2b989b 5158 spin_unlock_irq(&phba->hbalock);
ecfd03c6
JS
5159 /*
5160 * Last resort will be re-try on the
5161 * the current registered FCF entry.
5162 */
5163 lpfc_retry_pport_discovery(phba);
38b92ef8
JS
5164 } else
5165 /*
5166 * Reset FCF roundrobin bmask for new
5167 * discovery.
5168 */
7d791df7 5169 lpfc_sli4_clear_fcf_rr_bmask(phba);
6669f9bb
JS
5170 }
5171 break;
da0436e9
JS
5172 default:
5173 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5174 "0288 Unknown FCoE event type 0x%x event tag "
70f3c073 5175 "0x%x\n", event_type, acqe_fip->event_tag);
da0436e9
JS
5176 break;
5177 }
5178}
5179
5180/**
5181 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
5182 * @phba: pointer to lpfc hba data structure.
5183 * @acqe_link: pointer to the async dcbx completion queue entry.
5184 *
5185 * This routine is to handle the SLI4 asynchronous dcbx event.
5186 **/
5187static void
5188lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
5189 struct lpfc_acqe_dcbx *acqe_dcbx)
5190{
4d9ab994 5191 phba->fc_eventTag = acqe_dcbx->event_tag;
da0436e9
JS
5192 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5193 "0290 The SLI4 DCBX asynchronous event is not "
5194 "handled yet\n");
5195}
5196
b19a061a
JS
5197/**
5198 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
5199 * @phba: pointer to lpfc hba data structure.
5200 * @acqe_link: pointer to the async grp5 completion queue entry.
5201 *
5202 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
5203 * is an asynchronous notified of a logical link speed change. The Port
5204 * reports the logical link speed in units of 10Mbps.
5205 **/
5206static void
5207lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
5208 struct lpfc_acqe_grp5 *acqe_grp5)
5209{
5210 uint16_t prev_ll_spd;
5211
5212 phba->fc_eventTag = acqe_grp5->event_tag;
5213 phba->fcoe_eventtag = acqe_grp5->event_tag;
5214 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
5215 phba->sli4_hba.link_state.logical_speed =
8b68cd52 5216 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
b19a061a
JS
5217 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5218 "2789 GRP5 Async Event: Updating logical link speed "
8b68cd52
JS
5219 "from %dMbps to %dMbps\n", prev_ll_spd,
5220 phba->sli4_hba.link_state.logical_speed);
b19a061a
JS
5221}
5222
da0436e9
JS
5223/**
5224 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
5225 * @phba: pointer to lpfc hba data structure.
5226 *
5227 * This routine is invoked by the worker thread to process all the pending
5228 * SLI4 asynchronous events.
5229 **/
5230void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
5231{
5232 struct lpfc_cq_event *cq_event;
5233
5234 /* First, declare the async event has been handled */
5235 spin_lock_irq(&phba->hbalock);
5236 phba->hba_flag &= ~ASYNC_EVENT;
5237 spin_unlock_irq(&phba->hbalock);
5238 /* Now, handle all the async events */
5239 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5240 /* Get the first event from the head of the event queue */
5241 spin_lock_irq(&phba->hbalock);
5242 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
5243 cq_event, struct lpfc_cq_event, list);
5244 spin_unlock_irq(&phba->hbalock);
5245 /* Process the asynchronous event */
5246 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
5247 case LPFC_TRAILER_CODE_LINK:
5248 lpfc_sli4_async_link_evt(phba,
5249 &cq_event->cqe.acqe_link);
5250 break;
5251 case LPFC_TRAILER_CODE_FCOE:
70f3c073 5252 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
da0436e9
JS
5253 break;
5254 case LPFC_TRAILER_CODE_DCBX:
5255 lpfc_sli4_async_dcbx_evt(phba,
5256 &cq_event->cqe.acqe_dcbx);
5257 break;
b19a061a
JS
5258 case LPFC_TRAILER_CODE_GRP5:
5259 lpfc_sli4_async_grp5_evt(phba,
5260 &cq_event->cqe.acqe_grp5);
5261 break;
70f3c073
JS
5262 case LPFC_TRAILER_CODE_FC:
5263 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
5264 break;
5265 case LPFC_TRAILER_CODE_SLI:
5266 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
5267 break;
da0436e9
JS
5268 default:
5269 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5270 "1804 Invalid asynchrous event code: "
5271 "x%x\n", bf_get(lpfc_trailer_code,
5272 &cq_event->cqe.mcqe_cmpl));
5273 break;
5274 }
5275 /* Free the completion event processed to the free pool */
5276 lpfc_sli4_cq_event_release(phba, cq_event);
5277 }
5278}
5279
ecfd03c6
JS
5280/**
5281 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
5282 * @phba: pointer to lpfc hba data structure.
5283 *
5284 * This routine is invoked by the worker thread to process FCF table
5285 * rediscovery pending completion event.
5286 **/
5287void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
5288{
5289 int rc;
5290
5291 spin_lock_irq(&phba->hbalock);
5292 /* Clear FCF rediscovery timeout event */
5293 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
5294 /* Clear driver fast failover FCF record flag */
5295 phba->fcf.failover_rec.flag = 0;
5296 /* Set state for FCF fast failover */
5297 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
5298 spin_unlock_irq(&phba->hbalock);
5299
5300 /* Scan FCF table from the first entry to re-discover SAN */
0c9ab6f5 5301 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
a93ff37a 5302 "2777 Start post-quiescent FCF table scan\n");
0c9ab6f5 5303 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
ecfd03c6 5304 if (rc)
0c9ab6f5
JS
5305 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5306 "2747 Issue FCF scan read FCF mailbox "
5307 "command failed 0x%x\n", rc);
ecfd03c6
JS
5308}
5309
da0436e9
JS
5310/**
5311 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
5312 * @phba: pointer to lpfc hba data structure.
5313 * @dev_grp: The HBA PCI-Device group number.
5314 *
5315 * This routine is invoked to set up the per HBA PCI-Device group function
5316 * API jump table entries.
5317 *
5318 * Return: 0 if success, otherwise -ENODEV
5319 **/
5320int
5321lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5322{
5323 int rc;
5324
5325 /* Set up lpfc PCI-device group */
5326 phba->pci_dev_grp = dev_grp;
5327
5328 /* The LPFC_PCI_DEV_OC uses SLI4 */
5329 if (dev_grp == LPFC_PCI_DEV_OC)
5330 phba->sli_rev = LPFC_SLI_REV4;
5331
5332 /* Set up device INIT API function jump table */
5333 rc = lpfc_init_api_table_setup(phba, dev_grp);
5334 if (rc)
5335 return -ENODEV;
5336 /* Set up SCSI API function jump table */
5337 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
5338 if (rc)
5339 return -ENODEV;
5340 /* Set up SLI API function jump table */
5341 rc = lpfc_sli_api_table_setup(phba, dev_grp);
5342 if (rc)
5343 return -ENODEV;
5344 /* Set up MBOX API function jump table */
5345 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
5346 if (rc)
5347 return -ENODEV;
5348
5349 return 0;
5b75da2f
JS
5350}
5351
5352/**
3621a710 5353 * lpfc_log_intr_mode - Log the active interrupt mode
5b75da2f
JS
5354 * @phba: pointer to lpfc hba data structure.
5355 * @intr_mode: active interrupt mode adopted.
5356 *
5357 * This routine it invoked to log the currently used active interrupt mode
5358 * to the device.
3772a991
JS
5359 **/
5360static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
5b75da2f
JS
5361{
5362 switch (intr_mode) {
5363 case 0:
5364 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5365 "0470 Enable INTx interrupt mode.\n");
5366 break;
5367 case 1:
5368 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5369 "0481 Enabled MSI interrupt mode.\n");
5370 break;
5371 case 2:
5372 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5373 "0480 Enabled MSI-X interrupt mode.\n");
5374 break;
5375 default:
5376 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5377 "0482 Illegal interrupt mode.\n");
5378 break;
5379 }
5380 return;
5381}
5382
5b75da2f 5383/**
3772a991 5384 * lpfc_enable_pci_dev - Enable a generic PCI device.
5b75da2f
JS
5385 * @phba: pointer to lpfc hba data structure.
5386 *
3772a991
JS
5387 * This routine is invoked to enable the PCI device that is common to all
5388 * PCI devices.
5b75da2f
JS
5389 *
5390 * Return codes
af901ca1 5391 * 0 - successful
3772a991 5392 * other values - error
5b75da2f 5393 **/
3772a991
JS
5394static int
5395lpfc_enable_pci_dev(struct lpfc_hba *phba)
5b75da2f 5396{
3772a991 5397 struct pci_dev *pdev;
5b75da2f 5398
3772a991
JS
5399 /* Obtain PCI device reference */
5400 if (!phba->pcidev)
5401 goto out_error;
5402 else
5403 pdev = phba->pcidev;
3772a991
JS
5404 /* Enable PCI device */
5405 if (pci_enable_device_mem(pdev))
5406 goto out_error;
5407 /* Request PCI resource for the device */
e0c0483c 5408 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
3772a991
JS
5409 goto out_disable_device;
5410 /* Set up device as PCI master and save state for EEH */
5411 pci_set_master(pdev);
5412 pci_try_set_mwi(pdev);
5413 pci_save_state(pdev);
5b75da2f 5414
0558056c 5415 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
453193e0 5416 if (pci_is_pcie(pdev))
0558056c
JS
5417 pdev->needs_freset = 1;
5418
3772a991 5419 return 0;
5b75da2f 5420
3772a991
JS
5421out_disable_device:
5422 pci_disable_device(pdev);
5423out_error:
079b5c91 5424 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e0c0483c 5425 "1401 Failed to enable pci device\n");
3772a991 5426 return -ENODEV;
5b75da2f
JS
5427}
5428
5429/**
3772a991 5430 * lpfc_disable_pci_dev - Disable a generic PCI device.
5b75da2f
JS
5431 * @phba: pointer to lpfc hba data structure.
5432 *
3772a991
JS
5433 * This routine is invoked to disable the PCI device that is common to all
5434 * PCI devices.
5b75da2f
JS
5435 **/
5436static void
3772a991 5437lpfc_disable_pci_dev(struct lpfc_hba *phba)
5b75da2f 5438{
3772a991 5439 struct pci_dev *pdev;
5b75da2f 5440
3772a991
JS
5441 /* Obtain PCI device reference */
5442 if (!phba->pcidev)
5443 return;
5444 else
5445 pdev = phba->pcidev;
3772a991 5446 /* Release PCI resource and disable PCI device */
e0c0483c 5447 pci_release_mem_regions(pdev);
3772a991 5448 pci_disable_device(pdev);
5b75da2f
JS
5449
5450 return;
5451}
5452
e59058c4 5453/**
3772a991
JS
5454 * lpfc_reset_hba - Reset a hba
5455 * @phba: pointer to lpfc hba data structure.
e59058c4 5456 *
3772a991
JS
5457 * This routine is invoked to reset a hba device. It brings the HBA
5458 * offline, performs a board restart, and then brings the board back
5459 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
5460 * on outstanding mailbox commands.
e59058c4 5461 **/
3772a991
JS
5462void
5463lpfc_reset_hba(struct lpfc_hba *phba)
dea3101e 5464{
3772a991
JS
5465 /* If resets are disabled then set error state and return. */
5466 if (!phba->cfg_enable_hba_reset) {
5467 phba->link_state = LPFC_HBA_ERROR;
5468 return;
5469 }
ee62021a
JS
5470 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
5471 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5472 else
5473 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
3772a991
JS
5474 lpfc_offline(phba);
5475 lpfc_sli_brdrestart(phba);
5476 lpfc_online(phba);
5477 lpfc_unblock_mgmt_io(phba);
5478}
dea3101e 5479
0a96e975
JS
5480/**
5481 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
5482 * @phba: pointer to lpfc hba data structure.
5483 *
5484 * This function enables the PCI SR-IOV virtual functions to a physical
5485 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
5486 * enable the number of virtual functions to the physical function. As
5487 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
5488 * API call does not considered as an error condition for most of the device.
5489 **/
5490uint16_t
5491lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
5492{
5493 struct pci_dev *pdev = phba->pcidev;
5494 uint16_t nr_virtfn;
5495 int pos;
5496
0a96e975
JS
5497 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
5498 if (pos == 0)
5499 return 0;
5500
5501 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
5502 return nr_virtfn;
5503}
5504
912e3acd
JS
5505/**
5506 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
5507 * @phba: pointer to lpfc hba data structure.
5508 * @nr_vfn: number of virtual functions to be enabled.
5509 *
5510 * This function enables the PCI SR-IOV virtual functions to a physical
5511 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
5512 * enable the number of virtual functions to the physical function. As
5513 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
5514 * API call does not considered as an error condition for most of the device.
5515 **/
5516int
5517lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
5518{
5519 struct pci_dev *pdev = phba->pcidev;
0a96e975 5520 uint16_t max_nr_vfn;
912e3acd
JS
5521 int rc;
5522
0a96e975
JS
5523 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
5524 if (nr_vfn > max_nr_vfn) {
5525 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5526 "3057 Requested vfs (%d) greater than "
5527 "supported vfs (%d)", nr_vfn, max_nr_vfn);
5528 return -EINVAL;
5529 }
5530
912e3acd
JS
5531 rc = pci_enable_sriov(pdev, nr_vfn);
5532 if (rc) {
5533 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5534 "2806 Failed to enable sriov on this device "
5535 "with vfn number nr_vf:%d, rc:%d\n",
5536 nr_vfn, rc);
5537 } else
5538 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5539 "2807 Successful enable sriov on this device "
5540 "with vfn number nr_vf:%d\n", nr_vfn);
5541 return rc;
5542}
5543
3772a991 5544/**
895427bd 5545 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
3772a991
JS
5546 * @phba: pointer to lpfc hba data structure.
5547 *
895427bd
JS
5548 * This routine is invoked to set up the driver internal resources before the
5549 * device specific resource setup to support the HBA device it attached to.
3772a991
JS
5550 *
5551 * Return codes
895427bd
JS
5552 * 0 - successful
5553 * other values - error
3772a991
JS
5554 **/
5555static int
895427bd 5556lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3772a991 5557{
895427bd 5558 struct lpfc_sli *psli = &phba->sli;
dea3101e 5559
2e0fef85 5560 /*
895427bd 5561 * Driver resources common to all SLI revisions
2e0fef85 5562 */
895427bd
JS
5563 atomic_set(&phba->fast_event_count, 0);
5564 spin_lock_init(&phba->hbalock);
dea3101e 5565
895427bd
JS
5566 /* Initialize ndlp management spinlock */
5567 spin_lock_init(&phba->ndlp_lock);
5568
5569 INIT_LIST_HEAD(&phba->port_list);
5570 INIT_LIST_HEAD(&phba->work_list);
5571 init_waitqueue_head(&phba->wait_4_mlo_m_q);
5572
5573 /* Initialize the wait queue head for the kernel thread */
5574 init_waitqueue_head(&phba->work_waitq);
5575
5576 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
f358dd0c 5577 "1403 Protocols supported %s %s %s\n",
895427bd
JS
5578 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
5579 "SCSI" : " "),
5580 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
f358dd0c
JS
5581 "NVME" : " "),
5582 (phba->nvmet_support ? "NVMET" : " "));
895427bd
JS
5583
5584 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
5585 /* Initialize the scsi buffer list used by driver for scsi IO */
5586 spin_lock_init(&phba->scsi_buf_list_get_lock);
5587 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5588 spin_lock_init(&phba->scsi_buf_list_put_lock);
5589 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5590 }
5591
5592 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
5593 (phba->nvmet_support == 0)) {
5594 /* Initialize the NVME buffer list used by driver for NVME IO */
5595 spin_lock_init(&phba->nvme_buf_list_get_lock);
5596 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
5597 spin_lock_init(&phba->nvme_buf_list_put_lock);
5598 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
5599 }
5600
5601 /* Initialize the fabric iocb list */
5602 INIT_LIST_HEAD(&phba->fabric_iocb_list);
5603
5604 /* Initialize list to save ELS buffers */
5605 INIT_LIST_HEAD(&phba->elsbuf);
5606
5607 /* Initialize FCF connection rec list */
5608 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5609
5610 /* Initialize OAS configuration list */
5611 spin_lock_init(&phba->devicelock);
5612 INIT_LIST_HEAD(&phba->luns);
858c9f6c 5613
3772a991 5614 /* MBOX heartbeat timer */
33cc559a 5615 setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
3772a991 5616 /* Fabric block timer */
33cc559a
TJ
5617 setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
5618 (unsigned long)phba);
3772a991 5619 /* EA polling mode timer */
33cc559a
TJ
5620 setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
5621 (unsigned long)phba);
895427bd 5622 /* Heartbeat timer */
33cc559a 5623 setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
895427bd
JS
5624
5625 return 0;
5626}
5627
5628/**
5629 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
5630 * @phba: pointer to lpfc hba data structure.
5631 *
5632 * This routine is invoked to set up the driver internal resources specific to
5633 * support the SLI-3 HBA device it attached to.
5634 *
5635 * Return codes
5636 * 0 - successful
5637 * other values - error
5638 **/
5639static int
5640lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
5641{
5642 int rc;
5643
5644 /*
5645 * Initialize timers used by driver
5646 */
5647
5648 /* FCP polling mode timer */
33cc559a
TJ
5649 setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
5650 (unsigned long)phba);
dea3101e 5651
3772a991
JS
5652 /* Host attention work mask setup */
5653 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
5654 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
dea3101e 5655
3772a991
JS
5656 /* Get all the module params for configuring this host */
5657 lpfc_get_cfgparam(phba);
895427bd
JS
5658 /* Set up phase-1 common device driver resources */
5659
5660 rc = lpfc_setup_driver_resource_phase1(phba);
5661 if (rc)
5662 return -ENODEV;
5663
49198b37
JS
5664 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
5665 phba->menlo_flag |= HBA_MENLO_SUPPORT;
5666 /* check for menlo minimum sg count */
5667 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
5668 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
5669 }
5670
895427bd
JS
5671 if (!phba->sli.sli3_ring)
5672 phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING *
2a76a283 5673 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
895427bd 5674 if (!phba->sli.sli3_ring)
2a76a283
JS
5675 return -ENOMEM;
5676
dea3101e 5677 /*
96f7077f 5678 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
3772a991 5679 * used to create the sg_dma_buf_pool must be dynamically calculated.
dea3101e 5680 */
3772a991 5681
96f7077f
JS
5682 /* Initialize the host templates the configured values. */
5683 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
96418b5e
JS
5684 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
5685 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
96f7077f
JS
5686
5687 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
3772a991 5688 if (phba->cfg_enable_bg) {
96f7077f
JS
5689 /*
5690 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
5691 * the FCP rsp, and a BDE for each. Sice we have no control
5692 * over how many protection data segments the SCSI Layer
5693 * will hand us (ie: there could be one for every block
5694 * in the IO), we just allocate enough BDEs to accomidate
5695 * our max amount and we need to limit lpfc_sg_seg_cnt to
5696 * minimize the risk of running out.
5697 */
5698 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5699 sizeof(struct fcp_rsp) +
5700 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
5701
5702 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
5703 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
5704
5705 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
5706 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
5707 } else {
5708 /*
5709 * The scsi_buf for a regular I/O will hold the FCP cmnd,
5710 * the FCP rsp, a BDE for each, and a BDE for up to
5711 * cfg_sg_seg_cnt data segments.
5712 */
5713 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5714 sizeof(struct fcp_rsp) +
5715 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
5716
5717 /* Total BDEs in BPL for scsi_sg_list */
5718 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
901a920f 5719 }
dea3101e 5720
96f7077f
JS
5721 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5722 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
5723 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5724 phba->cfg_total_seg_cnt);
dea3101e 5725
3772a991
JS
5726 phba->max_vpi = LPFC_MAX_VPI;
5727 /* This will be set to correct value after config_port mbox */
5728 phba->max_vports = 0;
dea3101e 5729
3772a991
JS
5730 /*
5731 * Initialize the SLI Layer to run with lpfc HBAs.
5732 */
5733 lpfc_sli_setup(phba);
895427bd 5734 lpfc_sli_queue_init(phba);
ed957684 5735
3772a991
JS
5736 /* Allocate device driver memory */
5737 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
5738 return -ENOMEM;
51ef4c26 5739
912e3acd
JS
5740 /*
5741 * Enable sr-iov virtual functions if supported and configured
5742 * through the module parameter.
5743 */
5744 if (phba->cfg_sriov_nr_virtfn > 0) {
5745 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5746 phba->cfg_sriov_nr_virtfn);
5747 if (rc) {
5748 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5749 "2808 Requested number of SR-IOV "
5750 "virtual functions (%d) is not "
5751 "supported\n",
5752 phba->cfg_sriov_nr_virtfn);
5753 phba->cfg_sriov_nr_virtfn = 0;
5754 }
5755 }
5756
3772a991
JS
5757 return 0;
5758}
ed957684 5759
3772a991
JS
5760/**
5761 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
5762 * @phba: pointer to lpfc hba data structure.
5763 *
5764 * This routine is invoked to unset the driver internal resources set up
5765 * specific for supporting the SLI-3 HBA device it attached to.
5766 **/
5767static void
5768lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
5769{
5770 /* Free device driver memory allocated */
5771 lpfc_mem_free_all(phba);
3163f725 5772
3772a991
JS
5773 return;
5774}
dea3101e 5775
3772a991 5776/**
da0436e9 5777 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3772a991
JS
5778 * @phba: pointer to lpfc hba data structure.
5779 *
da0436e9
JS
5780 * This routine is invoked to set up the driver internal resources specific to
5781 * support the SLI-4 HBA device it attached to.
3772a991
JS
5782 *
5783 * Return codes
af901ca1 5784 * 0 - successful
da0436e9 5785 * other values - error
3772a991
JS
5786 **/
5787static int
da0436e9 5788lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3772a991 5789{
28baac74 5790 LPFC_MBOXQ_t *mboxq;
f358dd0c 5791 MAILBOX_t *mb;
895427bd 5792 int rc, i, max_buf_size;
28baac74
JS
5793 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
5794 struct lpfc_mqe *mqe;
09294d46 5795 int longs;
1ba981fd 5796 int fof_vectors = 0;
f358dd0c 5797 uint64_t wwn;
da0436e9 5798
895427bd
JS
5799 phba->sli4_hba.num_online_cpu = num_online_cpus();
5800 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
5801 phba->sli4_hba.curr_disp_cpu = 0;
5802
716d3bc5
JS
5803 /* Get all the module params for configuring this host */
5804 lpfc_get_cfgparam(phba);
5805
895427bd
JS
5806 /* Set up phase-1 common device driver resources */
5807 rc = lpfc_setup_driver_resource_phase1(phba);
5808 if (rc)
5809 return -ENODEV;
5810
da0436e9
JS
5811 /* Before proceed, wait for POST done and device ready */
5812 rc = lpfc_sli4_post_status_check(phba);
5813 if (rc)
5814 return -ENODEV;
5815
3772a991 5816 /*
da0436e9 5817 * Initialize timers used by driver
3772a991 5818 */
3772a991 5819
33cc559a 5820 setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
3772a991 5821
ecfd03c6 5822 /* FCF rediscover timer */
33cc559a
TJ
5823 setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
5824 (unsigned long)phba);
ecfd03c6 5825
7ad20aa9
JS
5826 /*
5827 * Control structure for handling external multi-buffer mailbox
5828 * command pass-through.
5829 */
5830 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
5831 sizeof(struct lpfc_mbox_ext_buf_ctx));
5832 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
5833
da0436e9 5834 phba->max_vpi = LPFC_MAX_VPI;
67d12733 5835
da0436e9
JS
5836 /* This will be set to correct value after the read_config mbox */
5837 phba->max_vports = 0;
3772a991 5838
da0436e9
JS
5839 /* Program the default value of vlan_id and fc_map */
5840 phba->valid_vlan = 0;
5841 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5842 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5843 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3772a991 5844
2a76a283
JS
5845 /*
5846 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
895427bd
JS
5847 * we will associate a new ring, for each EQ/CQ/WQ tuple.
5848 * The WQ create will allocate the ring.
2a76a283 5849 */
09294d46 5850
da0436e9 5851 /*
09294d46
JS
5852 * It doesn't matter what family our adapter is in, we are
5853 * limited to 2 Pages, 512 SGEs, for our SGL.
5854 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
5855 */
5856 max_buf_size = (2 * SLI4_PAGE_SIZE);
5857 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
5858 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
09294d46 5859
da0436e9 5860 /*
895427bd
JS
5861 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
5862 * used to create the sg_dma_buf_pool must be calculated.
da0436e9 5863 */
96f7077f
JS
5864 if (phba->cfg_enable_bg) {
5865 /*
895427bd
JS
5866 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
5867 * the FCP rsp, and a SGE. Sice we have no control
5868 * over how many protection segments the SCSI Layer
96f7077f 5869 * will hand us (ie: there could be one for every block
895427bd
JS
5870 * in the IO), just allocate enough SGEs to accomidate
5871 * our max amount and we need to limit lpfc_sg_seg_cnt
5872 * to minimize the risk of running out.
96f7077f
JS
5873 */
5874 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
895427bd 5875 sizeof(struct fcp_rsp) + max_buf_size;
96f7077f
JS
5876
5877 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
5878 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
5879
5880 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
895427bd
JS
5881 phba->cfg_sg_seg_cnt =
5882 LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
96f7077f
JS
5883 } else {
5884 /*
895427bd 5885 * The scsi_buf for a regular I/O holds the FCP cmnd,
96f7077f
JS
5886 * the FCP rsp, a SGE for each, and a SGE for up to
5887 * cfg_sg_seg_cnt data segments.
5888 */
5889 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
895427bd
JS
5890 sizeof(struct fcp_rsp) +
5891 ((phba->cfg_sg_seg_cnt + 2) *
5892 sizeof(struct sli4_sge));
96f7077f
JS
5893
5894 /* Total SGEs for scsi_sg_list */
5895 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
895427bd 5896
96f7077f 5897 /*
895427bd
JS
5898 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
5899 * need to post 1 page for the SGL.
96f7077f 5900 */
085c647c 5901 }
acd6859b 5902
96f7077f
JS
5903 /* Initialize the host templates with the updated values. */
5904 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5905 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
96418b5e 5906 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
96f7077f
JS
5907
5908 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
5909 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
5910 else
5911 phba->cfg_sg_dma_buf_size =
5912 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
5913
5914 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5915 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
5916 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5917 phba->cfg_total_seg_cnt);
3772a991 5918
da0436e9 5919 /* Initialize buffer queue management fields */
895427bd 5920 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
da0436e9
JS
5921 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
5922 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3772a991 5923
da0436e9
JS
5924 /*
5925 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
5926 */
895427bd
JS
5927 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
5928 /* Initialize the Abort scsi buffer list used by driver */
5929 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
5930 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
5931 }
5932
5933 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
5934 /* Initialize the Abort nvme buffer list used by driver */
5935 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
5936 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
86c67379 5937 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6c621a22 5938 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
a8cf5dfe 5939 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
6c621a22 5940
318083ad
JS
5941 /* Fast-path XRI aborted CQ Event work queue list */
5942 INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
895427bd
JS
5943 }
5944
da0436e9 5945 /* This abort list used by worker thread */
895427bd 5946 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
f358dd0c 5947 spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
a8cf5dfe 5948 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
3772a991 5949
da0436e9 5950 /*
6d368e53 5951 * Initialize driver internal slow-path work queues
da0436e9 5952 */
3772a991 5953
da0436e9
JS
5954 /* Driver internel slow-path CQ Event pool */
5955 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
5956 /* Response IOCB work queue list */
45ed1190 5957 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
da0436e9
JS
5958 /* Asynchronous event CQ Event work queue list */
5959 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
5960 /* Fast-path XRI aborted CQ Event work queue list */
5961 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
5962 /* Slow-path XRI aborted CQ Event work queue list */
5963 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
5964 /* Receive queue CQ Event work queue list */
5965 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
5966
6d368e53
JS
5967 /* Initialize extent block lists. */
5968 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
5969 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
5970 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
5971 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
5972
d1f525aa
JS
5973 /* Initialize mboxq lists. If the early init routines fail
5974 * these lists need to be correctly initialized.
5975 */
5976 INIT_LIST_HEAD(&phba->sli.mboxq);
5977 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
5978
448193b5
JS
5979 /* initialize optic_state to 0xFF */
5980 phba->sli4_hba.lnk_info.optic_state = 0xff;
5981
da0436e9
JS
5982 /* Allocate device driver memory */
5983 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
5984 if (rc)
5985 return -ENOMEM;
5986
2fcee4bf
JS
5987 /* IF Type 2 ports get initialized now. */
5988 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5989 LPFC_SLI_INTF_IF_TYPE_2) {
5990 rc = lpfc_pci_function_reset(phba);
895427bd
JS
5991 if (unlikely(rc)) {
5992 rc = -ENODEV;
5993 goto out_free_mem;
5994 }
946727dc 5995 phba->temp_sensor_support = 1;
2fcee4bf
JS
5996 }
5997
da0436e9
JS
5998 /* Create the bootstrap mailbox command */
5999 rc = lpfc_create_bootstrap_mbox(phba);
6000 if (unlikely(rc))
6001 goto out_free_mem;
6002
6003 /* Set up the host's endian order with the device. */
6004 rc = lpfc_setup_endian_order(phba);
6005 if (unlikely(rc))
6006 goto out_free_bsmbx;
6007
6008 /* Set up the hba's configuration parameters. */
6009 rc = lpfc_sli4_read_config(phba);
cff261f6
JS
6010 if (unlikely(rc))
6011 goto out_free_bsmbx;
6012 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
da0436e9
JS
6013 if (unlikely(rc))
6014 goto out_free_bsmbx;
6015
2fcee4bf
JS
6016 /* IF Type 0 ports get initialized now. */
6017 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6018 LPFC_SLI_INTF_IF_TYPE_0) {
6019 rc = lpfc_pci_function_reset(phba);
6020 if (unlikely(rc))
6021 goto out_free_bsmbx;
6022 }
da0436e9 6023
cb5172ea
JS
6024 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6025 GFP_KERNEL);
6026 if (!mboxq) {
6027 rc = -ENOMEM;
6028 goto out_free_bsmbx;
6029 }
6030
f358dd0c 6031 /* Check for NVMET being configured */
895427bd 6032 phba->nvmet_support = 0;
f358dd0c
JS
6033 if (lpfc_enable_nvmet_cnt) {
6034
6035 /* First get WWN of HBA instance */
6036 lpfc_read_nv(phba, mboxq);
6037 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6038 if (rc != MBX_SUCCESS) {
6039 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6040 "6016 Mailbox failed , mbxCmd x%x "
6041 "READ_NV, mbxStatus x%x\n",
6042 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6043 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
d1f525aa 6044 mempool_free(mboxq, phba->mbox_mem_pool);
f358dd0c
JS
6045 rc = -EIO;
6046 goto out_free_bsmbx;
6047 }
6048 mb = &mboxq->u.mb;
6049 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6050 sizeof(uint64_t));
6051 wwn = cpu_to_be64(wwn);
6052 phba->sli4_hba.wwnn.u.name = wwn;
6053 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6054 sizeof(uint64_t));
6055 /* wwn is WWPN of HBA instance */
6056 wwn = cpu_to_be64(wwn);
6057 phba->sli4_hba.wwpn.u.name = wwn;
6058
6059 /* Check to see if it matches any module parameter */
6060 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6061 if (wwn == lpfc_enable_nvmet[i]) {
7d708033 6062#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
3c603be9
JS
6063 if (lpfc_nvmet_mem_alloc(phba))
6064 break;
6065
6066 phba->nvmet_support = 1; /* a match */
6067
f358dd0c
JS
6068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6069 "6017 NVME Target %016llx\n",
6070 wwn);
7d708033
JS
6071#else
6072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6073 "6021 Can't enable NVME Target."
6074 " NVME_TARGET_FC infrastructure"
6075 " is not in kernel\n");
6076#endif
3c603be9 6077 break;
f358dd0c
JS
6078 }
6079 }
6080 }
895427bd
JS
6081
6082 lpfc_nvme_mod_param_dep(phba);
6083
fedd3b7b 6084 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
cb5172ea
JS
6085 lpfc_supported_pages(mboxq);
6086 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
fedd3b7b
JS
6087 if (!rc) {
6088 mqe = &mboxq->u.mqe;
6089 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
6090 LPFC_MAX_SUPPORTED_PAGES);
6091 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
6092 switch (pn_page[i]) {
6093 case LPFC_SLI4_PARAMETERS:
6094 phba->sli4_hba.pc_sli4_params.supported = 1;
6095 break;
6096 default:
6097 break;
6098 }
6099 }
6100 /* Read the port's SLI4 Parameters capabilities if supported. */
6101 if (phba->sli4_hba.pc_sli4_params.supported)
6102 rc = lpfc_pc_sli4_params_get(phba, mboxq);
6103 if (rc) {
6104 mempool_free(mboxq, phba->mbox_mem_pool);
6105 rc = -EIO;
6106 goto out_free_bsmbx;
cb5172ea
JS
6107 }
6108 }
65791f1f 6109
fedd3b7b
JS
6110 /*
6111 * Get sli4 parameters that override parameters from Port capabilities.
6d368e53
JS
6112 * If this call fails, it isn't critical unless the SLI4 parameters come
6113 * back in conflict.
fedd3b7b 6114 */
6d368e53
JS
6115 rc = lpfc_get_sli4_parameters(phba, mboxq);
6116 if (rc) {
6117 if (phba->sli4_hba.extents_in_use &&
6118 phba->sli4_hba.rpi_hdrs_in_use) {
6119 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6120 "2999 Unsupported SLI4 Parameters "
6121 "Extents and RPI headers enabled.\n");
6d368e53 6122 }
895427bd
JS
6123 mempool_free(mboxq, phba->mbox_mem_pool);
6124 goto out_free_bsmbx;
6d368e53 6125 }
895427bd 6126
cb5172ea 6127 mempool_free(mboxq, phba->mbox_mem_pool);
1ba981fd
JS
6128
6129 /* Verify OAS is supported */
6130 lpfc_sli4_oas_verify(phba);
6131 if (phba->cfg_fof)
6132 fof_vectors = 1;
6133
5350d872
JS
6134 /* Verify all the SLI4 queues */
6135 rc = lpfc_sli4_queue_verify(phba);
da0436e9
JS
6136 if (rc)
6137 goto out_free_bsmbx;
6138
6139 /* Create driver internal CQE event pool */
6140 rc = lpfc_sli4_cq_event_pool_create(phba);
6141 if (rc)
5350d872 6142 goto out_free_bsmbx;
da0436e9 6143
8a9d2e80
JS
6144 /* Initialize sgl lists per host */
6145 lpfc_init_sgl_list(phba);
6146
6147 /* Allocate and initialize active sgl array */
da0436e9
JS
6148 rc = lpfc_init_active_sgl_array(phba);
6149 if (rc) {
6150 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6151 "1430 Failed to initialize sgl list.\n");
8a9d2e80 6152 goto out_destroy_cq_event_pool;
da0436e9 6153 }
da0436e9
JS
6154 rc = lpfc_sli4_init_rpi_hdrs(phba);
6155 if (rc) {
6156 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6157 "1432 Failed to initialize rpi headers.\n");
6158 goto out_free_active_sgl;
6159 }
6160
a93ff37a 6161 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
0c9ab6f5
JS
6162 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
6163 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
6164 GFP_KERNEL);
6165 if (!phba->fcf.fcf_rr_bmask) {
6166 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6167 "2759 Failed allocate memory for FCF round "
6168 "robin failover bmask\n");
0558056c 6169 rc = -ENOMEM;
0c9ab6f5
JS
6170 goto out_remove_rpi_hdrs;
6171 }
6172
895427bd
JS
6173 phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
6174 sizeof(struct lpfc_hba_eq_hdl),
6175 GFP_KERNEL);
6176 if (!phba->sli4_hba.hba_eq_hdl) {
67d12733
JS
6177 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6178 "2572 Failed allocate memory for "
6179 "fast-path per-EQ handle array\n");
6180 rc = -ENOMEM;
6181 goto out_free_fcf_rr_bmask;
da0436e9
JS
6182 }
6183
895427bd
JS
6184 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
6185 sizeof(struct lpfc_vector_map_info),
6186 GFP_KERNEL);
7bb03bbf
JS
6187 if (!phba->sli4_hba.cpu_map) {
6188 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6189 "3327 Failed allocate memory for msi-x "
6190 "interrupt vector mapping\n");
6191 rc = -ENOMEM;
895427bd 6192 goto out_free_hba_eq_hdl;
7bb03bbf 6193 }
b246de17 6194 if (lpfc_used_cpu == NULL) {
895427bd
JS
6195 lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
6196 GFP_KERNEL);
b246de17
JS
6197 if (!lpfc_used_cpu) {
6198 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6199 "3335 Failed allocate memory for msi-x "
6200 "interrupt vector mapping\n");
6201 kfree(phba->sli4_hba.cpu_map);
6202 rc = -ENOMEM;
895427bd 6203 goto out_free_hba_eq_hdl;
b246de17
JS
6204 }
6205 for (i = 0; i < lpfc_present_cpu; i++)
6206 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
6207 }
6208
912e3acd
JS
6209 /*
6210 * Enable sr-iov virtual functions if supported and configured
6211 * through the module parameter.
6212 */
6213 if (phba->cfg_sriov_nr_virtfn > 0) {
6214 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6215 phba->cfg_sriov_nr_virtfn);
6216 if (rc) {
6217 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6218 "3020 Requested number of SR-IOV "
6219 "virtual functions (%d) is not "
6220 "supported\n",
6221 phba->cfg_sriov_nr_virtfn);
6222 phba->cfg_sriov_nr_virtfn = 0;
6223 }
6224 }
6225
5248a749 6226 return 0;
da0436e9 6227
895427bd
JS
6228out_free_hba_eq_hdl:
6229 kfree(phba->sli4_hba.hba_eq_hdl);
0c9ab6f5
JS
6230out_free_fcf_rr_bmask:
6231 kfree(phba->fcf.fcf_rr_bmask);
da0436e9
JS
6232out_remove_rpi_hdrs:
6233 lpfc_sli4_remove_rpi_hdrs(phba);
6234out_free_active_sgl:
6235 lpfc_free_active_sgl(phba);
da0436e9
JS
6236out_destroy_cq_event_pool:
6237 lpfc_sli4_cq_event_pool_destroy(phba);
da0436e9
JS
6238out_free_bsmbx:
6239 lpfc_destroy_bootstrap_mbox(phba);
6240out_free_mem:
6241 lpfc_mem_free(phba);
6242 return rc;
6243}
6244
6245/**
6246 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
6247 * @phba: pointer to lpfc hba data structure.
6248 *
6249 * This routine is invoked to unset the driver internal resources set up
6250 * specific for supporting the SLI-4 HBA device it attached to.
6251 **/
6252static void
6253lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
6254{
6255 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6256
7bb03bbf
JS
6257 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
6258 kfree(phba->sli4_hba.cpu_map);
6259 phba->sli4_hba.num_present_cpu = 0;
6260 phba->sli4_hba.num_online_cpu = 0;
76fd07a6 6261 phba->sli4_hba.curr_disp_cpu = 0;
7bb03bbf 6262
da0436e9 6263 /* Free memory allocated for fast-path work queue handles */
895427bd 6264 kfree(phba->sli4_hba.hba_eq_hdl);
da0436e9
JS
6265
6266 /* Free the allocated rpi headers. */
6267 lpfc_sli4_remove_rpi_hdrs(phba);
d11e31dd 6268 lpfc_sli4_remove_rpis(phba);
da0436e9 6269
0c9ab6f5
JS
6270 /* Free eligible FCF index bmask */
6271 kfree(phba->fcf.fcf_rr_bmask);
6272
da0436e9
JS
6273 /* Free the ELS sgl list */
6274 lpfc_free_active_sgl(phba);
8a9d2e80 6275 lpfc_free_els_sgl_list(phba);
f358dd0c 6276 lpfc_free_nvmet_sgl_list(phba);
da0436e9 6277
da0436e9
JS
6278 /* Free the completion queue EQ event pool */
6279 lpfc_sli4_cq_event_release_all(phba);
6280 lpfc_sli4_cq_event_pool_destroy(phba);
6281
6d368e53
JS
6282 /* Release resource identifiers. */
6283 lpfc_sli4_dealloc_resource_identifiers(phba);
6284
da0436e9
JS
6285 /* Free the bsmbx region. */
6286 lpfc_destroy_bootstrap_mbox(phba);
6287
6288 /* Free the SLI Layer memory with SLI4 HBAs */
6289 lpfc_mem_free_all(phba);
6290
6291 /* Free the current connect table */
6292 list_for_each_entry_safe(conn_entry, next_conn_entry,
4d9ab994
JS
6293 &phba->fcf_conn_rec_list, list) {
6294 list_del_init(&conn_entry->list);
da0436e9 6295 kfree(conn_entry);
4d9ab994 6296 }
da0436e9
JS
6297
6298 return;
6299}
6300
6301/**
25985edc 6302 * lpfc_init_api_table_setup - Set up init api function jump table
da0436e9
JS
6303 * @phba: The hba struct for which this call is being executed.
6304 * @dev_grp: The HBA PCI-Device group number.
6305 *
6306 * This routine sets up the device INIT interface API function jump table
6307 * in @phba struct.
6308 *
6309 * Returns: 0 - success, -ENODEV - failure.
6310 **/
6311int
6312lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6313{
84d1b006
JS
6314 phba->lpfc_hba_init_link = lpfc_hba_init_link;
6315 phba->lpfc_hba_down_link = lpfc_hba_down_link;
7f86059a 6316 phba->lpfc_selective_reset = lpfc_selective_reset;
da0436e9
JS
6317 switch (dev_grp) {
6318 case LPFC_PCI_DEV_LP:
6319 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
6320 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
6321 phba->lpfc_stop_port = lpfc_stop_port_s3;
6322 break;
6323 case LPFC_PCI_DEV_OC:
6324 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
6325 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
6326 phba->lpfc_stop_port = lpfc_stop_port_s4;
6327 break;
6328 default:
6329 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6330 "1431 Invalid HBA PCI-device group: 0x%x\n",
6331 dev_grp);
6332 return -ENODEV;
6333 break;
6334 }
6335 return 0;
6336}
6337
da0436e9
JS
6338/**
6339 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
6340 * @phba: pointer to lpfc hba data structure.
6341 *
6342 * This routine is invoked to set up the driver internal resources after the
6343 * device specific resource setup to support the HBA device it attached to.
6344 *
6345 * Return codes
af901ca1 6346 * 0 - successful
da0436e9
JS
6347 * other values - error
6348 **/
6349static int
6350lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
6351{
6352 int error;
6353
6354 /* Startup the kernel thread for this host adapter. */
6355 phba->worker_thread = kthread_run(lpfc_do_work, phba,
6356 "lpfc_worker_%d", phba->brd_no);
6357 if (IS_ERR(phba->worker_thread)) {
6358 error = PTR_ERR(phba->worker_thread);
6359 return error;
3772a991
JS
6360 }
6361
6362 return 0;
6363}
6364
6365/**
6366 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
6367 * @phba: pointer to lpfc hba data structure.
6368 *
6369 * This routine is invoked to unset the driver internal resources set up after
6370 * the device specific resource setup for supporting the HBA device it
6371 * attached to.
6372 **/
6373static void
6374lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
6375{
6376 /* Stop kernel worker thread */
6377 kthread_stop(phba->worker_thread);
6378}
6379
6380/**
6381 * lpfc_free_iocb_list - Free iocb list.
6382 * @phba: pointer to lpfc hba data structure.
6383 *
6384 * This routine is invoked to free the driver's IOCB list and memory.
6385 **/
6c621a22 6386void
3772a991
JS
6387lpfc_free_iocb_list(struct lpfc_hba *phba)
6388{
6389 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
6390
6391 spin_lock_irq(&phba->hbalock);
6392 list_for_each_entry_safe(iocbq_entry, iocbq_next,
6393 &phba->lpfc_iocb_list, list) {
6394 list_del(&iocbq_entry->list);
6395 kfree(iocbq_entry);
6396 phba->total_iocbq_bufs--;
98c9ea5c 6397 }
3772a991
JS
6398 spin_unlock_irq(&phba->hbalock);
6399
6400 return;
6401}
6402
6403/**
6404 * lpfc_init_iocb_list - Allocate and initialize iocb list.
6405 * @phba: pointer to lpfc hba data structure.
6406 *
6407 * This routine is invoked to allocate and initizlize the driver's IOCB
6408 * list and set up the IOCB tag array accordingly.
6409 *
6410 * Return codes
af901ca1 6411 * 0 - successful
3772a991
JS
6412 * other values - error
6413 **/
6c621a22 6414int
3772a991
JS
6415lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
6416{
6417 struct lpfc_iocbq *iocbq_entry = NULL;
6418 uint16_t iotag;
6419 int i;
dea3101e
JB
6420
6421 /* Initialize and populate the iocb list per host. */
6422 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3772a991 6423 for (i = 0; i < iocb_count; i++) {
dd00cc48 6424 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
dea3101e
JB
6425 if (iocbq_entry == NULL) {
6426 printk(KERN_ERR "%s: only allocated %d iocbs of "
6427 "expected %d count. Unloading driver.\n",
cadbd4a5 6428 __func__, i, LPFC_IOCB_LIST_CNT);
dea3101e
JB
6429 goto out_free_iocbq;
6430 }
6431
604a3e30
JB
6432 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
6433 if (iotag == 0) {
3772a991 6434 kfree(iocbq_entry);
604a3e30 6435 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3772a991 6436 "Unloading driver.\n", __func__);
604a3e30
JB
6437 goto out_free_iocbq;
6438 }
6d368e53 6439 iocbq_entry->sli4_lxritag = NO_XRI;
3772a991 6440 iocbq_entry->sli4_xritag = NO_XRI;
2e0fef85
JS
6441
6442 spin_lock_irq(&phba->hbalock);
dea3101e
JB
6443 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
6444 phba->total_iocbq_bufs++;
2e0fef85 6445 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
6446 }
6447
3772a991 6448 return 0;
dea3101e 6449
3772a991
JS
6450out_free_iocbq:
6451 lpfc_free_iocb_list(phba);
dea3101e 6452
3772a991
JS
6453 return -ENOMEM;
6454}
5e9d9b82 6455
3772a991 6456/**
8a9d2e80 6457 * lpfc_free_sgl_list - Free a given sgl list.
da0436e9 6458 * @phba: pointer to lpfc hba data structure.
8a9d2e80 6459 * @sglq_list: pointer to the head of sgl list.
3772a991 6460 *
8a9d2e80 6461 * This routine is invoked to free a give sgl list and memory.
3772a991 6462 **/
8a9d2e80
JS
6463void
6464lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
3772a991 6465{
da0436e9 6466 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8a9d2e80
JS
6467
6468 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
6469 list_del(&sglq_entry->list);
6470 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
6471 kfree(sglq_entry);
6472 }
6473}
6474
6475/**
6476 * lpfc_free_els_sgl_list - Free els sgl list.
6477 * @phba: pointer to lpfc hba data structure.
6478 *
6479 * This routine is invoked to free the driver's els sgl list and memory.
6480 **/
6481static void
6482lpfc_free_els_sgl_list(struct lpfc_hba *phba)
6483{
da0436e9 6484 LIST_HEAD(sglq_list);
dea3101e 6485
8a9d2e80 6486 /* Retrieve all els sgls from driver list */
da0436e9 6487 spin_lock_irq(&phba->hbalock);
895427bd
JS
6488 spin_lock(&phba->sli4_hba.sgl_list_lock);
6489 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
6490 spin_unlock(&phba->sli4_hba.sgl_list_lock);
da0436e9 6491 spin_unlock_irq(&phba->hbalock);
dea3101e 6492
8a9d2e80
JS
6493 /* Now free the sgl list */
6494 lpfc_free_sgl_list(phba, &sglq_list);
da0436e9 6495}
92d7f7b0 6496
f358dd0c
JS
6497/**
6498 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
6499 * @phba: pointer to lpfc hba data structure.
6500 *
6501 * This routine is invoked to free the driver's nvmet sgl list and memory.
6502 **/
6503static void
6504lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
6505{
6506 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6507 LIST_HEAD(sglq_list);
6508
6509 /* Retrieve all nvmet sgls from driver list */
6510 spin_lock_irq(&phba->hbalock);
6511 spin_lock(&phba->sli4_hba.sgl_list_lock);
6512 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
6513 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6514 spin_unlock_irq(&phba->hbalock);
6515
6516 /* Now free the sgl list */
6517 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
6518 list_del(&sglq_entry->list);
6519 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
6520 kfree(sglq_entry);
6521 }
6522}
6523
da0436e9
JS
6524/**
6525 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
6526 * @phba: pointer to lpfc hba data structure.
6527 *
6528 * This routine is invoked to allocate the driver's active sgl memory.
6529 * This array will hold the sglq_entry's for active IOs.
6530 **/
6531static int
6532lpfc_init_active_sgl_array(struct lpfc_hba *phba)
6533{
6534 int size;
6535 size = sizeof(struct lpfc_sglq *);
6536 size *= phba->sli4_hba.max_cfg_param.max_xri;
6537
6538 phba->sli4_hba.lpfc_sglq_active_list =
6539 kzalloc(size, GFP_KERNEL);
6540 if (!phba->sli4_hba.lpfc_sglq_active_list)
6541 return -ENOMEM;
6542 return 0;
3772a991
JS
6543}
6544
6545/**
da0436e9 6546 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3772a991
JS
6547 * @phba: pointer to lpfc hba data structure.
6548 *
da0436e9
JS
6549 * This routine is invoked to walk through the array of active sglq entries
6550 * and free all of the resources.
6551 * This is just a place holder for now.
3772a991
JS
6552 **/
6553static void
da0436e9 6554lpfc_free_active_sgl(struct lpfc_hba *phba)
3772a991 6555{
da0436e9 6556 kfree(phba->sli4_hba.lpfc_sglq_active_list);
3772a991
JS
6557}
6558
6559/**
da0436e9 6560 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3772a991
JS
6561 * @phba: pointer to lpfc hba data structure.
6562 *
da0436e9
JS
6563 * This routine is invoked to allocate and initizlize the driver's sgl
6564 * list and set up the sgl xritag tag array accordingly.
3772a991 6565 *
3772a991 6566 **/
8a9d2e80 6567static void
da0436e9 6568lpfc_init_sgl_list(struct lpfc_hba *phba)
3772a991 6569{
da0436e9 6570 /* Initialize and populate the sglq list per host/VF. */
895427bd 6571 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
da0436e9 6572 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
f358dd0c 6573 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
86c67379 6574 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
da0436e9 6575
8a9d2e80
JS
6576 /* els xri-sgl book keeping */
6577 phba->sli4_hba.els_xri_cnt = 0;
0ff10d46 6578
8a9d2e80 6579 /* scsi xri-buffer book keeping */
da0436e9 6580 phba->sli4_hba.scsi_xri_cnt = 0;
895427bd
JS
6581
6582 /* nvme xri-buffer book keeping */
6583 phba->sli4_hba.nvme_xri_cnt = 0;
da0436e9
JS
6584}
6585
6586/**
6587 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
6588 * @phba: pointer to lpfc hba data structure.
6589 *
6590 * This routine is invoked to post rpi header templates to the
88a2cfbb 6591 * port for those SLI4 ports that do not support extents. This routine
da0436e9 6592 * posts a PAGE_SIZE memory region to the port to hold up to
88a2cfbb
JS
6593 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
6594 * and should be called only when interrupts are disabled.
da0436e9
JS
6595 *
6596 * Return codes
af901ca1 6597 * 0 - successful
88a2cfbb 6598 * -ERROR - otherwise.
da0436e9
JS
6599 **/
6600int
6601lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
6602{
6603 int rc = 0;
da0436e9
JS
6604 struct lpfc_rpi_hdr *rpi_hdr;
6605
6606 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
ff78d8f9 6607 if (!phba->sli4_hba.rpi_hdrs_in_use)
6d368e53 6608 return rc;
6d368e53
JS
6609 if (phba->sli4_hba.extents_in_use)
6610 return -EIO;
da0436e9
JS
6611
6612 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
6613 if (!rpi_hdr) {
6614 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6615 "0391 Error during rpi post operation\n");
6616 lpfc_sli4_remove_rpis(phba);
6617 rc = -ENODEV;
6618 }
6619
6620 return rc;
6621}
6622
6623/**
6624 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
6625 * @phba: pointer to lpfc hba data structure.
6626 *
6627 * This routine is invoked to allocate a single 4KB memory region to
6628 * support rpis and stores them in the phba. This single region
6629 * provides support for up to 64 rpis. The region is used globally
6630 * by the device.
6631 *
6632 * Returns:
6633 * A valid rpi hdr on success.
6634 * A NULL pointer on any failure.
6635 **/
6636struct lpfc_rpi_hdr *
6637lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6638{
6639 uint16_t rpi_limit, curr_rpi_range;
6640 struct lpfc_dmabuf *dmabuf;
6641 struct lpfc_rpi_hdr *rpi_hdr;
6642
6d368e53
JS
6643 /*
6644 * If the SLI4 port supports extents, posting the rpi header isn't
6645 * required. Set the expected maximum count and let the actual value
6646 * get set when extents are fully allocated.
6647 */
6648 if (!phba->sli4_hba.rpi_hdrs_in_use)
6649 return NULL;
6650 if (phba->sli4_hba.extents_in_use)
6651 return NULL;
6652
6653 /* The limit on the logical index is just the max_rpi count. */
845d9e8d 6654 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
da0436e9
JS
6655
6656 spin_lock_irq(&phba->hbalock);
6d368e53
JS
6657 /*
6658 * Establish the starting RPI in this header block. The starting
6659 * rpi is normalized to a zero base because the physical rpi is
6660 * port based.
6661 */
97f2ecf1 6662 curr_rpi_range = phba->sli4_hba.next_rpi;
da0436e9
JS
6663 spin_unlock_irq(&phba->hbalock);
6664
845d9e8d
JS
6665 /* Reached full RPI range */
6666 if (curr_rpi_range == rpi_limit)
6d368e53 6667 return NULL;
845d9e8d 6668
da0436e9
JS
6669 /*
6670 * First allocate the protocol header region for the port. The
6671 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
6672 */
6673 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6674 if (!dmabuf)
6675 return NULL;
6676
1aee383d
JP
6677 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
6678 LPFC_HDR_TEMPLATE_SIZE,
6679 &dmabuf->phys, GFP_KERNEL);
da0436e9
JS
6680 if (!dmabuf->virt) {
6681 rpi_hdr = NULL;
6682 goto err_free_dmabuf;
6683 }
6684
da0436e9
JS
6685 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
6686 rpi_hdr = NULL;
6687 goto err_free_coherent;
6688 }
6689
6690 /* Save the rpi header data for cleanup later. */
6691 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
6692 if (!rpi_hdr)
6693 goto err_free_coherent;
6694
6695 rpi_hdr->dmabuf = dmabuf;
6696 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
6697 rpi_hdr->page_count = 1;
6698 spin_lock_irq(&phba->hbalock);
6d368e53
JS
6699
6700 /* The rpi_hdr stores the logical index only. */
6701 rpi_hdr->start_rpi = curr_rpi_range;
845d9e8d 6702 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
da0436e9
JS
6703 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
6704
da0436e9
JS
6705 spin_unlock_irq(&phba->hbalock);
6706 return rpi_hdr;
6707
6708 err_free_coherent:
6709 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
6710 dmabuf->virt, dmabuf->phys);
6711 err_free_dmabuf:
6712 kfree(dmabuf);
6713 return NULL;
6714}
6715
6716/**
6717 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
6718 * @phba: pointer to lpfc hba data structure.
6719 *
6720 * This routine is invoked to remove all memory resources allocated
6d368e53
JS
6721 * to support rpis for SLI4 ports not supporting extents. This routine
6722 * presumes the caller has released all rpis consumed by fabric or port
6723 * logins and is prepared to have the header pages removed.
da0436e9
JS
6724 **/
6725void
6726lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
6727{
6728 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
6729
6d368e53
JS
6730 if (!phba->sli4_hba.rpi_hdrs_in_use)
6731 goto exit;
6732
da0436e9
JS
6733 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
6734 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
6735 list_del(&rpi_hdr->list);
6736 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
6737 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
6738 kfree(rpi_hdr->dmabuf);
6739 kfree(rpi_hdr);
6740 }
6d368e53
JS
6741 exit:
6742 /* There are no rpis available to the port now. */
6743 phba->sli4_hba.next_rpi = 0;
da0436e9
JS
6744}
6745
6746/**
6747 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
6748 * @pdev: pointer to pci device data structure.
6749 *
6750 * This routine is invoked to allocate the driver hba data structure for an
6751 * HBA device. If the allocation is successful, the phba reference to the
6752 * PCI device data structure is set.
6753 *
6754 * Return codes
af901ca1 6755 * pointer to @phba - successful
da0436e9
JS
6756 * NULL - error
6757 **/
6758static struct lpfc_hba *
6759lpfc_hba_alloc(struct pci_dev *pdev)
6760{
6761 struct lpfc_hba *phba;
6762
6763 /* Allocate memory for HBA structure */
6764 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
6765 if (!phba) {
e34ccdfe 6766 dev_err(&pdev->dev, "failed to allocate hba struct\n");
da0436e9
JS
6767 return NULL;
6768 }
6769
6770 /* Set reference to PCI device in HBA structure */
6771 phba->pcidev = pdev;
6772
6773 /* Assign an unused board number */
6774 phba->brd_no = lpfc_get_instance();
6775 if (phba->brd_no < 0) {
6776 kfree(phba);
6777 return NULL;
6778 }
65791f1f 6779 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
da0436e9 6780
4fede78f 6781 spin_lock_init(&phba->ct_ev_lock);
f1c3b0fc
JS
6782 INIT_LIST_HEAD(&phba->ct_ev_waiters);
6783
da0436e9
JS
6784 return phba;
6785}
6786
6787/**
6788 * lpfc_hba_free - Free driver hba data structure with a device.
6789 * @phba: pointer to lpfc hba data structure.
6790 *
6791 * This routine is invoked to free the driver hba data structure with an
6792 * HBA device.
6793 **/
6794static void
6795lpfc_hba_free(struct lpfc_hba *phba)
6796{
6797 /* Release the driver assigned board number */
6798 idr_remove(&lpfc_hba_index, phba->brd_no);
6799
895427bd
JS
6800 /* Free memory allocated with sli3 rings */
6801 kfree(phba->sli.sli3_ring);
6802 phba->sli.sli3_ring = NULL;
2a76a283 6803
da0436e9
JS
6804 kfree(phba);
6805 return;
6806}
6807
6808/**
6809 * lpfc_create_shost - Create hba physical port with associated scsi host.
6810 * @phba: pointer to lpfc hba data structure.
6811 *
6812 * This routine is invoked to create HBA physical port and associate a SCSI
6813 * host with it.
6814 *
6815 * Return codes
af901ca1 6816 * 0 - successful
da0436e9
JS
6817 * other values - error
6818 **/
6819static int
6820lpfc_create_shost(struct lpfc_hba *phba)
6821{
6822 struct lpfc_vport *vport;
6823 struct Scsi_Host *shost;
6824
6825 /* Initialize HBA FC structure */
6826 phba->fc_edtov = FF_DEF_EDTOV;
6827 phba->fc_ratov = FF_DEF_RATOV;
6828 phba->fc_altov = FF_DEF_ALTOV;
6829 phba->fc_arbtov = FF_DEF_ARBTOV;
6830
d7c47992 6831 atomic_set(&phba->sdev_cnt, 0);
2cee7808
JS
6832 atomic_set(&phba->fc4ScsiInputRequests, 0);
6833 atomic_set(&phba->fc4ScsiOutputRequests, 0);
6834 atomic_set(&phba->fc4ScsiControlRequests, 0);
6835 atomic_set(&phba->fc4ScsiIoCmpls, 0);
6836 atomic_set(&phba->fc4NvmeInputRequests, 0);
6837 atomic_set(&phba->fc4NvmeOutputRequests, 0);
6838 atomic_set(&phba->fc4NvmeControlRequests, 0);
6839 atomic_set(&phba->fc4NvmeIoCmpls, 0);
6840 atomic_set(&phba->fc4NvmeLsRequests, 0);
6841 atomic_set(&phba->fc4NvmeLsCmpls, 0);
da0436e9
JS
6842 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
6843 if (!vport)
6844 return -ENODEV;
6845
6846 shost = lpfc_shost_from_vport(vport);
6847 phba->pport = vport;
2ea259ee 6848
f358dd0c
JS
6849 if (phba->nvmet_support) {
6850 /* Only 1 vport (pport) will support NVME target */
6851 if (phba->txrdy_payload_pool == NULL) {
6852 phba->txrdy_payload_pool = pci_pool_create(
6853 "txrdy_pool", phba->pcidev,
6854 TXRDY_PAYLOAD_LEN, 16, 0);
6855 if (phba->txrdy_payload_pool) {
6856 phba->targetport = NULL;
6857 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
6858 lpfc_printf_log(phba, KERN_INFO,
6859 LOG_INIT | LOG_NVME_DISC,
6860 "6076 NVME Target Found\n");
6861 }
6862 }
6863 }
6864
da0436e9
JS
6865 lpfc_debugfs_initialize(vport);
6866 /* Put reference to SCSI host to driver's device private data */
6867 pci_set_drvdata(phba->pcidev, shost);
2e0fef85 6868
4258e98e
JS
6869 /*
6870 * At this point we are fully registered with PSA. In addition,
6871 * any initial discovery should be completed.
6872 */
6873 vport->load_flag |= FC_ALLOW_FDMI;
8663cbbe
JS
6874 if (phba->cfg_enable_SmartSAN ||
6875 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
4258e98e
JS
6876
6877 /* Setup appropriate attribute masks */
6878 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
8663cbbe 6879 if (phba->cfg_enable_SmartSAN)
4258e98e
JS
6880 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
6881 else
6882 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
6883 }
3772a991
JS
6884 return 0;
6885}
db2378e0 6886
3772a991
JS
6887/**
6888 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
6889 * @phba: pointer to lpfc hba data structure.
6890 *
6891 * This routine is invoked to destroy HBA physical port and the associated
6892 * SCSI host.
6893 **/
6894static void
6895lpfc_destroy_shost(struct lpfc_hba *phba)
6896{
6897 struct lpfc_vport *vport = phba->pport;
6898
6899 /* Destroy physical port that associated with the SCSI host */
6900 destroy_port(vport);
6901
6902 return;
6903}
6904
6905/**
6906 * lpfc_setup_bg - Setup Block guard structures and debug areas.
6907 * @phba: pointer to lpfc hba data structure.
6908 * @shost: the shost to be used to detect Block guard settings.
6909 *
6910 * This routine sets up the local Block guard protocol settings for @shost.
6911 * This routine also allocates memory for debugging bg buffers.
6912 **/
6913static void
6914lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
6915{
bbeb79b9
JS
6916 uint32_t old_mask;
6917 uint32_t old_guard;
6918
3772a991 6919 int pagecnt = 10;
b3b98b74 6920 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
3772a991
JS
6921 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6922 "1478 Registering BlockGuard with the "
6923 "SCSI layer\n");
bbeb79b9 6924
b3b98b74
JS
6925 old_mask = phba->cfg_prot_mask;
6926 old_guard = phba->cfg_prot_guard;
bbeb79b9
JS
6927
6928 /* Only allow supported values */
b3b98b74 6929 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
bbeb79b9
JS
6930 SHOST_DIX_TYPE0_PROTECTION |
6931 SHOST_DIX_TYPE1_PROTECTION);
b3b98b74
JS
6932 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
6933 SHOST_DIX_GUARD_CRC);
bbeb79b9
JS
6934
6935 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
b3b98b74
JS
6936 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
6937 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
bbeb79b9 6938
b3b98b74
JS
6939 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
6940 if ((old_mask != phba->cfg_prot_mask) ||
6941 (old_guard != phba->cfg_prot_guard))
bbeb79b9
JS
6942 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6943 "1475 Registering BlockGuard with the "
6944 "SCSI layer: mask %d guard %d\n",
b3b98b74
JS
6945 phba->cfg_prot_mask,
6946 phba->cfg_prot_guard);
bbeb79b9 6947
b3b98b74
JS
6948 scsi_host_set_prot(shost, phba->cfg_prot_mask);
6949 scsi_host_set_guard(shost, phba->cfg_prot_guard);
bbeb79b9
JS
6950 } else
6951 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6952 "1479 Not Registering BlockGuard with the SCSI "
6953 "layer, Bad protection parameters: %d %d\n",
6954 old_mask, old_guard);
3772a991 6955 }
bbeb79b9 6956
3772a991
JS
6957 if (!_dump_buf_data) {
6958 while (pagecnt) {
6959 spin_lock_init(&_dump_buf_lock);
6960 _dump_buf_data =
6961 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
6962 if (_dump_buf_data) {
6a9c52cf
JS
6963 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6964 "9043 BLKGRD: allocated %d pages for "
3772a991
JS
6965 "_dump_buf_data at 0x%p\n",
6966 (1 << pagecnt), _dump_buf_data);
6967 _dump_buf_data_order = pagecnt;
6968 memset(_dump_buf_data, 0,
6969 ((1 << PAGE_SHIFT) << pagecnt));
6970 break;
6971 } else
6972 --pagecnt;
6973 }
6974 if (!_dump_buf_data_order)
6a9c52cf
JS
6975 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6976 "9044 BLKGRD: ERROR unable to allocate "
3772a991
JS
6977 "memory for hexdump\n");
6978 } else
6a9c52cf
JS
6979 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6980 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
3772a991
JS
6981 "\n", _dump_buf_data);
6982 if (!_dump_buf_dif) {
6983 while (pagecnt) {
6984 _dump_buf_dif =
6985 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
6986 if (_dump_buf_dif) {
6a9c52cf
JS
6987 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6988 "9046 BLKGRD: allocated %d pages for "
3772a991
JS
6989 "_dump_buf_dif at 0x%p\n",
6990 (1 << pagecnt), _dump_buf_dif);
6991 _dump_buf_dif_order = pagecnt;
6992 memset(_dump_buf_dif, 0,
6993 ((1 << PAGE_SHIFT) << pagecnt));
6994 break;
6995 } else
6996 --pagecnt;
6997 }
6998 if (!_dump_buf_dif_order)
6a9c52cf
JS
6999 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7000 "9047 BLKGRD: ERROR unable to allocate "
3772a991
JS
7001 "memory for hexdump\n");
7002 } else
6a9c52cf
JS
7003 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7004 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
3772a991
JS
7005 _dump_buf_dif);
7006}
7007
7008/**
7009 * lpfc_post_init_setup - Perform necessary device post initialization setup.
7010 * @phba: pointer to lpfc hba data structure.
7011 *
7012 * This routine is invoked to perform all the necessary post initialization
7013 * setup for the device.
7014 **/
7015static void
7016lpfc_post_init_setup(struct lpfc_hba *phba)
7017{
7018 struct Scsi_Host *shost;
7019 struct lpfc_adapter_event_header adapter_event;
7020
7021 /* Get the default values for Model Name and Description */
7022 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7023
7024 /*
7025 * hba setup may have changed the hba_queue_depth so we need to
7026 * adjust the value of can_queue.
7027 */
7028 shost = pci_get_drvdata(phba->pcidev);
7029 shost->can_queue = phba->cfg_hba_queue_depth - 10;
7030 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
7031 lpfc_setup_bg(phba, shost);
7032
7033 lpfc_host_attrib_init(shost);
7034
7035 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7036 spin_lock_irq(shost->host_lock);
7037 lpfc_poll_start_timer(phba);
7038 spin_unlock_irq(shost->host_lock);
7039 }
7040
7041 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7042 "0428 Perform SCSI scan\n");
7043 /* Send board arrival event to upper layer */
7044 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7045 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7046 fc_host_post_vendor_event(shost, fc_get_event_number(),
7047 sizeof(adapter_event),
7048 (char *) &adapter_event,
7049 LPFC_NL_VENDOR_ID);
7050 return;
7051}
7052
7053/**
7054 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
7055 * @phba: pointer to lpfc hba data structure.
7056 *
7057 * This routine is invoked to set up the PCI device memory space for device
7058 * with SLI-3 interface spec.
7059 *
7060 * Return codes
af901ca1 7061 * 0 - successful
3772a991
JS
7062 * other values - error
7063 **/
7064static int
7065lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7066{
7067 struct pci_dev *pdev;
7068 unsigned long bar0map_len, bar2map_len;
7069 int i, hbq_count;
7070 void *ptr;
7071 int error = -ENODEV;
7072
7073 /* Obtain PCI device reference */
7074 if (!phba->pcidev)
7075 return error;
7076 else
7077 pdev = phba->pcidev;
7078
7079 /* Set the device DMA mask size */
8e68597d
MR
7080 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
7081 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
7082 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
7083 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
3772a991 7084 return error;
8e68597d
MR
7085 }
7086 }
3772a991
JS
7087
7088 /* Get the bus address of Bar0 and Bar2 and the number of bytes
7089 * required by each mapping.
7090 */
7091 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7092 bar0map_len = pci_resource_len(pdev, 0);
7093
7094 phba->pci_bar2_map = pci_resource_start(pdev, 2);
7095 bar2map_len = pci_resource_len(pdev, 2);
7096
7097 /* Map HBA SLIM to a kernel virtual address. */
7098 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
7099 if (!phba->slim_memmap_p) {
7100 dev_printk(KERN_ERR, &pdev->dev,
7101 "ioremap failed for SLIM memory.\n");
7102 goto out;
7103 }
7104
7105 /* Map HBA Control Registers to a kernel virtual address. */
7106 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
7107 if (!phba->ctrl_regs_memmap_p) {
7108 dev_printk(KERN_ERR, &pdev->dev,
7109 "ioremap failed for HBA control registers.\n");
7110 goto out_iounmap_slim;
7111 }
7112
7113 /* Allocate memory for SLI-2 structures */
1aee383d
JP
7114 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7115 &phba->slim2p.phys, GFP_KERNEL);
3772a991
JS
7116 if (!phba->slim2p.virt)
7117 goto out_iounmap;
7118
3772a991 7119 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
7a470277
JS
7120 phba->mbox_ext = (phba->slim2p.virt +
7121 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
3772a991
JS
7122 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
7123 phba->IOCBs = (phba->slim2p.virt +
7124 offsetof(struct lpfc_sli2_slim, IOCBs));
7125
7126 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
7127 lpfc_sli_hbq_size(),
7128 &phba->hbqslimp.phys,
7129 GFP_KERNEL);
7130 if (!phba->hbqslimp.virt)
7131 goto out_free_slim;
7132
7133 hbq_count = lpfc_sli_hbq_count();
7134 ptr = phba->hbqslimp.virt;
7135 for (i = 0; i < hbq_count; ++i) {
7136 phba->hbqs[i].hbq_virt = ptr;
7137 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
7138 ptr += (lpfc_hbq_defs[i]->entry_count *
7139 sizeof(struct lpfc_hbq_entry));
7140 }
7141 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
7142 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
7143
7144 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
7145
3772a991
JS
7146 phba->MBslimaddr = phba->slim_memmap_p;
7147 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
7148 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
7149 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
7150 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
7151
7152 return 0;
7153
7154out_free_slim:
7155 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7156 phba->slim2p.virt, phba->slim2p.phys);
7157out_iounmap:
7158 iounmap(phba->ctrl_regs_memmap_p);
7159out_iounmap_slim:
7160 iounmap(phba->slim_memmap_p);
7161out:
7162 return error;
7163}
7164
7165/**
7166 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
7167 * @phba: pointer to lpfc hba data structure.
7168 *
7169 * This routine is invoked to unset the PCI device memory space for device
7170 * with SLI-3 interface spec.
7171 **/
7172static void
7173lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
7174{
7175 struct pci_dev *pdev;
7176
7177 /* Obtain PCI device reference */
7178 if (!phba->pcidev)
7179 return;
7180 else
7181 pdev = phba->pcidev;
7182
7183 /* Free coherent DMA memory allocated */
7184 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7185 phba->hbqslimp.virt, phba->hbqslimp.phys);
7186 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7187 phba->slim2p.virt, phba->slim2p.phys);
7188
7189 /* I/O memory unmap */
7190 iounmap(phba->ctrl_regs_memmap_p);
7191 iounmap(phba->slim_memmap_p);
7192
7193 return;
7194}
7195
7196/**
da0436e9 7197 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
3772a991
JS
7198 * @phba: pointer to lpfc hba data structure.
7199 *
da0436e9
JS
7200 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
7201 * done and check status.
3772a991 7202 *
da0436e9 7203 * Return 0 if successful, otherwise -ENODEV.
3772a991 7204 **/
da0436e9
JS
7205int
7206lpfc_sli4_post_status_check(struct lpfc_hba *phba)
3772a991 7207{
2fcee4bf
JS
7208 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
7209 struct lpfc_register reg_data;
7210 int i, port_error = 0;
7211 uint32_t if_type;
3772a991 7212
9940b97b
JS
7213 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
7214 memset(&reg_data, 0, sizeof(reg_data));
2fcee4bf 7215 if (!phba->sli4_hba.PSMPHRregaddr)
da0436e9 7216 return -ENODEV;
3772a991 7217
da0436e9
JS
7218 /* Wait up to 30 seconds for the SLI Port POST done and ready */
7219 for (i = 0; i < 3000; i++) {
9940b97b
JS
7220 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
7221 &portsmphr_reg.word0) ||
7222 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
2fcee4bf 7223 /* Port has a fatal POST error, break out */
da0436e9
JS
7224 port_error = -ENODEV;
7225 break;
7226 }
2fcee4bf
JS
7227 if (LPFC_POST_STAGE_PORT_READY ==
7228 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
da0436e9 7229 break;
da0436e9 7230 msleep(10);
3772a991
JS
7231 }
7232
2fcee4bf
JS
7233 /*
7234 * If there was a port error during POST, then don't proceed with
7235 * other register reads as the data may not be valid. Just exit.
7236 */
7237 if (port_error) {
da0436e9 7238 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2fcee4bf
JS
7239 "1408 Port Failed POST - portsmphr=0x%x, "
7240 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
7241 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
7242 portsmphr_reg.word0,
7243 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
7244 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
7245 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
7246 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
7247 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
7248 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
7249 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
7250 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
7251 } else {
28baac74 7252 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2fcee4bf
JS
7253 "2534 Device Info: SLIFamily=0x%x, "
7254 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
7255 "SLIHint_2=0x%x, FT=0x%x\n",
28baac74
JS
7256 bf_get(lpfc_sli_intf_sli_family,
7257 &phba->sli4_hba.sli_intf),
7258 bf_get(lpfc_sli_intf_slirev,
7259 &phba->sli4_hba.sli_intf),
085c647c
JS
7260 bf_get(lpfc_sli_intf_if_type,
7261 &phba->sli4_hba.sli_intf),
7262 bf_get(lpfc_sli_intf_sli_hint1,
28baac74 7263 &phba->sli4_hba.sli_intf),
085c647c
JS
7264 bf_get(lpfc_sli_intf_sli_hint2,
7265 &phba->sli4_hba.sli_intf),
7266 bf_get(lpfc_sli_intf_func_type,
28baac74 7267 &phba->sli4_hba.sli_intf));
2fcee4bf
JS
7268 /*
7269 * Check for other Port errors during the initialization
7270 * process. Fail the load if the port did not come up
7271 * correctly.
7272 */
7273 if_type = bf_get(lpfc_sli_intf_if_type,
7274 &phba->sli4_hba.sli_intf);
7275 switch (if_type) {
7276 case LPFC_SLI_INTF_IF_TYPE_0:
7277 phba->sli4_hba.ue_mask_lo =
7278 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
7279 phba->sli4_hba.ue_mask_hi =
7280 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
7281 uerrlo_reg.word0 =
7282 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
7283 uerrhi_reg.word0 =
7284 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
7285 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
7286 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7287 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7288 "1422 Unrecoverable Error "
7289 "Detected during POST "
7290 "uerr_lo_reg=0x%x, "
7291 "uerr_hi_reg=0x%x, "
7292 "ue_mask_lo_reg=0x%x, "
7293 "ue_mask_hi_reg=0x%x\n",
7294 uerrlo_reg.word0,
7295 uerrhi_reg.word0,
7296 phba->sli4_hba.ue_mask_lo,
7297 phba->sli4_hba.ue_mask_hi);
7298 port_error = -ENODEV;
7299 }
7300 break;
7301 case LPFC_SLI_INTF_IF_TYPE_2:
7302 /* Final checks. The port status should be clean. */
9940b97b
JS
7303 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7304 &reg_data.word0) ||
0558056c
JS
7305 (bf_get(lpfc_sliport_status_err, &reg_data) &&
7306 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
2fcee4bf
JS
7307 phba->work_status[0] =
7308 readl(phba->sli4_hba.u.if_type2.
7309 ERR1regaddr);
7310 phba->work_status[1] =
7311 readl(phba->sli4_hba.u.if_type2.
7312 ERR2regaddr);
7313 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8fcb8acd
JS
7314 "2888 Unrecoverable port error "
7315 "following POST: port status reg "
7316 "0x%x, port_smphr reg 0x%x, "
2fcee4bf
JS
7317 "error 1=0x%x, error 2=0x%x\n",
7318 reg_data.word0,
7319 portsmphr_reg.word0,
7320 phba->work_status[0],
7321 phba->work_status[1]);
7322 port_error = -ENODEV;
7323 }
7324 break;
7325 case LPFC_SLI_INTF_IF_TYPE_1:
7326 default:
7327 break;
7328 }
28baac74 7329 }
da0436e9
JS
7330 return port_error;
7331}
3772a991 7332
da0436e9
JS
7333/**
7334 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
7335 * @phba: pointer to lpfc hba data structure.
2fcee4bf 7336 * @if_type: The SLI4 interface type getting configured.
da0436e9
JS
7337 *
7338 * This routine is invoked to set up SLI4 BAR0 PCI config space register
7339 * memory map.
7340 **/
7341static void
2fcee4bf
JS
7342lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
7343{
7344 switch (if_type) {
7345 case LPFC_SLI_INTF_IF_TYPE_0:
7346 phba->sli4_hba.u.if_type0.UERRLOregaddr =
7347 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
7348 phba->sli4_hba.u.if_type0.UERRHIregaddr =
7349 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
7350 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
7351 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
7352 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
7353 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
7354 phba->sli4_hba.SLIINTFregaddr =
7355 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
7356 break;
7357 case LPFC_SLI_INTF_IF_TYPE_2:
0cf07f84
JS
7358 phba->sli4_hba.u.if_type2.EQDregaddr =
7359 phba->sli4_hba.conf_regs_memmap_p +
7360 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
2fcee4bf 7361 phba->sli4_hba.u.if_type2.ERR1regaddr =
88a2cfbb
JS
7362 phba->sli4_hba.conf_regs_memmap_p +
7363 LPFC_CTL_PORT_ER1_OFFSET;
2fcee4bf 7364 phba->sli4_hba.u.if_type2.ERR2regaddr =
88a2cfbb
JS
7365 phba->sli4_hba.conf_regs_memmap_p +
7366 LPFC_CTL_PORT_ER2_OFFSET;
2fcee4bf 7367 phba->sli4_hba.u.if_type2.CTRLregaddr =
88a2cfbb
JS
7368 phba->sli4_hba.conf_regs_memmap_p +
7369 LPFC_CTL_PORT_CTL_OFFSET;
2fcee4bf 7370 phba->sli4_hba.u.if_type2.STATUSregaddr =
88a2cfbb
JS
7371 phba->sli4_hba.conf_regs_memmap_p +
7372 LPFC_CTL_PORT_STA_OFFSET;
2fcee4bf
JS
7373 phba->sli4_hba.SLIINTFregaddr =
7374 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
7375 phba->sli4_hba.PSMPHRregaddr =
88a2cfbb
JS
7376 phba->sli4_hba.conf_regs_memmap_p +
7377 LPFC_CTL_PORT_SEM_OFFSET;
2fcee4bf 7378 phba->sli4_hba.RQDBregaddr =
962bc51b
JS
7379 phba->sli4_hba.conf_regs_memmap_p +
7380 LPFC_ULP0_RQ_DOORBELL;
2fcee4bf 7381 phba->sli4_hba.WQDBregaddr =
962bc51b
JS
7382 phba->sli4_hba.conf_regs_memmap_p +
7383 LPFC_ULP0_WQ_DOORBELL;
2fcee4bf
JS
7384 phba->sli4_hba.EQCQDBregaddr =
7385 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
7386 phba->sli4_hba.MQDBregaddr =
7387 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
7388 phba->sli4_hba.BMBXregaddr =
7389 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
7390 break;
7391 case LPFC_SLI_INTF_IF_TYPE_1:
7392 default:
7393 dev_printk(KERN_ERR, &phba->pcidev->dev,
7394 "FATAL - unsupported SLI4 interface type - %d\n",
7395 if_type);
7396 break;
7397 }
da0436e9 7398}
3772a991 7399
da0436e9
JS
7400/**
7401 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
7402 * @phba: pointer to lpfc hba data structure.
7403 *
7404 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
7405 * memory map.
7406 **/
7407static void
7408lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
7409{
2fcee4bf
JS
7410 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
7411 LPFC_SLIPORT_IF0_SMPHR;
da0436e9 7412 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
2fcee4bf 7413 LPFC_HST_ISR0;
da0436e9 7414 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
2fcee4bf 7415 LPFC_HST_IMR0;
da0436e9 7416 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
2fcee4bf 7417 LPFC_HST_ISCR0;
3772a991
JS
7418}
7419
7420/**
da0436e9 7421 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
3772a991 7422 * @phba: pointer to lpfc hba data structure.
da0436e9 7423 * @vf: virtual function number
3772a991 7424 *
da0436e9
JS
7425 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
7426 * based on the given viftual function number, @vf.
7427 *
7428 * Return 0 if successful, otherwise -ENODEV.
3772a991 7429 **/
da0436e9
JS
7430static int
7431lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
3772a991 7432{
da0436e9
JS
7433 if (vf > LPFC_VIR_FUNC_MAX)
7434 return -ENODEV;
3772a991 7435
da0436e9 7436 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
962bc51b
JS
7437 vf * LPFC_VFR_PAGE_SIZE +
7438 LPFC_ULP0_RQ_DOORBELL);
da0436e9 7439 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
962bc51b
JS
7440 vf * LPFC_VFR_PAGE_SIZE +
7441 LPFC_ULP0_WQ_DOORBELL);
da0436e9
JS
7442 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7443 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
7444 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7445 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
7446 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7447 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
7448 return 0;
3772a991
JS
7449}
7450
7451/**
da0436e9 7452 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
3772a991
JS
7453 * @phba: pointer to lpfc hba data structure.
7454 *
da0436e9
JS
7455 * This routine is invoked to create the bootstrap mailbox
7456 * region consistent with the SLI-4 interface spec. This
7457 * routine allocates all memory necessary to communicate
7458 * mailbox commands to the port and sets up all alignment
7459 * needs. No locks are expected to be held when calling
7460 * this routine.
3772a991
JS
7461 *
7462 * Return codes
af901ca1 7463 * 0 - successful
d439d286 7464 * -ENOMEM - could not allocated memory.
da0436e9 7465 **/
3772a991 7466static int
da0436e9 7467lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
3772a991 7468{
da0436e9
JS
7469 uint32_t bmbx_size;
7470 struct lpfc_dmabuf *dmabuf;
7471 struct dma_address *dma_address;
7472 uint32_t pa_addr;
7473 uint64_t phys_addr;
7474
7475 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7476 if (!dmabuf)
7477 return -ENOMEM;
3772a991 7478
da0436e9
JS
7479 /*
7480 * The bootstrap mailbox region is comprised of 2 parts
7481 * plus an alignment restriction of 16 bytes.
7482 */
7483 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
1aee383d
JP
7484 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
7485 &dmabuf->phys, GFP_KERNEL);
da0436e9
JS
7486 if (!dmabuf->virt) {
7487 kfree(dmabuf);
7488 return -ENOMEM;
3772a991
JS
7489 }
7490
da0436e9
JS
7491 /*
7492 * Initialize the bootstrap mailbox pointers now so that the register
7493 * operations are simple later. The mailbox dma address is required
7494 * to be 16-byte aligned. Also align the virtual memory as each
7495 * maibox is copied into the bmbx mailbox region before issuing the
7496 * command to the port.
7497 */
7498 phba->sli4_hba.bmbx.dmabuf = dmabuf;
7499 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
7500
7501 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
7502 LPFC_ALIGN_16_BYTE);
7503 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
7504 LPFC_ALIGN_16_BYTE);
7505
7506 /*
7507 * Set the high and low physical addresses now. The SLI4 alignment
7508 * requirement is 16 bytes and the mailbox is posted to the port
7509 * as two 30-bit addresses. The other data is a bit marking whether
7510 * the 30-bit address is the high or low address.
7511 * Upcast bmbx aphys to 64bits so shift instruction compiles
7512 * clean on 32 bit machines.
7513 */
7514 dma_address = &phba->sli4_hba.bmbx.dma_address;
7515 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
7516 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
7517 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
7518 LPFC_BMBX_BIT1_ADDR_HI);
7519
7520 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
7521 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
7522 LPFC_BMBX_BIT1_ADDR_LO);
7523 return 0;
3772a991
JS
7524}
7525
7526/**
da0436e9 7527 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
3772a991
JS
7528 * @phba: pointer to lpfc hba data structure.
7529 *
da0436e9
JS
7530 * This routine is invoked to teardown the bootstrap mailbox
7531 * region and release all host resources. This routine requires
7532 * the caller to ensure all mailbox commands recovered, no
7533 * additional mailbox comands are sent, and interrupts are disabled
7534 * before calling this routine.
7535 *
7536 **/
3772a991 7537static void
da0436e9 7538lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
3772a991 7539{
da0436e9
JS
7540 dma_free_coherent(&phba->pcidev->dev,
7541 phba->sli4_hba.bmbx.bmbx_size,
7542 phba->sli4_hba.bmbx.dmabuf->virt,
7543 phba->sli4_hba.bmbx.dmabuf->phys);
7544
7545 kfree(phba->sli4_hba.bmbx.dmabuf);
7546 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
3772a991
JS
7547}
7548
7549/**
da0436e9 7550 * lpfc_sli4_read_config - Get the config parameters.
3772a991
JS
7551 * @phba: pointer to lpfc hba data structure.
7552 *
da0436e9
JS
7553 * This routine is invoked to read the configuration parameters from the HBA.
7554 * The configuration parameters are used to set the base and maximum values
7555 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
7556 * allocation for the port.
3772a991
JS
7557 *
7558 * Return codes
af901ca1 7559 * 0 - successful
25985edc 7560 * -ENOMEM - No available memory
d439d286 7561 * -EIO - The mailbox failed to complete successfully.
3772a991 7562 **/
ff78d8f9 7563int
da0436e9 7564lpfc_sli4_read_config(struct lpfc_hba *phba)
3772a991 7565{
da0436e9
JS
7566 LPFC_MBOXQ_t *pmb;
7567 struct lpfc_mbx_read_config *rd_config;
912e3acd
JS
7568 union lpfc_sli4_cfg_shdr *shdr;
7569 uint32_t shdr_status, shdr_add_status;
7570 struct lpfc_mbx_get_func_cfg *get_func_cfg;
7571 struct lpfc_rsrc_desc_fcfcoe *desc;
8aa134a8 7572 char *pdesc_0;
c691816e
JS
7573 uint16_t forced_link_speed;
7574 uint32_t if_type;
8aa134a8 7575 int length, i, rc = 0, rc2;
3772a991 7576
da0436e9
JS
7577 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7578 if (!pmb) {
7579 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7580 "2011 Unable to allocate memory for issuing "
7581 "SLI_CONFIG_SPECIAL mailbox command\n");
7582 return -ENOMEM;
3772a991
JS
7583 }
7584
da0436e9 7585 lpfc_read_config(phba, pmb);
3772a991 7586
da0436e9
JS
7587 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7588 if (rc != MBX_SUCCESS) {
7589 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7590 "2012 Mailbox failed , mbxCmd x%x "
7591 "READ_CONFIG, mbxStatus x%x\n",
7592 bf_get(lpfc_mqe_command, &pmb->u.mqe),
7593 bf_get(lpfc_mqe_status, &pmb->u.mqe));
7594 rc = -EIO;
7595 } else {
7596 rd_config = &pmb->u.mqe.un.rd_config;
ff78d8f9
JS
7597 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
7598 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
7599 phba->sli4_hba.lnk_info.lnk_tp =
7600 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
7601 phba->sli4_hba.lnk_info.lnk_no =
7602 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
7603 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7604 "3081 lnk_type:%d, lnk_numb:%d\n",
7605 phba->sli4_hba.lnk_info.lnk_tp,
7606 phba->sli4_hba.lnk_info.lnk_no);
7607 } else
7608 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7609 "3082 Mailbox (x%x) returned ldv:x0\n",
7610 bf_get(lpfc_mqe_command, &pmb->u.mqe));
6d368e53
JS
7611 phba->sli4_hba.extents_in_use =
7612 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
da0436e9
JS
7613 phba->sli4_hba.max_cfg_param.max_xri =
7614 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
7615 phba->sli4_hba.max_cfg_param.xri_base =
7616 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
7617 phba->sli4_hba.max_cfg_param.max_vpi =
7618 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
7619 phba->sli4_hba.max_cfg_param.vpi_base =
7620 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
7621 phba->sli4_hba.max_cfg_param.max_rpi =
7622 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
7623 phba->sli4_hba.max_cfg_param.rpi_base =
7624 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
7625 phba->sli4_hba.max_cfg_param.max_vfi =
7626 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
7627 phba->sli4_hba.max_cfg_param.vfi_base =
7628 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
7629 phba->sli4_hba.max_cfg_param.max_fcfi =
7630 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
da0436e9
JS
7631 phba->sli4_hba.max_cfg_param.max_eq =
7632 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
7633 phba->sli4_hba.max_cfg_param.max_rq =
7634 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
7635 phba->sli4_hba.max_cfg_param.max_wq =
7636 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
7637 phba->sli4_hba.max_cfg_param.max_cq =
7638 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
7639 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
7640 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
7641 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
7642 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5ffc266e
JS
7643 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
7644 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
da0436e9
JS
7645 phba->max_vports = phba->max_vpi;
7646 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6d368e53
JS
7647 "2003 cfg params Extents? %d "
7648 "XRI(B:%d M:%d), "
da0436e9
JS
7649 "VPI(B:%d M:%d) "
7650 "VFI(B:%d M:%d) "
7651 "RPI(B:%d M:%d) "
2ea259ee 7652 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
6d368e53 7653 phba->sli4_hba.extents_in_use,
da0436e9
JS
7654 phba->sli4_hba.max_cfg_param.xri_base,
7655 phba->sli4_hba.max_cfg_param.max_xri,
7656 phba->sli4_hba.max_cfg_param.vpi_base,
7657 phba->sli4_hba.max_cfg_param.max_vpi,
7658 phba->sli4_hba.max_cfg_param.vfi_base,
7659 phba->sli4_hba.max_cfg_param.max_vfi,
7660 phba->sli4_hba.max_cfg_param.rpi_base,
7661 phba->sli4_hba.max_cfg_param.max_rpi,
2ea259ee
JS
7662 phba->sli4_hba.max_cfg_param.max_fcfi,
7663 phba->sli4_hba.max_cfg_param.max_eq,
7664 phba->sli4_hba.max_cfg_param.max_cq,
7665 phba->sli4_hba.max_cfg_param.max_wq,
7666 phba->sli4_hba.max_cfg_param.max_rq);
7667
3772a991 7668 }
912e3acd
JS
7669
7670 if (rc)
7671 goto read_cfg_out;
da0436e9 7672
c691816e
JS
7673 /* Update link speed if forced link speed is supported */
7674 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7675 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7676 forced_link_speed =
7677 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
7678 if (forced_link_speed) {
7679 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
7680
7681 switch (forced_link_speed) {
7682 case LINK_SPEED_1G:
7683 phba->cfg_link_speed =
7684 LPFC_USER_LINK_SPEED_1G;
7685 break;
7686 case LINK_SPEED_2G:
7687 phba->cfg_link_speed =
7688 LPFC_USER_LINK_SPEED_2G;
7689 break;
7690 case LINK_SPEED_4G:
7691 phba->cfg_link_speed =
7692 LPFC_USER_LINK_SPEED_4G;
7693 break;
7694 case LINK_SPEED_8G:
7695 phba->cfg_link_speed =
7696 LPFC_USER_LINK_SPEED_8G;
7697 break;
7698 case LINK_SPEED_10G:
7699 phba->cfg_link_speed =
7700 LPFC_USER_LINK_SPEED_10G;
7701 break;
7702 case LINK_SPEED_16G:
7703 phba->cfg_link_speed =
7704 LPFC_USER_LINK_SPEED_16G;
7705 break;
7706 case LINK_SPEED_32G:
7707 phba->cfg_link_speed =
7708 LPFC_USER_LINK_SPEED_32G;
7709 break;
7710 case 0xffff:
7711 phba->cfg_link_speed =
7712 LPFC_USER_LINK_SPEED_AUTO;
7713 break;
7714 default:
7715 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7716 "0047 Unrecognized link "
7717 "speed : %d\n",
7718 forced_link_speed);
7719 phba->cfg_link_speed =
7720 LPFC_USER_LINK_SPEED_AUTO;
7721 }
7722 }
7723 }
7724
da0436e9 7725 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
572709e2
JS
7726 length = phba->sli4_hba.max_cfg_param.max_xri -
7727 lpfc_sli4_get_els_iocb_cnt(phba);
7728 if (phba->cfg_hba_queue_depth > length) {
7729 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7730 "3361 HBA queue depth changed from %d to %d\n",
7731 phba->cfg_hba_queue_depth, length);
7732 phba->cfg_hba_queue_depth = length;
7733 }
912e3acd
JS
7734
7735 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
7736 LPFC_SLI_INTF_IF_TYPE_2)
7737 goto read_cfg_out;
7738
7739 /* get the pf# and vf# for SLI4 if_type 2 port */
7740 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
7741 sizeof(struct lpfc_sli4_cfg_mhdr));
7742 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
7743 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
7744 length, LPFC_SLI4_MBX_EMBED);
7745
8aa134a8 7746 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
912e3acd
JS
7747 shdr = (union lpfc_sli4_cfg_shdr *)
7748 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7749 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7750 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8aa134a8 7751 if (rc2 || shdr_status || shdr_add_status) {
912e3acd
JS
7752 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7753 "3026 Mailbox failed , mbxCmd x%x "
7754 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
7755 bf_get(lpfc_mqe_command, &pmb->u.mqe),
7756 bf_get(lpfc_mqe_status, &pmb->u.mqe));
912e3acd
JS
7757 goto read_cfg_out;
7758 }
7759
7760 /* search for fc_fcoe resrouce descriptor */
7761 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
912e3acd 7762
8aa134a8
JS
7763 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
7764 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
7765 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
7766 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
7767 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
7768 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
7769 goto read_cfg_out;
7770
912e3acd 7771 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
8aa134a8 7772 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
912e3acd 7773 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
8aa134a8 7774 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
912e3acd
JS
7775 phba->sli4_hba.iov.pf_number =
7776 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
7777 phba->sli4_hba.iov.vf_number =
7778 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
7779 break;
7780 }
7781 }
7782
7783 if (i < LPFC_RSRC_DESC_MAX_NUM)
7784 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7785 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
7786 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
7787 phba->sli4_hba.iov.vf_number);
8aa134a8 7788 else
912e3acd
JS
7789 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7790 "3028 GET_FUNCTION_CONFIG: failed to find "
7791 "Resrouce Descriptor:x%x\n",
7792 LPFC_RSRC_DESC_TYPE_FCFCOE);
912e3acd
JS
7793
7794read_cfg_out:
7795 mempool_free(pmb, phba->mbox_mem_pool);
da0436e9 7796 return rc;
3772a991
JS
7797}
7798
7799/**
2fcee4bf 7800 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
3772a991
JS
7801 * @phba: pointer to lpfc hba data structure.
7802 *
2fcee4bf
JS
7803 * This routine is invoked to setup the port-side endian order when
7804 * the port if_type is 0. This routine has no function for other
7805 * if_types.
da0436e9
JS
7806 *
7807 * Return codes
af901ca1 7808 * 0 - successful
25985edc 7809 * -ENOMEM - No available memory
d439d286 7810 * -EIO - The mailbox failed to complete successfully.
3772a991 7811 **/
da0436e9
JS
7812static int
7813lpfc_setup_endian_order(struct lpfc_hba *phba)
3772a991 7814{
da0436e9 7815 LPFC_MBOXQ_t *mboxq;
2fcee4bf 7816 uint32_t if_type, rc = 0;
da0436e9
JS
7817 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
7818 HOST_ENDIAN_HIGH_WORD1};
3772a991 7819
2fcee4bf
JS
7820 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7821 switch (if_type) {
7822 case LPFC_SLI_INTF_IF_TYPE_0:
7823 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7824 GFP_KERNEL);
7825 if (!mboxq) {
7826 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7827 "0492 Unable to allocate memory for "
7828 "issuing SLI_CONFIG_SPECIAL mailbox "
7829 "command\n");
7830 return -ENOMEM;
7831 }
3772a991 7832
2fcee4bf
JS
7833 /*
7834 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
7835 * two words to contain special data values and no other data.
7836 */
7837 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
7838 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
7839 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7840 if (rc != MBX_SUCCESS) {
7841 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7842 "0493 SLI_CONFIG_SPECIAL mailbox "
7843 "failed with status x%x\n",
7844 rc);
7845 rc = -EIO;
7846 }
7847 mempool_free(mboxq, phba->mbox_mem_pool);
7848 break;
7849 case LPFC_SLI_INTF_IF_TYPE_2:
7850 case LPFC_SLI_INTF_IF_TYPE_1:
7851 default:
7852 break;
da0436e9 7853 }
da0436e9 7854 return rc;
3772a991
JS
7855}
7856
7857/**
895427bd 7858 * lpfc_sli4_queue_verify - Verify and update EQ counts
3772a991
JS
7859 * @phba: pointer to lpfc hba data structure.
7860 *
895427bd
JS
7861 * This routine is invoked to check the user settable queue counts for EQs.
7862 * After this routine is called the counts will be set to valid values that
5350d872
JS
7863 * adhere to the constraints of the system's interrupt vectors and the port's
7864 * queue resources.
da0436e9
JS
7865 *
7866 * Return codes
af901ca1 7867 * 0 - successful
25985edc 7868 * -ENOMEM - No available memory
3772a991 7869 **/
da0436e9 7870static int
5350d872 7871lpfc_sli4_queue_verify(struct lpfc_hba *phba)
3772a991 7872{
895427bd 7873 int io_channel;
1ba981fd 7874 int fof_vectors = phba->cfg_fof ? 1 : 0;
3772a991 7875
da0436e9 7876 /*
67d12733 7877 * Sanity check for configured queue parameters against the run-time
da0436e9
JS
7878 * device parameters
7879 */
3772a991 7880
67d12733 7881 /* Sanity check on HBA EQ parameters */
895427bd 7882 io_channel = phba->io_channel_irqs;
67d12733 7883
895427bd 7884 if (phba->sli4_hba.num_online_cpu < io_channel) {
82c3e9ba
JS
7885 lpfc_printf_log(phba,
7886 KERN_ERR, LOG_INIT,
90695ee0 7887 "3188 Reducing IO channels to match number of "
7bb03bbf 7888 "online CPUs: from %d to %d\n",
895427bd
JS
7889 io_channel, phba->sli4_hba.num_online_cpu);
7890 io_channel = phba->sli4_hba.num_online_cpu;
90695ee0
JS
7891 }
7892
895427bd 7893 if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
82c3e9ba
JS
7894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7895 "2575 Reducing IO channels to match number of "
7896 "available EQs: from %d to %d\n",
895427bd 7897 io_channel,
82c3e9ba 7898 phba->sli4_hba.max_cfg_param.max_eq);
895427bd 7899 io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
da0436e9 7900 }
67d12733 7901
895427bd
JS
7902 /* The actual number of FCP / NVME event queues adopted */
7903 if (io_channel != phba->io_channel_irqs)
7904 phba->io_channel_irqs = io_channel;
7905 if (phba->cfg_fcp_io_channel > io_channel)
7906 phba->cfg_fcp_io_channel = io_channel;
7907 if (phba->cfg_nvme_io_channel > io_channel)
7908 phba->cfg_nvme_io_channel = io_channel;
2d7dbc4c
JS
7909 if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
7910 phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
895427bd
JS
7911
7912 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2d7dbc4c 7913 "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
895427bd 7914 phba->io_channel_irqs, phba->cfg_fcp_io_channel,
2d7dbc4c 7915 phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
3772a991 7916
da0436e9
JS
7917 /* Get EQ depth from module parameter, fake the default for now */
7918 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
7919 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
3772a991 7920
5350d872
JS
7921 /* Get CQ depth from module parameter, fake the default for now */
7922 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
7923 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
895427bd
JS
7924 return 0;
7925}
7926
7927static int
7928lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
7929{
7930 struct lpfc_queue *qdesc;
7931 int cnt;
5350d872 7932
895427bd
JS
7933 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7934 phba->sli4_hba.cq_ecount);
7935 if (!qdesc) {
7936 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7937 "0508 Failed allocate fast-path NVME CQ (%d)\n",
7938 wqidx);
7939 return 1;
7940 }
7941 phba->sli4_hba.nvme_cq[wqidx] = qdesc;
7942
7943 cnt = LPFC_NVME_WQSIZE;
7944 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
7945 if (!qdesc) {
7946 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7947 "0509 Failed allocate fast-path NVME WQ (%d)\n",
7948 wqidx);
7949 return 1;
7950 }
7951 phba->sli4_hba.nvme_wq[wqidx] = qdesc;
7952 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
7953 return 0;
7954}
7955
7956static int
7957lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
7958{
7959 struct lpfc_queue *qdesc;
7960 uint32_t wqesize;
7961
7962 /* Create Fast Path FCP CQs */
7963 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7964 phba->sli4_hba.cq_ecount);
7965 if (!qdesc) {
7966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7967 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
7968 return 1;
7969 }
7970 phba->sli4_hba.fcp_cq[wqidx] = qdesc;
7971
7972 /* Create Fast Path FCP WQs */
7973 wqesize = (phba->fcp_embed_io) ?
d1f525aa 7974 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
895427bd
JS
7975 qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
7976 if (!qdesc) {
7977 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7978 "0503 Failed allocate fast-path FCP WQ (%d)\n",
7979 wqidx);
7980 return 1;
7981 }
7982 phba->sli4_hba.fcp_wq[wqidx] = qdesc;
7983 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
5350d872 7984 return 0;
5350d872
JS
7985}
7986
7987/**
7988 * lpfc_sli4_queue_create - Create all the SLI4 queues
7989 * @phba: pointer to lpfc hba data structure.
7990 *
7991 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
7992 * operation. For each SLI4 queue type, the parameters such as queue entry
7993 * count (queue depth) shall be taken from the module parameter. For now,
7994 * we just use some constant number as place holder.
7995 *
7996 * Return codes
4907cb7b 7997 * 0 - successful
5350d872
JS
7998 * -ENOMEM - No availble memory
7999 * -EIO - The mailbox failed to complete successfully.
8000 **/
8001int
8002lpfc_sli4_queue_create(struct lpfc_hba *phba)
8003{
8004 struct lpfc_queue *qdesc;
d1f525aa 8005 int idx, io_channel;
5350d872
JS
8006
8007 /*
67d12733 8008 * Create HBA Record arrays.
895427bd 8009 * Both NVME and FCP will share that same vectors / EQs
5350d872 8010 */
895427bd
JS
8011 io_channel = phba->io_channel_irqs;
8012 if (!io_channel)
67d12733 8013 return -ERANGE;
5350d872 8014
67d12733
JS
8015 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
8016 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
8017 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
8018 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
8019 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
8020 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
895427bd
JS
8021 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8022 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8023 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8024 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
67d12733 8025
895427bd
JS
8026 phba->sli4_hba.hba_eq = kcalloc(io_channel,
8027 sizeof(struct lpfc_queue *),
8028 GFP_KERNEL);
67d12733
JS
8029 if (!phba->sli4_hba.hba_eq) {
8030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8031 "2576 Failed allocate memory for "
8032 "fast-path EQ record array\n");
8033 goto out_error;
8034 }
8035
895427bd
JS
8036 if (phba->cfg_fcp_io_channel) {
8037 phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
8038 sizeof(struct lpfc_queue *),
8039 GFP_KERNEL);
8040 if (!phba->sli4_hba.fcp_cq) {
8041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8042 "2577 Failed allocate memory for "
8043 "fast-path CQ record array\n");
8044 goto out_error;
8045 }
8046 phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
8047 sizeof(struct lpfc_queue *),
8048 GFP_KERNEL);
8049 if (!phba->sli4_hba.fcp_wq) {
8050 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8051 "2578 Failed allocate memory for "
8052 "fast-path FCP WQ record array\n");
8053 goto out_error;
8054 }
8055 /*
8056 * Since the first EQ can have multiple CQs associated with it,
8057 * this array is used to quickly see if we have a FCP fast-path
8058 * CQ match.
8059 */
8060 phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
8061 sizeof(uint16_t),
8062 GFP_KERNEL);
8063 if (!phba->sli4_hba.fcp_cq_map) {
8064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8065 "2545 Failed allocate memory for "
8066 "fast-path CQ map\n");
8067 goto out_error;
8068 }
67d12733
JS
8069 }
8070
895427bd
JS
8071 if (phba->cfg_nvme_io_channel) {
8072 phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
8073 sizeof(struct lpfc_queue *),
8074 GFP_KERNEL);
8075 if (!phba->sli4_hba.nvme_cq) {
8076 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8077 "6077 Failed allocate memory for "
8078 "fast-path CQ record array\n");
8079 goto out_error;
8080 }
da0436e9 8081
895427bd
JS
8082 phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
8083 sizeof(struct lpfc_queue *),
8084 GFP_KERNEL);
8085 if (!phba->sli4_hba.nvme_wq) {
8086 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8087 "2581 Failed allocate memory for "
8088 "fast-path NVME WQ record array\n");
8089 goto out_error;
8090 }
8091
8092 /*
8093 * Since the first EQ can have multiple CQs associated with it,
8094 * this array is used to quickly see if we have a NVME fast-path
8095 * CQ match.
8096 */
8097 phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
8098 sizeof(uint16_t),
8099 GFP_KERNEL);
8100 if (!phba->sli4_hba.nvme_cq_map) {
8101 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8102 "6078 Failed allocate memory for "
8103 "fast-path CQ map\n");
8104 goto out_error;
8105 }
2d7dbc4c
JS
8106
8107 if (phba->nvmet_support) {
8108 phba->sli4_hba.nvmet_cqset = kcalloc(
8109 phba->cfg_nvmet_mrq,
8110 sizeof(struct lpfc_queue *),
8111 GFP_KERNEL);
8112 if (!phba->sli4_hba.nvmet_cqset) {
8113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8114 "3121 Fail allocate memory for "
8115 "fast-path CQ set array\n");
8116 goto out_error;
8117 }
8118 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
8119 phba->cfg_nvmet_mrq,
8120 sizeof(struct lpfc_queue *),
8121 GFP_KERNEL);
8122 if (!phba->sli4_hba.nvmet_mrq_hdr) {
8123 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8124 "3122 Fail allocate memory for "
8125 "fast-path RQ set hdr array\n");
8126 goto out_error;
8127 }
8128 phba->sli4_hba.nvmet_mrq_data = kcalloc(
8129 phba->cfg_nvmet_mrq,
8130 sizeof(struct lpfc_queue *),
8131 GFP_KERNEL);
8132 if (!phba->sli4_hba.nvmet_mrq_data) {
8133 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8134 "3124 Fail allocate memory for "
8135 "fast-path RQ set data array\n");
8136 goto out_error;
8137 }
8138 }
da0436e9 8139 }
67d12733 8140
895427bd 8141 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
67d12733 8142
895427bd
JS
8143 /* Create HBA Event Queues (EQs) */
8144 for (idx = 0; idx < io_channel; idx++) {
67d12733 8145 /* Create EQs */
da0436e9
JS
8146 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
8147 phba->sli4_hba.eq_ecount);
8148 if (!qdesc) {
8149 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
67d12733
JS
8150 "0497 Failed allocate EQ (%d)\n", idx);
8151 goto out_error;
da0436e9 8152 }
67d12733 8153 phba->sli4_hba.hba_eq[idx] = qdesc;
895427bd 8154 }
67d12733 8155
895427bd 8156 /* FCP and NVME io channels are not required to be balanced */
67d12733 8157
895427bd
JS
8158 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8159 if (lpfc_alloc_fcp_wq_cq(phba, idx))
67d12733 8160 goto out_error;
da0436e9 8161
895427bd
JS
8162 for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
8163 if (lpfc_alloc_nvme_wq_cq(phba, idx))
8164 goto out_error;
67d12733 8165
2d7dbc4c
JS
8166 if (phba->nvmet_support) {
8167 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8168 qdesc = lpfc_sli4_queue_alloc(phba,
8169 phba->sli4_hba.cq_esize,
8170 phba->sli4_hba.cq_ecount);
8171 if (!qdesc) {
8172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8173 "3142 Failed allocate NVME "
8174 "CQ Set (%d)\n", idx);
8175 goto out_error;
8176 }
8177 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
8178 }
8179 }
8180
da0436e9 8181 /*
67d12733 8182 * Create Slow Path Completion Queues (CQs)
da0436e9
JS
8183 */
8184
da0436e9
JS
8185 /* Create slow-path Mailbox Command Complete Queue */
8186 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
8187 phba->sli4_hba.cq_ecount);
8188 if (!qdesc) {
8189 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8190 "0500 Failed allocate slow-path mailbox CQ\n");
67d12733 8191 goto out_error;
da0436e9
JS
8192 }
8193 phba->sli4_hba.mbx_cq = qdesc;
8194
8195 /* Create slow-path ELS Complete Queue */
8196 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
8197 phba->sli4_hba.cq_ecount);
8198 if (!qdesc) {
8199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8200 "0501 Failed allocate slow-path ELS CQ\n");
67d12733 8201 goto out_error;
da0436e9
JS
8202 }
8203 phba->sli4_hba.els_cq = qdesc;
8204
da0436e9 8205
5350d872 8206 /*
67d12733 8207 * Create Slow Path Work Queues (WQs)
5350d872 8208 */
da0436e9
JS
8209
8210 /* Create Mailbox Command Queue */
da0436e9
JS
8211
8212 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
8213 phba->sli4_hba.mq_ecount);
8214 if (!qdesc) {
8215 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8216 "0505 Failed allocate slow-path MQ\n");
67d12733 8217 goto out_error;
da0436e9
JS
8218 }
8219 phba->sli4_hba.mbx_wq = qdesc;
8220
8221 /*
67d12733 8222 * Create ELS Work Queues
da0436e9 8223 */
da0436e9
JS
8224
8225 /* Create slow-path ELS Work Queue */
8226 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
8227 phba->sli4_hba.wq_ecount);
8228 if (!qdesc) {
8229 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8230 "0504 Failed allocate slow-path ELS WQ\n");
67d12733 8231 goto out_error;
da0436e9
JS
8232 }
8233 phba->sli4_hba.els_wq = qdesc;
895427bd
JS
8234 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8235
8236 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8237 /* Create NVME LS Complete Queue */
8238 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
8239 phba->sli4_hba.cq_ecount);
8240 if (!qdesc) {
8241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8242 "6079 Failed allocate NVME LS CQ\n");
8243 goto out_error;
8244 }
8245 phba->sli4_hba.nvmels_cq = qdesc;
8246
8247 /* Create NVME LS Work Queue */
8248 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
8249 phba->sli4_hba.wq_ecount);
8250 if (!qdesc) {
8251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8252 "6080 Failed allocate NVME LS WQ\n");
8253 goto out_error;
8254 }
8255 phba->sli4_hba.nvmels_wq = qdesc;
8256 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8257 }
da0436e9 8258
da0436e9
JS
8259 /*
8260 * Create Receive Queue (RQ)
8261 */
da0436e9
JS
8262
8263 /* Create Receive Queue for header */
8264 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
8265 phba->sli4_hba.rq_ecount);
8266 if (!qdesc) {
8267 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8268 "0506 Failed allocate receive HRQ\n");
67d12733 8269 goto out_error;
da0436e9
JS
8270 }
8271 phba->sli4_hba.hdr_rq = qdesc;
8272
8273 /* Create Receive Queue for data */
8274 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
8275 phba->sli4_hba.rq_ecount);
8276 if (!qdesc) {
8277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8278 "0507 Failed allocate receive DRQ\n");
67d12733 8279 goto out_error;
da0436e9
JS
8280 }
8281 phba->sli4_hba.dat_rq = qdesc;
8282
2d7dbc4c
JS
8283 if (phba->nvmet_support) {
8284 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8285 /* Create NVMET Receive Queue for header */
8286 qdesc = lpfc_sli4_queue_alloc(phba,
8287 phba->sli4_hba.rq_esize,
61f3d4bf 8288 LPFC_NVMET_RQE_DEF_COUNT);
2d7dbc4c
JS
8289 if (!qdesc) {
8290 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8291 "3146 Failed allocate "
8292 "receive HRQ\n");
8293 goto out_error;
8294 }
8295 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
8296
8297 /* Only needed for header of RQ pair */
8298 qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
8299 GFP_KERNEL);
8300 if (qdesc->rqbp == NULL) {
8301 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8302 "6131 Failed allocate "
8303 "Header RQBP\n");
8304 goto out_error;
8305 }
8306
8307 /* Create NVMET Receive Queue for data */
8308 qdesc = lpfc_sli4_queue_alloc(phba,
8309 phba->sli4_hba.rq_esize,
61f3d4bf 8310 LPFC_NVMET_RQE_DEF_COUNT);
2d7dbc4c
JS
8311 if (!qdesc) {
8312 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8313 "3156 Failed allocate "
8314 "receive DRQ\n");
8315 goto out_error;
8316 }
8317 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
8318 }
8319 }
8320
1ba981fd
JS
8321 /* Create the Queues needed for Flash Optimized Fabric operations */
8322 if (phba->cfg_fof)
8323 lpfc_fof_queue_create(phba);
da0436e9
JS
8324 return 0;
8325
da0436e9 8326out_error:
67d12733 8327 lpfc_sli4_queue_destroy(phba);
da0436e9
JS
8328 return -ENOMEM;
8329}
8330
895427bd
JS
8331static inline void
8332__lpfc_sli4_release_queue(struct lpfc_queue **qp)
8333{
8334 if (*qp != NULL) {
8335 lpfc_sli4_queue_free(*qp);
8336 *qp = NULL;
8337 }
8338}
8339
8340static inline void
8341lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
8342{
8343 int idx;
8344
8345 if (*qs == NULL)
8346 return;
8347
8348 for (idx = 0; idx < max; idx++)
8349 __lpfc_sli4_release_queue(&(*qs)[idx]);
8350
8351 kfree(*qs);
8352 *qs = NULL;
8353}
8354
8355static inline void
8356lpfc_sli4_release_queue_map(uint16_t **qmap)
8357{
8358 if (*qmap != NULL) {
8359 kfree(*qmap);
8360 *qmap = NULL;
8361 }
8362}
8363
da0436e9
JS
8364/**
8365 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
8366 * @phba: pointer to lpfc hba data structure.
8367 *
8368 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
8369 * operation.
8370 *
8371 * Return codes
af901ca1 8372 * 0 - successful
25985edc 8373 * -ENOMEM - No available memory
d439d286 8374 * -EIO - The mailbox failed to complete successfully.
da0436e9 8375 **/
5350d872 8376void
da0436e9
JS
8377lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
8378{
1ba981fd
JS
8379 if (phba->cfg_fof)
8380 lpfc_fof_queue_destroy(phba);
8381
895427bd
JS
8382 /* Release HBA eqs */
8383 lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
8384
8385 /* Release FCP cqs */
8386 lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
d1f525aa 8387 phba->cfg_fcp_io_channel);
895427bd
JS
8388
8389 /* Release FCP wqs */
8390 lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
d1f525aa 8391 phba->cfg_fcp_io_channel);
895427bd
JS
8392
8393 /* Release FCP CQ mapping array */
8394 lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
8395
8396 /* Release NVME cqs */
8397 lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
8398 phba->cfg_nvme_io_channel);
8399
8400 /* Release NVME wqs */
8401 lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
8402 phba->cfg_nvme_io_channel);
8403
8404 /* Release NVME CQ mapping array */
8405 lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
8406
2d7dbc4c
JS
8407 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
8408 phba->cfg_nvmet_mrq);
8409
8410 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
8411 phba->cfg_nvmet_mrq);
8412 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
8413 phba->cfg_nvmet_mrq);
8414
895427bd
JS
8415 /* Release mailbox command work queue */
8416 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
8417
8418 /* Release ELS work queue */
8419 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
8420
8421 /* Release ELS work queue */
8422 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
8423
8424 /* Release unsolicited receive queue */
8425 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
8426 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
8427
8428 /* Release ELS complete queue */
8429 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
8430
8431 /* Release NVME LS complete queue */
8432 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
8433
8434 /* Release mailbox command complete queue */
8435 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
8436
8437 /* Everything on this list has been freed */
8438 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
8439}
8440
895427bd
JS
8441int
8442lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
8443{
8444 struct lpfc_rqb *rqbp;
8445 struct lpfc_dmabuf *h_buf;
8446 struct rqb_dmabuf *rqb_buffer;
8447
8448 rqbp = rq->rqbp;
8449 while (!list_empty(&rqbp->rqb_buffer_list)) {
8450 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
8451 struct lpfc_dmabuf, list);
8452
8453 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
8454 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
8455 rqbp->buffer_count--;
67d12733 8456 }
895427bd
JS
8457 return 1;
8458}
67d12733 8459
895427bd
JS
8460static int
8461lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
8462 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
8463 int qidx, uint32_t qtype)
8464{
8465 struct lpfc_sli_ring *pring;
8466 int rc;
8467
8468 if (!eq || !cq || !wq) {
8469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8470 "6085 Fast-path %s (%d) not allocated\n",
8471 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
8472 return -ENOMEM;
8473 }
8474
8475 /* create the Cq first */
8476 rc = lpfc_cq_create(phba, cq, eq,
8477 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
8478 if (rc) {
8479 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8480 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
8481 qidx, (uint32_t)rc);
8482 return rc;
67d12733
JS
8483 }
8484
895427bd
JS
8485 if (qtype != LPFC_MBOX) {
8486 /* Setup nvme_cq_map for fast lookup */
8487 if (cq_map)
8488 *cq_map = cq->queue_id;
da0436e9 8489
895427bd
JS
8490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8491 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
8492 qidx, cq->queue_id, qidx, eq->queue_id);
da0436e9 8493
895427bd
JS
8494 /* create the wq */
8495 rc = lpfc_wq_create(phba, wq, cq, qtype);
8496 if (rc) {
8497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8498 "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
8499 qidx, (uint32_t)rc);
8500 /* no need to tear down cq - caller will do so */
8501 return rc;
8502 }
da0436e9 8503
895427bd
JS
8504 /* Bind this CQ/WQ to the NVME ring */
8505 pring = wq->pring;
8506 pring->sli.sli4.wqp = (void *)wq;
8507 cq->pring = pring;
da0436e9 8508
895427bd
JS
8509 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8510 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
8511 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
8512 } else {
8513 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
8514 if (rc) {
8515 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8516 "0539 Failed setup of slow-path MQ: "
8517 "rc = 0x%x\n", rc);
8518 /* no need to tear down cq - caller will do so */
8519 return rc;
8520 }
da0436e9 8521
895427bd
JS
8522 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8523 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
8524 phba->sli4_hba.mbx_wq->queue_id,
8525 phba->sli4_hba.mbx_cq->queue_id);
67d12733 8526 }
da0436e9 8527
895427bd 8528 return 0;
da0436e9
JS
8529}
8530
8531/**
8532 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
8533 * @phba: pointer to lpfc hba data structure.
8534 *
8535 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
8536 * operation.
8537 *
8538 * Return codes
af901ca1 8539 * 0 - successful
25985edc 8540 * -ENOMEM - No available memory
d439d286 8541 * -EIO - The mailbox failed to complete successfully.
da0436e9
JS
8542 **/
8543int
8544lpfc_sli4_queue_setup(struct lpfc_hba *phba)
8545{
962bc51b
JS
8546 uint32_t shdr_status, shdr_add_status;
8547 union lpfc_sli4_cfg_shdr *shdr;
8548 LPFC_MBOXQ_t *mboxq;
895427bd
JS
8549 int qidx;
8550 uint32_t length, io_channel;
8551 int rc = -ENOMEM;
962bc51b
JS
8552
8553 /* Check for dual-ULP support */
8554 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8555 if (!mboxq) {
8556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8557 "3249 Unable to allocate memory for "
8558 "QUERY_FW_CFG mailbox command\n");
8559 return -ENOMEM;
8560 }
8561 length = (sizeof(struct lpfc_mbx_query_fw_config) -
8562 sizeof(struct lpfc_sli4_cfg_mhdr));
8563 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8564 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
8565 length, LPFC_SLI4_MBX_EMBED);
8566
8567 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8568
8569 shdr = (union lpfc_sli4_cfg_shdr *)
8570 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
8571 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8572 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8573 if (shdr_status || shdr_add_status || rc) {
8574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8575 "3250 QUERY_FW_CFG mailbox failed with status "
8576 "x%x add_status x%x, mbx status x%x\n",
8577 shdr_status, shdr_add_status, rc);
8578 if (rc != MBX_TIMEOUT)
8579 mempool_free(mboxq, phba->mbox_mem_pool);
8580 rc = -ENXIO;
8581 goto out_error;
8582 }
8583
8584 phba->sli4_hba.fw_func_mode =
8585 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
8586 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
8587 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
8b017a30
JS
8588 phba->sli4_hba.physical_port =
8589 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
962bc51b
JS
8590 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8591 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
8592 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
8593 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
8594
8595 if (rc != MBX_TIMEOUT)
8596 mempool_free(mboxq, phba->mbox_mem_pool);
da0436e9
JS
8597
8598 /*
67d12733 8599 * Set up HBA Event Queues (EQs)
da0436e9 8600 */
895427bd 8601 io_channel = phba->io_channel_irqs;
da0436e9 8602
67d12733 8603 /* Set up HBA event queue */
895427bd 8604 if (io_channel && !phba->sli4_hba.hba_eq) {
2e90f4b5
JS
8605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8606 "3147 Fast-path EQs not allocated\n");
1b51197d 8607 rc = -ENOMEM;
67d12733 8608 goto out_error;
2e90f4b5 8609 }
895427bd
JS
8610 for (qidx = 0; qidx < io_channel; qidx++) {
8611 if (!phba->sli4_hba.hba_eq[qidx]) {
da0436e9
JS
8612 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8613 "0522 Fast-path EQ (%d) not "
895427bd 8614 "allocated\n", qidx);
1b51197d 8615 rc = -ENOMEM;
895427bd 8616 goto out_destroy;
da0436e9 8617 }
895427bd
JS
8618 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
8619 phba->cfg_fcp_imax);
da0436e9
JS
8620 if (rc) {
8621 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8622 "0523 Failed setup of fast-path EQ "
895427bd 8623 "(%d), rc = 0x%x\n", qidx,
a2fc4aef 8624 (uint32_t)rc);
895427bd 8625 goto out_destroy;
da0436e9
JS
8626 }
8627 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
895427bd
JS
8628 "2584 HBA EQ setup: queue[%d]-id=%d\n",
8629 qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
67d12733
JS
8630 }
8631
895427bd
JS
8632 if (phba->cfg_nvme_io_channel) {
8633 if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
67d12733 8634 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
895427bd
JS
8635 "6084 Fast-path NVME %s array not allocated\n",
8636 (phba->sli4_hba.nvme_cq) ? "CQ" : "WQ");
67d12733 8637 rc = -ENOMEM;
895427bd 8638 goto out_destroy;
67d12733
JS
8639 }
8640
895427bd
JS
8641 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
8642 rc = lpfc_create_wq_cq(phba,
8643 phba->sli4_hba.hba_eq[
8644 qidx % io_channel],
8645 phba->sli4_hba.nvme_cq[qidx],
8646 phba->sli4_hba.nvme_wq[qidx],
8647 &phba->sli4_hba.nvme_cq_map[qidx],
8648 qidx, LPFC_NVME);
8649 if (rc) {
8650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8651 "6123 Failed to setup fastpath "
8652 "NVME WQ/CQ (%d), rc = 0x%x\n",
8653 qidx, (uint32_t)rc);
8654 goto out_destroy;
8655 }
8656 }
67d12733
JS
8657 }
8658
895427bd
JS
8659 if (phba->cfg_fcp_io_channel) {
8660 /* Set up fast-path FCP Response Complete Queue */
8661 if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
67d12733 8662 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
895427bd
JS
8663 "3148 Fast-path FCP %s array not allocated\n",
8664 phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
67d12733 8665 rc = -ENOMEM;
895427bd 8666 goto out_destroy;
67d12733
JS
8667 }
8668
895427bd
JS
8669 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
8670 rc = lpfc_create_wq_cq(phba,
8671 phba->sli4_hba.hba_eq[
8672 qidx % io_channel],
8673 phba->sli4_hba.fcp_cq[qidx],
8674 phba->sli4_hba.fcp_wq[qidx],
8675 &phba->sli4_hba.fcp_cq_map[qidx],
8676 qidx, LPFC_FCP);
8677 if (rc) {
8678 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8679 "0535 Failed to setup fastpath "
8680 "FCP WQ/CQ (%d), rc = 0x%x\n",
8681 qidx, (uint32_t)rc);
8682 goto out_destroy;
8683 }
8684 }
67d12733 8685 }
895427bd 8686
da0436e9 8687 /*
895427bd 8688 * Set up Slow Path Complete Queues (CQs)
da0436e9
JS
8689 */
8690
895427bd 8691 /* Set up slow-path MBOX CQ/MQ */
da0436e9 8692
895427bd 8693 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
da0436e9 8694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
895427bd
JS
8695 "0528 %s not allocated\n",
8696 phba->sli4_hba.mbx_cq ?
d1f525aa 8697 "Mailbox WQ" : "Mailbox CQ");
1b51197d 8698 rc = -ENOMEM;
895427bd 8699 goto out_destroy;
da0436e9 8700 }
da0436e9 8701
895427bd 8702 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
d1f525aa
JS
8703 phba->sli4_hba.mbx_cq,
8704 phba->sli4_hba.mbx_wq,
8705 NULL, 0, LPFC_MBOX);
da0436e9
JS
8706 if (rc) {
8707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
895427bd
JS
8708 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
8709 (uint32_t)rc);
8710 goto out_destroy;
da0436e9 8711 }
2d7dbc4c
JS
8712 if (phba->nvmet_support) {
8713 if (!phba->sli4_hba.nvmet_cqset) {
8714 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8715 "3165 Fast-path NVME CQ Set "
8716 "array not allocated\n");
8717 rc = -ENOMEM;
8718 goto out_destroy;
8719 }
8720 if (phba->cfg_nvmet_mrq > 1) {
8721 rc = lpfc_cq_create_set(phba,
8722 phba->sli4_hba.nvmet_cqset,
8723 phba->sli4_hba.hba_eq,
8724 LPFC_WCQ, LPFC_NVMET);
8725 if (rc) {
8726 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8727 "3164 Failed setup of NVME CQ "
8728 "Set, rc = 0x%x\n",
8729 (uint32_t)rc);
8730 goto out_destroy;
8731 }
8732 } else {
8733 /* Set up NVMET Receive Complete Queue */
8734 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
8735 phba->sli4_hba.hba_eq[0],
8736 LPFC_WCQ, LPFC_NVMET);
8737 if (rc) {
8738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8739 "6089 Failed setup NVMET CQ: "
8740 "rc = 0x%x\n", (uint32_t)rc);
8741 goto out_destroy;
8742 }
8743 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8744 "6090 NVMET CQ setup: cq-id=%d, "
8745 "parent eq-id=%d\n",
8746 phba->sli4_hba.nvmet_cqset[0]->queue_id,
8747 phba->sli4_hba.hba_eq[0]->queue_id);
8748 }
8749 }
da0436e9 8750
895427bd
JS
8751 /* Set up slow-path ELS WQ/CQ */
8752 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
da0436e9 8753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
895427bd
JS
8754 "0530 ELS %s not allocated\n",
8755 phba->sli4_hba.els_cq ? "WQ" : "CQ");
1b51197d 8756 rc = -ENOMEM;
895427bd 8757 goto out_destroy;
da0436e9 8758 }
895427bd
JS
8759 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
8760 phba->sli4_hba.els_cq,
8761 phba->sli4_hba.els_wq,
8762 NULL, 0, LPFC_ELS);
da0436e9
JS
8763 if (rc) {
8764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
895427bd
JS
8765 "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
8766 (uint32_t)rc);
8767 goto out_destroy;
da0436e9
JS
8768 }
8769 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8770 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
8771 phba->sli4_hba.els_wq->queue_id,
8772 phba->sli4_hba.els_cq->queue_id);
8773
895427bd
JS
8774 if (phba->cfg_nvme_io_channel) {
8775 /* Set up NVME LS Complete Queue */
8776 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
8777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8778 "6091 LS %s not allocated\n",
8779 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
8780 rc = -ENOMEM;
8781 goto out_destroy;
8782 }
8783 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
8784 phba->sli4_hba.nvmels_cq,
8785 phba->sli4_hba.nvmels_wq,
8786 NULL, 0, LPFC_NVME_LS);
8787 if (rc) {
8788 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8789 "0529 Failed setup of NVVME LS WQ/CQ: "
8790 "rc = 0x%x\n", (uint32_t)rc);
8791 goto out_destroy;
8792 }
8793
8794 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8795 "6096 ELS WQ setup: wq-id=%d, "
8796 "parent cq-id=%d\n",
8797 phba->sli4_hba.nvmels_wq->queue_id,
8798 phba->sli4_hba.nvmels_cq->queue_id);
8799 }
8800
2d7dbc4c
JS
8801 /*
8802 * Create NVMET Receive Queue (RQ)
8803 */
8804 if (phba->nvmet_support) {
8805 if ((!phba->sli4_hba.nvmet_cqset) ||
8806 (!phba->sli4_hba.nvmet_mrq_hdr) ||
8807 (!phba->sli4_hba.nvmet_mrq_data)) {
8808 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8809 "6130 MRQ CQ Queues not "
8810 "allocated\n");
8811 rc = -ENOMEM;
8812 goto out_destroy;
8813 }
8814 if (phba->cfg_nvmet_mrq > 1) {
8815 rc = lpfc_mrq_create(phba,
8816 phba->sli4_hba.nvmet_mrq_hdr,
8817 phba->sli4_hba.nvmet_mrq_data,
8818 phba->sli4_hba.nvmet_cqset,
8819 LPFC_NVMET);
8820 if (rc) {
8821 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8822 "6098 Failed setup of NVMET "
8823 "MRQ: rc = 0x%x\n",
8824 (uint32_t)rc);
8825 goto out_destroy;
8826 }
8827
8828 } else {
8829 rc = lpfc_rq_create(phba,
8830 phba->sli4_hba.nvmet_mrq_hdr[0],
8831 phba->sli4_hba.nvmet_mrq_data[0],
8832 phba->sli4_hba.nvmet_cqset[0],
8833 LPFC_NVMET);
8834 if (rc) {
8835 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8836 "6057 Failed setup of NVMET "
8837 "Receive Queue: rc = 0x%x\n",
8838 (uint32_t)rc);
8839 goto out_destroy;
8840 }
8841
8842 lpfc_printf_log(
8843 phba, KERN_INFO, LOG_INIT,
8844 "6099 NVMET RQ setup: hdr-rq-id=%d, "
8845 "dat-rq-id=%d parent cq-id=%d\n",
8846 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
8847 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
8848 phba->sli4_hba.nvmet_cqset[0]->queue_id);
8849
8850 }
8851 }
8852
da0436e9
JS
8853 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
8854 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8855 "0540 Receive Queue not allocated\n");
1b51197d 8856 rc = -ENOMEM;
895427bd 8857 goto out_destroy;
da0436e9 8858 }
73d91e50 8859
da0436e9 8860 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
4d9ab994 8861 phba->sli4_hba.els_cq, LPFC_USOL);
da0436e9
JS
8862 if (rc) {
8863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8864 "0541 Failed setup of Receive Queue: "
a2fc4aef 8865 "rc = 0x%x\n", (uint32_t)rc);
895427bd 8866 goto out_destroy;
da0436e9 8867 }
73d91e50 8868
da0436e9
JS
8869 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8870 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
8871 "parent cq-id=%d\n",
8872 phba->sli4_hba.hdr_rq->queue_id,
8873 phba->sli4_hba.dat_rq->queue_id,
4d9ab994 8874 phba->sli4_hba.els_cq->queue_id);
1ba981fd
JS
8875
8876 if (phba->cfg_fof) {
8877 rc = lpfc_fof_queue_setup(phba);
8878 if (rc) {
8879 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8880 "0549 Failed setup of FOF Queues: "
8881 "rc = 0x%x\n", rc);
895427bd 8882 goto out_destroy;
1ba981fd
JS
8883 }
8884 }
2c9c5a00 8885
43140ca6 8886 for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
0cf07f84
JS
8887 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
8888 phba->cfg_fcp_imax);
43140ca6 8889
da0436e9
JS
8890 return 0;
8891
895427bd
JS
8892out_destroy:
8893 lpfc_sli4_queue_unset(phba);
da0436e9
JS
8894out_error:
8895 return rc;
8896}
8897
8898/**
8899 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
8900 * @phba: pointer to lpfc hba data structure.
8901 *
8902 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
8903 * operation.
8904 *
8905 * Return codes
af901ca1 8906 * 0 - successful
25985edc 8907 * -ENOMEM - No available memory
d439d286 8908 * -EIO - The mailbox failed to complete successfully.
da0436e9
JS
8909 **/
8910void
8911lpfc_sli4_queue_unset(struct lpfc_hba *phba)
8912{
895427bd 8913 int qidx;
da0436e9 8914
1ba981fd
JS
8915 /* Unset the queues created for Flash Optimized Fabric operations */
8916 if (phba->cfg_fof)
8917 lpfc_fof_queue_destroy(phba);
895427bd 8918
da0436e9 8919 /* Unset mailbox command work queue */
895427bd
JS
8920 if (phba->sli4_hba.mbx_wq)
8921 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
8922
8923 /* Unset NVME LS work queue */
8924 if (phba->sli4_hba.nvmels_wq)
8925 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
8926
da0436e9 8927 /* Unset ELS work queue */
019c0d66 8928 if (phba->sli4_hba.els_wq)
895427bd
JS
8929 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
8930
da0436e9 8931 /* Unset unsolicited receive queue */
895427bd
JS
8932 if (phba->sli4_hba.hdr_rq)
8933 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
8934 phba->sli4_hba.dat_rq);
8935
da0436e9 8936 /* Unset FCP work queue */
895427bd
JS
8937 if (phba->sli4_hba.fcp_wq)
8938 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
8939 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
8940
8941 /* Unset NVME work queue */
8942 if (phba->sli4_hba.nvme_wq) {
8943 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
8944 lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
67d12733 8945 }
895427bd 8946
da0436e9 8947 /* Unset mailbox command complete queue */
895427bd
JS
8948 if (phba->sli4_hba.mbx_cq)
8949 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
8950
da0436e9 8951 /* Unset ELS complete queue */
895427bd
JS
8952 if (phba->sli4_hba.els_cq)
8953 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
8954
8955 /* Unset NVME LS complete queue */
8956 if (phba->sli4_hba.nvmels_cq)
8957 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
8958
8959 /* Unset NVME response complete queue */
8960 if (phba->sli4_hba.nvme_cq)
8961 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
8962 lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
8963
2d7dbc4c
JS
8964 /* Unset NVMET MRQ queue */
8965 if (phba->sli4_hba.nvmet_mrq_hdr) {
8966 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
8967 lpfc_rq_destroy(phba,
8968 phba->sli4_hba.nvmet_mrq_hdr[qidx],
8969 phba->sli4_hba.nvmet_mrq_data[qidx]);
8970 }
8971
8972 /* Unset NVMET CQ Set complete queue */
8973 if (phba->sli4_hba.nvmet_cqset) {
8974 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
8975 lpfc_cq_destroy(phba,
8976 phba->sli4_hba.nvmet_cqset[qidx]);
8977 }
8978
da0436e9 8979 /* Unset FCP response complete queue */
895427bd
JS
8980 if (phba->sli4_hba.fcp_cq)
8981 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
8982 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
8983
da0436e9 8984 /* Unset fast-path event queue */
895427bd
JS
8985 if (phba->sli4_hba.hba_eq)
8986 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
8987 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
da0436e9
JS
8988}
8989
8990/**
8991 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
8992 * @phba: pointer to lpfc hba data structure.
8993 *
8994 * This routine is invoked to allocate and set up a pool of completion queue
8995 * events. The body of the completion queue event is a completion queue entry
8996 * CQE. For now, this pool is used for the interrupt service routine to queue
8997 * the following HBA completion queue events for the worker thread to process:
8998 * - Mailbox asynchronous events
8999 * - Receive queue completion unsolicited events
9000 * Later, this can be used for all the slow-path events.
9001 *
9002 * Return codes
af901ca1 9003 * 0 - successful
25985edc 9004 * -ENOMEM - No available memory
da0436e9
JS
9005 **/
9006static int
9007lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
9008{
9009 struct lpfc_cq_event *cq_event;
9010 int i;
9011
9012 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
9013 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
9014 if (!cq_event)
9015 goto out_pool_create_fail;
9016 list_add_tail(&cq_event->list,
9017 &phba->sli4_hba.sp_cqe_event_pool);
9018 }
9019 return 0;
9020
9021out_pool_create_fail:
9022 lpfc_sli4_cq_event_pool_destroy(phba);
9023 return -ENOMEM;
9024}
9025
9026/**
9027 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
9028 * @phba: pointer to lpfc hba data structure.
9029 *
9030 * This routine is invoked to free the pool of completion queue events at
9031 * driver unload time. Note that, it is the responsibility of the driver
9032 * cleanup routine to free all the outstanding completion-queue events
9033 * allocated from this pool back into the pool before invoking this routine
9034 * to destroy the pool.
9035 **/
9036static void
9037lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
9038{
9039 struct lpfc_cq_event *cq_event, *next_cq_event;
9040
9041 list_for_each_entry_safe(cq_event, next_cq_event,
9042 &phba->sli4_hba.sp_cqe_event_pool, list) {
9043 list_del(&cq_event->list);
9044 kfree(cq_event);
9045 }
9046}
9047
9048/**
9049 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9050 * @phba: pointer to lpfc hba data structure.
9051 *
9052 * This routine is the lock free version of the API invoked to allocate a
9053 * completion-queue event from the free pool.
9054 *
9055 * Return: Pointer to the newly allocated completion-queue event if successful
9056 * NULL otherwise.
9057 **/
9058struct lpfc_cq_event *
9059__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9060{
9061 struct lpfc_cq_event *cq_event = NULL;
9062
9063 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
9064 struct lpfc_cq_event, list);
9065 return cq_event;
9066}
9067
9068/**
9069 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9070 * @phba: pointer to lpfc hba data structure.
9071 *
9072 * This routine is the lock version of the API invoked to allocate a
9073 * completion-queue event from the free pool.
9074 *
9075 * Return: Pointer to the newly allocated completion-queue event if successful
9076 * NULL otherwise.
9077 **/
9078struct lpfc_cq_event *
9079lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9080{
9081 struct lpfc_cq_event *cq_event;
9082 unsigned long iflags;
9083
9084 spin_lock_irqsave(&phba->hbalock, iflags);
9085 cq_event = __lpfc_sli4_cq_event_alloc(phba);
9086 spin_unlock_irqrestore(&phba->hbalock, iflags);
9087 return cq_event;
9088}
9089
9090/**
9091 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9092 * @phba: pointer to lpfc hba data structure.
9093 * @cq_event: pointer to the completion queue event to be freed.
9094 *
9095 * This routine is the lock free version of the API invoked to release a
9096 * completion-queue event back into the free pool.
9097 **/
9098void
9099__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9100 struct lpfc_cq_event *cq_event)
9101{
9102 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
9103}
9104
9105/**
9106 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9107 * @phba: pointer to lpfc hba data structure.
9108 * @cq_event: pointer to the completion queue event to be freed.
9109 *
9110 * This routine is the lock version of the API invoked to release a
9111 * completion-queue event back into the free pool.
9112 **/
9113void
9114lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9115 struct lpfc_cq_event *cq_event)
9116{
9117 unsigned long iflags;
9118 spin_lock_irqsave(&phba->hbalock, iflags);
9119 __lpfc_sli4_cq_event_release(phba, cq_event);
9120 spin_unlock_irqrestore(&phba->hbalock, iflags);
9121}
9122
9123/**
9124 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
9125 * @phba: pointer to lpfc hba data structure.
9126 *
9127 * This routine is to free all the pending completion-queue events to the
9128 * back into the free pool for device reset.
9129 **/
9130static void
9131lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
9132{
9133 LIST_HEAD(cqelist);
9134 struct lpfc_cq_event *cqe;
9135 unsigned long iflags;
9136
9137 /* Retrieve all the pending WCQEs from pending WCQE lists */
9138 spin_lock_irqsave(&phba->hbalock, iflags);
9139 /* Pending FCP XRI abort events */
9140 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
9141 &cqelist);
9142 /* Pending ELS XRI abort events */
9143 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
9144 &cqelist);
318083ad
JS
9145 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9146 /* Pending NVME XRI abort events */
9147 list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
9148 &cqelist);
9149 }
da0436e9
JS
9150 /* Pending asynnc events */
9151 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
9152 &cqelist);
9153 spin_unlock_irqrestore(&phba->hbalock, iflags);
9154
9155 while (!list_empty(&cqelist)) {
9156 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
9157 lpfc_sli4_cq_event_release(phba, cqe);
9158 }
9159}
9160
9161/**
9162 * lpfc_pci_function_reset - Reset pci function.
9163 * @phba: pointer to lpfc hba data structure.
9164 *
9165 * This routine is invoked to request a PCI function reset. It will destroys
9166 * all resources assigned to the PCI function which originates this request.
9167 *
9168 * Return codes
af901ca1 9169 * 0 - successful
25985edc 9170 * -ENOMEM - No available memory
d439d286 9171 * -EIO - The mailbox failed to complete successfully.
da0436e9
JS
9172 **/
9173int
9174lpfc_pci_function_reset(struct lpfc_hba *phba)
9175{
9176 LPFC_MBOXQ_t *mboxq;
2fcee4bf 9177 uint32_t rc = 0, if_type;
da0436e9 9178 uint32_t shdr_status, shdr_add_status;
2f6fa2c9
JS
9179 uint32_t rdy_chk;
9180 uint32_t port_reset = 0;
da0436e9 9181 union lpfc_sli4_cfg_shdr *shdr;
2fcee4bf 9182 struct lpfc_register reg_data;
2b81f942 9183 uint16_t devid;
da0436e9 9184
2fcee4bf
JS
9185 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9186 switch (if_type) {
9187 case LPFC_SLI_INTF_IF_TYPE_0:
9188 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
9189 GFP_KERNEL);
9190 if (!mboxq) {
9191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9192 "0494 Unable to allocate memory for "
9193 "issuing SLI_FUNCTION_RESET mailbox "
9194 "command\n");
9195 return -ENOMEM;
9196 }
da0436e9 9197
2fcee4bf
JS
9198 /* Setup PCI function reset mailbox-ioctl command */
9199 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9200 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
9201 LPFC_SLI4_MBX_EMBED);
9202 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9203 shdr = (union lpfc_sli4_cfg_shdr *)
9204 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9205 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9206 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
9207 &shdr->response);
9208 if (rc != MBX_TIMEOUT)
9209 mempool_free(mboxq, phba->mbox_mem_pool);
9210 if (shdr_status || shdr_add_status || rc) {
9211 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9212 "0495 SLI_FUNCTION_RESET mailbox "
9213 "failed with status x%x add_status x%x,"
9214 " mbx status x%x\n",
9215 shdr_status, shdr_add_status, rc);
9216 rc = -ENXIO;
9217 }
9218 break;
9219 case LPFC_SLI_INTF_IF_TYPE_2:
2f6fa2c9
JS
9220wait:
9221 /*
9222 * Poll the Port Status Register and wait for RDY for
9223 * up to 30 seconds. If the port doesn't respond, treat
9224 * it as an error.
9225 */
77d093fb 9226 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
2f6fa2c9
JS
9227 if (lpfc_readl(phba->sli4_hba.u.if_type2.
9228 STATUSregaddr, &reg_data.word0)) {
9229 rc = -ENODEV;
9230 goto out;
9231 }
9232 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
9233 break;
9234 msleep(20);
9235 }
9236
9237 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
9238 phba->work_status[0] = readl(
9239 phba->sli4_hba.u.if_type2.ERR1regaddr);
9240 phba->work_status[1] = readl(
9241 phba->sli4_hba.u.if_type2.ERR2regaddr);
9242 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9243 "2890 Port not ready, port status reg "
9244 "0x%x error 1=0x%x, error 2=0x%x\n",
9245 reg_data.word0,
9246 phba->work_status[0],
9247 phba->work_status[1]);
9248 rc = -ENODEV;
9249 goto out;
9250 }
9251
9252 if (!port_reset) {
9253 /*
9254 * Reset the port now
9255 */
2fcee4bf
JS
9256 reg_data.word0 = 0;
9257 bf_set(lpfc_sliport_ctrl_end, &reg_data,
9258 LPFC_SLIPORT_LITTLE_ENDIAN);
9259 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
9260 LPFC_SLIPORT_INIT_PORT);
9261 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
9262 CTRLregaddr);
8fcb8acd 9263 /* flush */
2b81f942
JS
9264 pci_read_config_word(phba->pcidev,
9265 PCI_DEVICE_ID, &devid);
2fcee4bf 9266
2f6fa2c9
JS
9267 port_reset = 1;
9268 msleep(20);
9269 goto wait;
9270 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
9271 rc = -ENODEV;
9272 goto out;
2fcee4bf
JS
9273 }
9274 break;
2f6fa2c9 9275
2fcee4bf
JS
9276 case LPFC_SLI_INTF_IF_TYPE_1:
9277 default:
9278 break;
da0436e9 9279 }
2fcee4bf 9280
73d91e50 9281out:
2fcee4bf 9282 /* Catch the not-ready port failure after a port reset. */
2f6fa2c9 9283 if (rc) {
229adb0e
JS
9284 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9285 "3317 HBA not functional: IP Reset Failed "
2f6fa2c9 9286 "try: echo fw_reset > board_mode\n");
2fcee4bf 9287 rc = -ENODEV;
229adb0e 9288 }
2fcee4bf 9289
da0436e9
JS
9290 return rc;
9291}
9292
da0436e9
JS
9293/**
9294 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
9295 * @phba: pointer to lpfc hba data structure.
9296 *
9297 * This routine is invoked to set up the PCI device memory space for device
9298 * with SLI-4 interface spec.
9299 *
9300 * Return codes
af901ca1 9301 * 0 - successful
da0436e9
JS
9302 * other values - error
9303 **/
9304static int
9305lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
9306{
9307 struct pci_dev *pdev;
9308 unsigned long bar0map_len, bar1map_len, bar2map_len;
9309 int error = -ENODEV;
2fcee4bf 9310 uint32_t if_type;
da0436e9
JS
9311
9312 /* Obtain PCI device reference */
9313 if (!phba->pcidev)
9314 return error;
9315 else
9316 pdev = phba->pcidev;
9317
9318 /* Set the device DMA mask size */
8e68597d
MR
9319 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
9320 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
9321 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
9322 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
da0436e9 9323 return error;
8e68597d
MR
9324 }
9325 }
da0436e9 9326
2fcee4bf
JS
9327 /*
9328 * The BARs and register set definitions and offset locations are
9329 * dependent on the if_type.
9330 */
9331 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
9332 &phba->sli4_hba.sli_intf.word0)) {
9333 return error;
9334 }
9335
9336 /* There is no SLI3 failback for SLI4 devices. */
9337 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
9338 LPFC_SLI_INTF_VALID) {
9339 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9340 "2894 SLI_INTF reg contents invalid "
9341 "sli_intf reg 0x%x\n",
9342 phba->sli4_hba.sli_intf.word0);
9343 return error;
9344 }
9345
9346 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9347 /*
9348 * Get the bus address of SLI4 device Bar regions and the
9349 * number of bytes required by each mapping. The mapping of the
9350 * particular PCI BARs regions is dependent on the type of
9351 * SLI4 device.
da0436e9 9352 */
f5ca6f2e
JS
9353 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
9354 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
9355 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
2fcee4bf
JS
9356
9357 /*
9358 * Map SLI4 PCI Config Space Register base to a kernel virtual
9359 * addr
9360 */
9361 phba->sli4_hba.conf_regs_memmap_p =
9362 ioremap(phba->pci_bar0_map, bar0map_len);
9363 if (!phba->sli4_hba.conf_regs_memmap_p) {
9364 dev_printk(KERN_ERR, &pdev->dev,
9365 "ioremap failed for SLI4 PCI config "
9366 "registers.\n");
9367 goto out;
9368 }
f5ca6f2e 9369 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
2fcee4bf
JS
9370 /* Set up BAR0 PCI config space register memory map */
9371 lpfc_sli4_bar0_register_memmap(phba, if_type);
1dfb5a47
JS
9372 } else {
9373 phba->pci_bar0_map = pci_resource_start(pdev, 1);
9374 bar0map_len = pci_resource_len(pdev, 1);
2fcee4bf
JS
9375 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
9376 dev_printk(KERN_ERR, &pdev->dev,
9377 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
9378 goto out;
9379 }
9380 phba->sli4_hba.conf_regs_memmap_p =
da0436e9 9381 ioremap(phba->pci_bar0_map, bar0map_len);
2fcee4bf
JS
9382 if (!phba->sli4_hba.conf_regs_memmap_p) {
9383 dev_printk(KERN_ERR, &pdev->dev,
9384 "ioremap failed for SLI4 PCI config "
9385 "registers.\n");
9386 goto out;
9387 }
9388 lpfc_sli4_bar0_register_memmap(phba, if_type);
da0436e9
JS
9389 }
9390
c31098ce 9391 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
f5ca6f2e 9392 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
2fcee4bf
JS
9393 /*
9394 * Map SLI4 if type 0 HBA Control Register base to a kernel
9395 * virtual address and setup the registers.
9396 */
f5ca6f2e
JS
9397 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
9398 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
2fcee4bf 9399 phba->sli4_hba.ctrl_regs_memmap_p =
da0436e9 9400 ioremap(phba->pci_bar1_map, bar1map_len);
2fcee4bf
JS
9401 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
9402 dev_printk(KERN_ERR, &pdev->dev,
da0436e9 9403 "ioremap failed for SLI4 HBA control registers.\n");
2fcee4bf
JS
9404 goto out_iounmap_conf;
9405 }
f5ca6f2e 9406 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
2fcee4bf 9407 lpfc_sli4_bar1_register_memmap(phba);
da0436e9
JS
9408 }
9409
c31098ce 9410 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
f5ca6f2e 9411 (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
2fcee4bf
JS
9412 /*
9413 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
9414 * virtual address and setup the registers.
9415 */
f5ca6f2e
JS
9416 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
9417 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
2fcee4bf 9418 phba->sli4_hba.drbl_regs_memmap_p =
da0436e9 9419 ioremap(phba->pci_bar2_map, bar2map_len);
2fcee4bf
JS
9420 if (!phba->sli4_hba.drbl_regs_memmap_p) {
9421 dev_printk(KERN_ERR, &pdev->dev,
da0436e9 9422 "ioremap failed for SLI4 HBA doorbell registers.\n");
2fcee4bf
JS
9423 goto out_iounmap_ctrl;
9424 }
f5ca6f2e 9425 phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
2fcee4bf
JS
9426 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
9427 if (error)
9428 goto out_iounmap_all;
da0436e9
JS
9429 }
9430
da0436e9
JS
9431 return 0;
9432
9433out_iounmap_all:
9434 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
9435out_iounmap_ctrl:
9436 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
9437out_iounmap_conf:
9438 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9439out:
9440 return error;
9441}
9442
9443/**
9444 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
9445 * @phba: pointer to lpfc hba data structure.
9446 *
9447 * This routine is invoked to unset the PCI device memory space for device
9448 * with SLI-4 interface spec.
9449 **/
9450static void
9451lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
9452{
2e90f4b5
JS
9453 uint32_t if_type;
9454 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
da0436e9 9455
2e90f4b5
JS
9456 switch (if_type) {
9457 case LPFC_SLI_INTF_IF_TYPE_0:
9458 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
9459 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
9460 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9461 break;
9462 case LPFC_SLI_INTF_IF_TYPE_2:
9463 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9464 break;
9465 case LPFC_SLI_INTF_IF_TYPE_1:
9466 default:
9467 dev_printk(KERN_ERR, &phba->pcidev->dev,
9468 "FATAL - unsupported SLI4 interface type - %d\n",
9469 if_type);
9470 break;
9471 }
da0436e9
JS
9472}
9473
9474/**
9475 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
9476 * @phba: pointer to lpfc hba data structure.
9477 *
9478 * This routine is invoked to enable the MSI-X interrupt vectors to device
45ffac19 9479 * with SLI-3 interface specs.
da0436e9
JS
9480 *
9481 * Return codes
af901ca1 9482 * 0 - successful
da0436e9
JS
9483 * other values - error
9484 **/
9485static int
9486lpfc_sli_enable_msix(struct lpfc_hba *phba)
9487{
45ffac19 9488 int rc;
da0436e9
JS
9489 LPFC_MBOXQ_t *pmb;
9490
9491 /* Set up MSI-X multi-message vectors */
45ffac19
CH
9492 rc = pci_alloc_irq_vectors(phba->pcidev,
9493 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
9494 if (rc < 0) {
da0436e9
JS
9495 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9496 "0420 PCI enable MSI-X failed (%d)\n", rc);
029165ac 9497 goto vec_fail_out;
da0436e9 9498 }
45ffac19 9499
da0436e9
JS
9500 /*
9501 * Assign MSI-X vectors to interrupt handlers
9502 */
9503
9504 /* vector-0 is associated to slow-path handler */
45ffac19 9505 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
ed243d37 9506 &lpfc_sli_sp_intr_handler, 0,
da0436e9
JS
9507 LPFC_SP_DRIVER_HANDLER_NAME, phba);
9508 if (rc) {
9509 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9510 "0421 MSI-X slow-path request_irq failed "
9511 "(%d)\n", rc);
9512 goto msi_fail_out;
9513 }
9514
9515 /* vector-1 is associated to fast-path handler */
45ffac19 9516 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
ed243d37 9517 &lpfc_sli_fp_intr_handler, 0,
da0436e9
JS
9518 LPFC_FP_DRIVER_HANDLER_NAME, phba);
9519
9520 if (rc) {
9521 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9522 "0429 MSI-X fast-path request_irq failed "
9523 "(%d)\n", rc);
9524 goto irq_fail_out;
9525 }
9526
9527 /*
9528 * Configure HBA MSI-X attention conditions to messages
9529 */
9530 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9531
9532 if (!pmb) {
9533 rc = -ENOMEM;
9534 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9535 "0474 Unable to allocate memory for issuing "
9536 "MBOX_CONFIG_MSI command\n");
9537 goto mem_fail_out;
9538 }
9539 rc = lpfc_config_msi(phba, pmb);
9540 if (rc)
9541 goto mbx_fail_out;
9542 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9543 if (rc != MBX_SUCCESS) {
9544 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
9545 "0351 Config MSI mailbox command failed, "
9546 "mbxCmd x%x, mbxStatus x%x\n",
9547 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
9548 goto mbx_fail_out;
9549 }
9550
9551 /* Free memory allocated for mailbox command */
9552 mempool_free(pmb, phba->mbox_mem_pool);
9553 return rc;
9554
9555mbx_fail_out:
9556 /* Free memory allocated for mailbox command */
9557 mempool_free(pmb, phba->mbox_mem_pool);
9558
9559mem_fail_out:
9560 /* free the irq already requested */
45ffac19 9561 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
da0436e9
JS
9562
9563irq_fail_out:
9564 /* free the irq already requested */
45ffac19 9565 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
da0436e9
JS
9566
9567msi_fail_out:
9568 /* Unconfigure MSI-X capability structure */
45ffac19 9569 pci_free_irq_vectors(phba->pcidev);
029165ac
AG
9570
9571vec_fail_out:
da0436e9
JS
9572 return rc;
9573}
9574
da0436e9
JS
9575/**
9576 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
9577 * @phba: pointer to lpfc hba data structure.
9578 *
9579 * This routine is invoked to enable the MSI interrupt mode to device with
9580 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
9581 * enable the MSI vector. The device driver is responsible for calling the
9582 * request_irq() to register MSI vector with a interrupt the handler, which
9583 * is done in this function.
9584 *
9585 * Return codes
af901ca1 9586 * 0 - successful
da0436e9
JS
9587 * other values - error
9588 */
9589static int
9590lpfc_sli_enable_msi(struct lpfc_hba *phba)
9591{
9592 int rc;
9593
9594 rc = pci_enable_msi(phba->pcidev);
9595 if (!rc)
9596 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9597 "0462 PCI enable MSI mode success.\n");
9598 else {
9599 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9600 "0471 PCI enable MSI mode failed (%d)\n", rc);
9601 return rc;
9602 }
9603
9604 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
ed243d37 9605 0, LPFC_DRIVER_NAME, phba);
da0436e9
JS
9606 if (rc) {
9607 pci_disable_msi(phba->pcidev);
9608 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9609 "0478 MSI request_irq failed (%d)\n", rc);
9610 }
9611 return rc;
9612}
9613
da0436e9
JS
9614/**
9615 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
9616 * @phba: pointer to lpfc hba data structure.
9617 *
9618 * This routine is invoked to enable device interrupt and associate driver's
9619 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
9620 * spec. Depends on the interrupt mode configured to the driver, the driver
9621 * will try to fallback from the configured interrupt mode to an interrupt
9622 * mode which is supported by the platform, kernel, and device in the order
9623 * of:
9624 * MSI-X -> MSI -> IRQ.
9625 *
9626 * Return codes
af901ca1 9627 * 0 - successful
da0436e9
JS
9628 * other values - error
9629 **/
9630static uint32_t
9631lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
9632{
9633 uint32_t intr_mode = LPFC_INTR_ERROR;
9634 int retval;
9635
9636 if (cfg_mode == 2) {
9637 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
9638 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
9639 if (!retval) {
9640 /* Now, try to enable MSI-X interrupt mode */
9641 retval = lpfc_sli_enable_msix(phba);
9642 if (!retval) {
9643 /* Indicate initialization to MSI-X mode */
9644 phba->intr_type = MSIX;
9645 intr_mode = 2;
9646 }
9647 }
9648 }
9649
9650 /* Fallback to MSI if MSI-X initialization failed */
9651 if (cfg_mode >= 1 && phba->intr_type == NONE) {
9652 retval = lpfc_sli_enable_msi(phba);
9653 if (!retval) {
9654 /* Indicate initialization to MSI mode */
9655 phba->intr_type = MSI;
9656 intr_mode = 1;
9657 }
9658 }
9659
9660 /* Fallback to INTx if both MSI-X/MSI initalization failed */
9661 if (phba->intr_type == NONE) {
9662 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
9663 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
9664 if (!retval) {
9665 /* Indicate initialization to INTx mode */
9666 phba->intr_type = INTx;
9667 intr_mode = 0;
9668 }
9669 }
9670 return intr_mode;
9671}
9672
9673/**
9674 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
9675 * @phba: pointer to lpfc hba data structure.
9676 *
9677 * This routine is invoked to disable device interrupt and disassociate the
9678 * driver's interrupt handler(s) from interrupt vector(s) to device with
9679 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
9680 * release the interrupt vector(s) for the message signaled interrupt.
9681 **/
9682static void
9683lpfc_sli_disable_intr(struct lpfc_hba *phba)
9684{
45ffac19
CH
9685 int nr_irqs, i;
9686
da0436e9 9687 if (phba->intr_type == MSIX)
45ffac19
CH
9688 nr_irqs = LPFC_MSIX_VECTORS;
9689 else
9690 nr_irqs = 1;
9691
9692 for (i = 0; i < nr_irqs; i++)
9693 free_irq(pci_irq_vector(phba->pcidev, i), phba);
9694 pci_free_irq_vectors(phba->pcidev);
da0436e9
JS
9695
9696 /* Reset interrupt management states */
9697 phba->intr_type = NONE;
9698 phba->sli.slistat.sli_intr = 0;
da0436e9
JS
9699}
9700
7bb03bbf 9701/**
895427bd 9702 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
7bb03bbf 9703 * @phba: pointer to lpfc hba data structure.
895427bd
JS
9704 * @vectors: number of msix vectors allocated.
9705 *
9706 * The routine will figure out the CPU affinity assignment for every
9707 * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated
9708 * with a pointer to the CPU mask that defines ALL the CPUs this vector
9709 * can be associated with. If the vector can be unquely associated with
9710 * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
9711 * In addition, the CPU to IO channel mapping will be calculated
9712 * and the phba->sli4_hba.cpu_map array will reflect this.
7bb03bbf 9713 */
895427bd
JS
9714static void
9715lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
7bb03bbf
JS
9716{
9717 struct lpfc_vector_map_info *cpup;
895427bd
JS
9718 int index = 0;
9719 int vec = 0;
7bb03bbf 9720 int cpu;
7bb03bbf
JS
9721#ifdef CONFIG_X86
9722 struct cpuinfo_x86 *cpuinfo;
9723#endif
7bb03bbf
JS
9724
9725 /* Init cpu_map array */
9726 memset(phba->sli4_hba.cpu_map, 0xff,
9727 (sizeof(struct lpfc_vector_map_info) *
895427bd 9728 phba->sli4_hba.num_present_cpu));
7bb03bbf
JS
9729
9730 /* Update CPU map with physical id and core id of each CPU */
9731 cpup = phba->sli4_hba.cpu_map;
9732 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
9733#ifdef CONFIG_X86
9734 cpuinfo = &cpu_data(cpu);
9735 cpup->phys_id = cpuinfo->phys_proc_id;
9736 cpup->core_id = cpuinfo->cpu_core_id;
9737#else
9738 /* No distinction between CPUs for other platforms */
9739 cpup->phys_id = 0;
9740 cpup->core_id = 0;
9741#endif
895427bd
JS
9742 cpup->channel_id = index; /* For now round robin */
9743 cpup->irq = pci_irq_vector(phba->pcidev, vec);
9744 vec++;
9745 if (vec >= vectors)
9746 vec = 0;
9747 index++;
9748 if (index >= phba->cfg_fcp_io_channel)
9749 index = 0;
7bb03bbf
JS
9750 cpup++;
9751 }
7bb03bbf
JS
9752}
9753
9754
da0436e9
JS
9755/**
9756 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
9757 * @phba: pointer to lpfc hba data structure.
9758 *
9759 * This routine is invoked to enable the MSI-X interrupt vectors to device
45ffac19 9760 * with SLI-4 interface spec.
da0436e9
JS
9761 *
9762 * Return codes
af901ca1 9763 * 0 - successful
da0436e9
JS
9764 * other values - error
9765 **/
9766static int
9767lpfc_sli4_enable_msix(struct lpfc_hba *phba)
9768{
75baf696 9769 int vectors, rc, index;
b83d005e 9770 char *name;
da0436e9
JS
9771
9772 /* Set up MSI-X multi-message vectors */
895427bd 9773 vectors = phba->io_channel_irqs;
45ffac19 9774 if (phba->cfg_fof)
1ba981fd 9775 vectors++;
45ffac19 9776
f358dd0c
JS
9777 rc = pci_alloc_irq_vectors(phba->pcidev,
9778 (phba->nvmet_support) ? 1 : 2,
9779 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
4f871e1b 9780 if (rc < 0) {
da0436e9
JS
9781 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9782 "0484 PCI enable MSI-X failed (%d)\n", rc);
029165ac 9783 goto vec_fail_out;
da0436e9 9784 }
4f871e1b 9785 vectors = rc;
75baf696 9786
7bb03bbf 9787 /* Assign MSI-X vectors to interrupt handlers */
67d12733 9788 for (index = 0; index < vectors; index++) {
b83d005e
JS
9789 name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
9790 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
9791 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
4305f183 9792 LPFC_DRIVER_HANDLER_NAME"%d", index);
da0436e9 9793
895427bd
JS
9794 phba->sli4_hba.hba_eq_hdl[index].idx = index;
9795 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
9796 atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
1ba981fd 9797 if (phba->cfg_fof && (index == (vectors - 1)))
45ffac19 9798 rc = request_irq(pci_irq_vector(phba->pcidev, index),
ed243d37 9799 &lpfc_sli4_fof_intr_handler, 0,
b83d005e 9800 name,
895427bd 9801 &phba->sli4_hba.hba_eq_hdl[index]);
1ba981fd 9802 else
45ffac19 9803 rc = request_irq(pci_irq_vector(phba->pcidev, index),
ed243d37 9804 &lpfc_sli4_hba_intr_handler, 0,
b83d005e 9805 name,
895427bd 9806 &phba->sli4_hba.hba_eq_hdl[index]);
da0436e9
JS
9807 if (rc) {
9808 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9809 "0486 MSI-X fast-path (%d) "
9810 "request_irq failed (%d)\n", index, rc);
9811 goto cfg_fail_out;
9812 }
9813 }
9814
1ba981fd
JS
9815 if (phba->cfg_fof)
9816 vectors--;
9817
895427bd 9818 if (vectors != phba->io_channel_irqs) {
82c3e9ba
JS
9819 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9820 "3238 Reducing IO channels to match number of "
9821 "MSI-X vectors, requested %d got %d\n",
895427bd
JS
9822 phba->io_channel_irqs, vectors);
9823 if (phba->cfg_fcp_io_channel > vectors)
9824 phba->cfg_fcp_io_channel = vectors;
9825 if (phba->cfg_nvme_io_channel > vectors)
9826 phba->cfg_nvme_io_channel = vectors;
9827 if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
9828 phba->io_channel_irqs = phba->cfg_fcp_io_channel;
9829 else
9830 phba->io_channel_irqs = phba->cfg_nvme_io_channel;
82c3e9ba 9831 }
895427bd 9832 lpfc_cpu_affinity_check(phba, vectors);
7bb03bbf 9833
da0436e9
JS
9834 return rc;
9835
9836cfg_fail_out:
9837 /* free the irq already requested */
895427bd
JS
9838 for (--index; index >= 0; index--)
9839 free_irq(pci_irq_vector(phba->pcidev, index),
9840 &phba->sli4_hba.hba_eq_hdl[index]);
da0436e9 9841
da0436e9 9842 /* Unconfigure MSI-X capability structure */
45ffac19 9843 pci_free_irq_vectors(phba->pcidev);
029165ac
AG
9844
9845vec_fail_out:
da0436e9
JS
9846 return rc;
9847}
9848
da0436e9
JS
9849/**
9850 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
9851 * @phba: pointer to lpfc hba data structure.
9852 *
9853 * This routine is invoked to enable the MSI interrupt mode to device with
9854 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
9855 * to enable the MSI vector. The device driver is responsible for calling
9856 * the request_irq() to register MSI vector with a interrupt the handler,
9857 * which is done in this function.
9858 *
9859 * Return codes
af901ca1 9860 * 0 - successful
da0436e9
JS
9861 * other values - error
9862 **/
9863static int
9864lpfc_sli4_enable_msi(struct lpfc_hba *phba)
9865{
9866 int rc, index;
9867
9868 rc = pci_enable_msi(phba->pcidev);
9869 if (!rc)
9870 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9871 "0487 PCI enable MSI mode success.\n");
9872 else {
9873 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9874 "0488 PCI enable MSI mode failed (%d)\n", rc);
9875 return rc;
9876 }
9877
9878 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
ed243d37 9879 0, LPFC_DRIVER_NAME, phba);
da0436e9
JS
9880 if (rc) {
9881 pci_disable_msi(phba->pcidev);
9882 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9883 "0490 MSI request_irq failed (%d)\n", rc);
75baf696 9884 return rc;
da0436e9
JS
9885 }
9886
895427bd
JS
9887 for (index = 0; index < phba->io_channel_irqs; index++) {
9888 phba->sli4_hba.hba_eq_hdl[index].idx = index;
9889 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
da0436e9
JS
9890 }
9891
1ba981fd 9892 if (phba->cfg_fof) {
895427bd
JS
9893 phba->sli4_hba.hba_eq_hdl[index].idx = index;
9894 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
1ba981fd 9895 }
75baf696 9896 return 0;
da0436e9
JS
9897}
9898
da0436e9
JS
9899/**
9900 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
9901 * @phba: pointer to lpfc hba data structure.
9902 *
9903 * This routine is invoked to enable device interrupt and associate driver's
9904 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
9905 * interface spec. Depends on the interrupt mode configured to the driver,
9906 * the driver will try to fallback from the configured interrupt mode to an
9907 * interrupt mode which is supported by the platform, kernel, and device in
9908 * the order of:
9909 * MSI-X -> MSI -> IRQ.
9910 *
9911 * Return codes
af901ca1 9912 * 0 - successful
da0436e9
JS
9913 * other values - error
9914 **/
9915static uint32_t
9916lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
9917{
9918 uint32_t intr_mode = LPFC_INTR_ERROR;
895427bd 9919 int retval, idx;
da0436e9
JS
9920
9921 if (cfg_mode == 2) {
9922 /* Preparation before conf_msi mbox cmd */
9923 retval = 0;
9924 if (!retval) {
9925 /* Now, try to enable MSI-X interrupt mode */
9926 retval = lpfc_sli4_enable_msix(phba);
9927 if (!retval) {
9928 /* Indicate initialization to MSI-X mode */
9929 phba->intr_type = MSIX;
9930 intr_mode = 2;
9931 }
9932 }
9933 }
9934
9935 /* Fallback to MSI if MSI-X initialization failed */
9936 if (cfg_mode >= 1 && phba->intr_type == NONE) {
9937 retval = lpfc_sli4_enable_msi(phba);
9938 if (!retval) {
9939 /* Indicate initialization to MSI mode */
9940 phba->intr_type = MSI;
9941 intr_mode = 1;
9942 }
9943 }
9944
9945 /* Fallback to INTx if both MSI-X/MSI initalization failed */
9946 if (phba->intr_type == NONE) {
9947 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
9948 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
9949 if (!retval) {
895427bd
JS
9950 struct lpfc_hba_eq_hdl *eqhdl;
9951
da0436e9
JS
9952 /* Indicate initialization to INTx mode */
9953 phba->intr_type = INTx;
9954 intr_mode = 0;
895427bd
JS
9955
9956 for (idx = 0; idx < phba->io_channel_irqs; idx++) {
9957 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
9958 eqhdl->idx = idx;
9959 eqhdl->phba = phba;
9960 atomic_set(&eqhdl->hba_eq_in_use, 1);
da0436e9 9961 }
1ba981fd 9962 if (phba->cfg_fof) {
895427bd
JS
9963 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
9964 eqhdl->idx = idx;
9965 eqhdl->phba = phba;
9966 atomic_set(&eqhdl->hba_eq_in_use, 1);
1ba981fd 9967 }
da0436e9
JS
9968 }
9969 }
9970 return intr_mode;
9971}
9972
9973/**
9974 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
9975 * @phba: pointer to lpfc hba data structure.
9976 *
9977 * This routine is invoked to disable device interrupt and disassociate
9978 * the driver's interrupt handler(s) from interrupt vector(s) to device
9979 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
9980 * will release the interrupt vector(s) for the message signaled interrupt.
9981 **/
9982static void
9983lpfc_sli4_disable_intr(struct lpfc_hba *phba)
9984{
9985 /* Disable the currently initialized interrupt mode */
45ffac19
CH
9986 if (phba->intr_type == MSIX) {
9987 int index;
9988
9989 /* Free up MSI-X multi-message vectors */
895427bd
JS
9990 for (index = 0; index < phba->io_channel_irqs; index++)
9991 free_irq(pci_irq_vector(phba->pcidev, index),
9992 &phba->sli4_hba.hba_eq_hdl[index]);
45ffac19
CH
9993
9994 if (phba->cfg_fof)
895427bd
JS
9995 free_irq(pci_irq_vector(phba->pcidev, index),
9996 &phba->sli4_hba.hba_eq_hdl[index]);
45ffac19 9997 } else {
da0436e9 9998 free_irq(phba->pcidev->irq, phba);
45ffac19
CH
9999 }
10000
10001 pci_free_irq_vectors(phba->pcidev);
da0436e9
JS
10002
10003 /* Reset interrupt management states */
10004 phba->intr_type = NONE;
10005 phba->sli.slistat.sli_intr = 0;
da0436e9
JS
10006}
10007
10008/**
10009 * lpfc_unset_hba - Unset SLI3 hba device initialization
10010 * @phba: pointer to lpfc hba data structure.
10011 *
10012 * This routine is invoked to unset the HBA device initialization steps to
10013 * a device with SLI-3 interface spec.
10014 **/
10015static void
10016lpfc_unset_hba(struct lpfc_hba *phba)
10017{
10018 struct lpfc_vport *vport = phba->pport;
10019 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
10020
10021 spin_lock_irq(shost->host_lock);
10022 vport->load_flag |= FC_UNLOADING;
10023 spin_unlock_irq(shost->host_lock);
10024
72859909
JS
10025 kfree(phba->vpi_bmask);
10026 kfree(phba->vpi_ids);
10027
da0436e9
JS
10028 lpfc_stop_hba_timers(phba);
10029
10030 phba->pport->work_port_events = 0;
10031
10032 lpfc_sli_hba_down(phba);
10033
10034 lpfc_sli_brdrestart(phba);
10035
10036 lpfc_sli_disable_intr(phba);
10037
10038 return;
10039}
10040
5af5eee7
JS
10041/**
10042 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
10043 * @phba: Pointer to HBA context object.
10044 *
10045 * This function is called in the SLI4 code path to wait for completion
10046 * of device's XRIs exchange busy. It will check the XRI exchange busy
10047 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
10048 * that, it will check the XRI exchange busy on outstanding FCP and ELS
10049 * I/Os every 30 seconds, log error message, and wait forever. Only when
10050 * all XRI exchange busy complete, the driver unload shall proceed with
10051 * invoking the function reset ioctl mailbox command to the CNA and the
10052 * the rest of the driver unload resource release.
10053 **/
10054static void
10055lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
10056{
10057 int wait_time = 0;
895427bd 10058 int nvme_xri_cmpl = 1;
86c67379 10059 int nvmet_xri_cmpl = 1;
895427bd 10060 int fcp_xri_cmpl = 1;
5af5eee7
JS
10061 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
10062
895427bd
JS
10063 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
10064 fcp_xri_cmpl =
10065 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
86c67379 10066 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
895427bd
JS
10067 nvme_xri_cmpl =
10068 list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
86c67379
JS
10069 nvmet_xri_cmpl =
10070 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
10071 }
895427bd 10072
f358dd0c
JS
10073 while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
10074 !nvmet_xri_cmpl) {
5af5eee7 10075 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
895427bd
JS
10076 if (!nvme_xri_cmpl)
10077 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10078 "6100 NVME XRI exchange busy "
10079 "wait time: %d seconds.\n",
10080 wait_time/1000);
5af5eee7
JS
10081 if (!fcp_xri_cmpl)
10082 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10083 "2877 FCP XRI exchange busy "
10084 "wait time: %d seconds.\n",
10085 wait_time/1000);
10086 if (!els_xri_cmpl)
10087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10088 "2878 ELS XRI exchange busy "
10089 "wait time: %d seconds.\n",
10090 wait_time/1000);
10091 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
10092 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
10093 } else {
10094 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
10095 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
10096 }
86c67379 10097 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
895427bd
JS
10098 nvme_xri_cmpl = list_empty(
10099 &phba->sli4_hba.lpfc_abts_nvme_buf_list);
86c67379
JS
10100 nvmet_xri_cmpl = list_empty(
10101 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
10102 }
895427bd
JS
10103
10104 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
10105 fcp_xri_cmpl = list_empty(
10106 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
10107
5af5eee7
JS
10108 els_xri_cmpl =
10109 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
f358dd0c 10110
5af5eee7
JS
10111 }
10112}
10113
da0436e9
JS
10114/**
10115 * lpfc_sli4_hba_unset - Unset the fcoe hba
10116 * @phba: Pointer to HBA context object.
10117 *
10118 * This function is called in the SLI4 code path to reset the HBA's FCoE
10119 * function. The caller is not required to hold any lock. This routine
10120 * issues PCI function reset mailbox command to reset the FCoE function.
10121 * At the end of the function, it calls lpfc_hba_down_post function to
10122 * free any pending commands.
10123 **/
10124static void
10125lpfc_sli4_hba_unset(struct lpfc_hba *phba)
10126{
10127 int wait_cnt = 0;
10128 LPFC_MBOXQ_t *mboxq;
912e3acd 10129 struct pci_dev *pdev = phba->pcidev;
da0436e9
JS
10130
10131 lpfc_stop_hba_timers(phba);
10132 phba->sli4_hba.intr_enable = 0;
10133
10134 /*
10135 * Gracefully wait out the potential current outstanding asynchronous
10136 * mailbox command.
10137 */
10138
10139 /* First, block any pending async mailbox command from posted */
10140 spin_lock_irq(&phba->hbalock);
10141 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
10142 spin_unlock_irq(&phba->hbalock);
10143 /* Now, trying to wait it out if we can */
10144 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10145 msleep(10);
10146 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
10147 break;
10148 }
10149 /* Forcefully release the outstanding mailbox command if timed out */
10150 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10151 spin_lock_irq(&phba->hbalock);
10152 mboxq = phba->sli.mbox_active;
10153 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10154 __lpfc_mbox_cmpl_put(phba, mboxq);
10155 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10156 phba->sli.mbox_active = NULL;
10157 spin_unlock_irq(&phba->hbalock);
10158 }
10159
5af5eee7
JS
10160 /* Abort all iocbs associated with the hba */
10161 lpfc_sli_hba_iocb_abort(phba);
10162
10163 /* Wait for completion of device XRI exchange busy */
10164 lpfc_sli4_xri_exchange_busy_wait(phba);
10165
da0436e9
JS
10166 /* Disable PCI subsystem interrupt */
10167 lpfc_sli4_disable_intr(phba);
10168
912e3acd
JS
10169 /* Disable SR-IOV if enabled */
10170 if (phba->cfg_sriov_nr_virtfn)
10171 pci_disable_sriov(pdev);
10172
da0436e9
JS
10173 /* Stop kthread signal shall trigger work_done one more time */
10174 kthread_stop(phba->worker_thread);
10175
d1f525aa
JS
10176 /* Unset the queues shared with the hardware then release all
10177 * allocated resources.
10178 */
10179 lpfc_sli4_queue_unset(phba);
10180 lpfc_sli4_queue_destroy(phba);
10181
3677a3a7
JS
10182 /* Reset SLI4 HBA FCoE function */
10183 lpfc_pci_function_reset(phba);
10184
da0436e9
JS
10185 /* Stop the SLI4 device port */
10186 phba->pport->work_port_events = 0;
10187}
10188
28baac74
JS
10189 /**
10190 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
10191 * @phba: Pointer to HBA context object.
10192 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
10193 *
10194 * This function is called in the SLI4 code path to read the port's
10195 * sli4 capabilities.
10196 *
10197 * This function may be be called from any context that can block-wait
10198 * for the completion. The expectation is that this routine is called
10199 * typically from probe_one or from the online routine.
10200 **/
10201int
10202lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
10203{
10204 int rc;
10205 struct lpfc_mqe *mqe;
10206 struct lpfc_pc_sli4_params *sli4_params;
10207 uint32_t mbox_tmo;
10208
10209 rc = 0;
10210 mqe = &mboxq->u.mqe;
10211
10212 /* Read the port's SLI4 Parameters port capabilities */
fedd3b7b 10213 lpfc_pc_sli4_params(mboxq);
28baac74
JS
10214 if (!phba->sli4_hba.intr_enable)
10215 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10216 else {
a183a15f 10217 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
28baac74
JS
10218 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
10219 }
10220
10221 if (unlikely(rc))
10222 return 1;
10223
10224 sli4_params = &phba->sli4_hba.pc_sli4_params;
10225 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
10226 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
10227 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
10228 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
10229 &mqe->un.sli4_params);
10230 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
10231 &mqe->un.sli4_params);
10232 sli4_params->proto_types = mqe->un.sli4_params.word3;
10233 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
10234 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
10235 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
10236 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
10237 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
10238 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
10239 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
10240 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
10241 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
10242 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
10243 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
10244 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
10245 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
10246 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
10247 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
10248 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
10249 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
10250 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
10251 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
10252 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
0558056c
JS
10253
10254 /* Make sure that sge_supp_len can be handled by the driver */
10255 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
10256 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
10257
28baac74
JS
10258 return rc;
10259}
10260
fedd3b7b
JS
10261/**
10262 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
10263 * @phba: Pointer to HBA context object.
10264 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
10265 *
10266 * This function is called in the SLI4 code path to read the port's
10267 * sli4 capabilities.
10268 *
10269 * This function may be be called from any context that can block-wait
10270 * for the completion. The expectation is that this routine is called
10271 * typically from probe_one or from the online routine.
10272 **/
10273int
10274lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
10275{
10276 int rc;
10277 struct lpfc_mqe *mqe = &mboxq->u.mqe;
10278 struct lpfc_pc_sli4_params *sli4_params;
a183a15f 10279 uint32_t mbox_tmo;
fedd3b7b
JS
10280 int length;
10281 struct lpfc_sli4_parameters *mbx_sli4_parameters;
10282
6d368e53
JS
10283 /*
10284 * By default, the driver assumes the SLI4 port requires RPI
10285 * header postings. The SLI4_PARAM response will correct this
10286 * assumption.
10287 */
10288 phba->sli4_hba.rpi_hdrs_in_use = 1;
10289
fedd3b7b
JS
10290 /* Read the port's SLI4 Config Parameters */
10291 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
10292 sizeof(struct lpfc_sli4_cfg_mhdr));
10293 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10294 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
10295 length, LPFC_SLI4_MBX_EMBED);
10296 if (!phba->sli4_hba.intr_enable)
10297 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
a183a15f
JS
10298 else {
10299 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
10300 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
10301 }
fedd3b7b
JS
10302 if (unlikely(rc))
10303 return rc;
10304 sli4_params = &phba->sli4_hba.pc_sli4_params;
10305 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
10306 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
10307 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
10308 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
10309 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
10310 mbx_sli4_parameters);
10311 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
10312 mbx_sli4_parameters);
10313 if (bf_get(cfg_phwq, mbx_sli4_parameters))
10314 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
10315 else
10316 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
10317 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
10318 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
1ba981fd 10319 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
fedd3b7b
JS
10320 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
10321 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
10322 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
10323 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
0c651878 10324 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
fedd3b7b
JS
10325 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
10326 mbx_sli4_parameters);
895427bd 10327 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
fedd3b7b
JS
10328 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
10329 mbx_sli4_parameters);
6d368e53
JS
10330 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
10331 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
895427bd
JS
10332 phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
10333 bf_get(cfg_xib, mbx_sli4_parameters));
10334
10335 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
10336 !phba->nvme_support) {
10337 phba->nvme_support = 0;
10338 phba->nvmet_support = 0;
2d7dbc4c 10339 phba->cfg_nvmet_mrq = 0;
895427bd
JS
10340 phba->cfg_nvme_io_channel = 0;
10341 phba->io_channel_irqs = phba->cfg_fcp_io_channel;
10342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
10343 "6101 Disabling NVME support: "
10344 "Not supported by firmware: %d %d\n",
10345 bf_get(cfg_nvme, mbx_sli4_parameters),
10346 bf_get(cfg_xib, mbx_sli4_parameters));
10347
10348 /* If firmware doesn't support NVME, just use SCSI support */
10349 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
10350 return -ENODEV;
10351 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
10352 }
0558056c 10353
f358dd0c
JS
10354 if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
10355 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
10356
0cf07f84
JS
10357 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
10358 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
10359
0558056c
JS
10360 /* Make sure that sge_supp_len can be handled by the driver */
10361 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
10362 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
10363
b5c53958
JS
10364 /*
10365 * Issue IOs with CDB embedded in WQE to minimized the number
10366 * of DMAs the firmware has to do. Setting this to 1 also forces
10367 * the driver to use 128 bytes WQEs for FCP IOs.
10368 */
10369 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
10370 phba->fcp_embed_io = 1;
10371 else
10372 phba->fcp_embed_io = 0;
7bdedb34
JS
10373
10374 /*
10375 * Check if the SLI port supports MDS Diagnostics
10376 */
10377 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
10378 phba->mds_diags_support = 1;
10379 else
10380 phba->mds_diags_support = 0;
fedd3b7b
JS
10381 return 0;
10382}
10383
da0436e9
JS
10384/**
10385 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
10386 * @pdev: pointer to PCI device
10387 * @pid: pointer to PCI device identifier
10388 *
10389 * This routine is to be called to attach a device with SLI-3 interface spec
10390 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
10391 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
10392 * information of the device and driver to see if the driver state that it can
10393 * support this kind of device. If the match is successful, the driver core
10394 * invokes this routine. If this routine determines it can claim the HBA, it
10395 * does all the initialization that it needs to do to handle the HBA properly.
10396 *
10397 * Return code
10398 * 0 - driver can claim the device
10399 * negative value - driver can not claim the device
10400 **/
6f039790 10401static int
da0436e9
JS
10402lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
10403{
10404 struct lpfc_hba *phba;
10405 struct lpfc_vport *vport = NULL;
6669f9bb 10406 struct Scsi_Host *shost = NULL;
da0436e9
JS
10407 int error;
10408 uint32_t cfg_mode, intr_mode;
10409
10410 /* Allocate memory for HBA structure */
10411 phba = lpfc_hba_alloc(pdev);
10412 if (!phba)
10413 return -ENOMEM;
10414
10415 /* Perform generic PCI device enabling operation */
10416 error = lpfc_enable_pci_dev(phba);
079b5c91 10417 if (error)
da0436e9 10418 goto out_free_phba;
da0436e9
JS
10419
10420 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
10421 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
10422 if (error)
10423 goto out_disable_pci_dev;
10424
10425 /* Set up SLI-3 specific device PCI memory space */
10426 error = lpfc_sli_pci_mem_setup(phba);
10427 if (error) {
10428 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10429 "1402 Failed to set up pci memory space.\n");
10430 goto out_disable_pci_dev;
10431 }
10432
da0436e9
JS
10433 /* Set up SLI-3 specific device driver resources */
10434 error = lpfc_sli_driver_resource_setup(phba);
10435 if (error) {
10436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10437 "1404 Failed to set up driver resource.\n");
10438 goto out_unset_pci_mem_s3;
10439 }
10440
10441 /* Initialize and populate the iocb list per host */
d1f525aa 10442
da0436e9
JS
10443 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
10444 if (error) {
10445 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10446 "1405 Failed to initialize iocb list.\n");
10447 goto out_unset_driver_resource_s3;
10448 }
10449
10450 /* Set up common device driver resources */
10451 error = lpfc_setup_driver_resource_phase2(phba);
10452 if (error) {
10453 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10454 "1406 Failed to set up driver resource.\n");
10455 goto out_free_iocb_list;
10456 }
10457
079b5c91
JS
10458 /* Get the default values for Model Name and Description */
10459 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
10460
da0436e9
JS
10461 /* Create SCSI host to the physical port */
10462 error = lpfc_create_shost(phba);
10463 if (error) {
10464 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10465 "1407 Failed to create scsi host.\n");
10466 goto out_unset_driver_resource;
10467 }
10468
10469 /* Configure sysfs attributes */
10470 vport = phba->pport;
10471 error = lpfc_alloc_sysfs_attr(vport);
10472 if (error) {
10473 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10474 "1476 Failed to allocate sysfs attr\n");
10475 goto out_destroy_shost;
10476 }
10477
6669f9bb 10478 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
da0436e9
JS
10479 /* Now, trying to enable interrupt and bring up the device */
10480 cfg_mode = phba->cfg_use_msi;
10481 while (true) {
10482 /* Put device to a known state before enabling interrupt */
10483 lpfc_stop_port(phba);
10484 /* Configure and enable interrupt */
10485 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
10486 if (intr_mode == LPFC_INTR_ERROR) {
10487 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10488 "0431 Failed to enable interrupt.\n");
10489 error = -ENODEV;
10490 goto out_free_sysfs_attr;
10491 }
10492 /* SLI-3 HBA setup */
10493 if (lpfc_sli_hba_setup(phba)) {
10494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10495 "1477 Failed to set up hba\n");
10496 error = -ENODEV;
10497 goto out_remove_device;
10498 }
10499
10500 /* Wait 50ms for the interrupts of previous mailbox commands */
10501 msleep(50);
10502 /* Check active interrupts on message signaled interrupts */
10503 if (intr_mode == 0 ||
10504 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
10505 /* Log the current active interrupt mode */
10506 phba->intr_mode = intr_mode;
10507 lpfc_log_intr_mode(phba, intr_mode);
10508 break;
10509 } else {
10510 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10511 "0447 Configure interrupt mode (%d) "
10512 "failed active interrupt test.\n",
10513 intr_mode);
10514 /* Disable the current interrupt mode */
10515 lpfc_sli_disable_intr(phba);
10516 /* Try next level of interrupt mode */
10517 cfg_mode = --intr_mode;
10518 }
10519 }
10520
10521 /* Perform post initialization setup */
10522 lpfc_post_init_setup(phba);
10523
10524 /* Check if there are static vports to be created. */
10525 lpfc_create_static_vport(phba);
10526
10527 return 0;
10528
10529out_remove_device:
10530 lpfc_unset_hba(phba);
10531out_free_sysfs_attr:
10532 lpfc_free_sysfs_attr(vport);
10533out_destroy_shost:
10534 lpfc_destroy_shost(phba);
10535out_unset_driver_resource:
10536 lpfc_unset_driver_resource_phase2(phba);
10537out_free_iocb_list:
10538 lpfc_free_iocb_list(phba);
10539out_unset_driver_resource_s3:
10540 lpfc_sli_driver_resource_unset(phba);
10541out_unset_pci_mem_s3:
10542 lpfc_sli_pci_mem_unset(phba);
10543out_disable_pci_dev:
10544 lpfc_disable_pci_dev(phba);
6669f9bb
JS
10545 if (shost)
10546 scsi_host_put(shost);
da0436e9
JS
10547out_free_phba:
10548 lpfc_hba_free(phba);
10549 return error;
10550}
10551
10552/**
10553 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
10554 * @pdev: pointer to PCI device
10555 *
10556 * This routine is to be called to disattach a device with SLI-3 interface
10557 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
10558 * removed from PCI bus, it performs all the necessary cleanup for the HBA
10559 * device to be removed from the PCI subsystem properly.
10560 **/
6f039790 10561static void
da0436e9
JS
10562lpfc_pci_remove_one_s3(struct pci_dev *pdev)
10563{
10564 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10565 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
10566 struct lpfc_vport **vports;
10567 struct lpfc_hba *phba = vport->phba;
10568 int i;
da0436e9
JS
10569
10570 spin_lock_irq(&phba->hbalock);
10571 vport->load_flag |= FC_UNLOADING;
10572 spin_unlock_irq(&phba->hbalock);
10573
10574 lpfc_free_sysfs_attr(vport);
10575
10576 /* Release all the vports against this physical port */
10577 vports = lpfc_create_vport_work_array(phba);
10578 if (vports != NULL)
587a37f6
JS
10579 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
10580 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
10581 continue;
da0436e9 10582 fc_vport_terminate(vports[i]->fc_vport);
587a37f6 10583 }
da0436e9
JS
10584 lpfc_destroy_vport_work_array(phba, vports);
10585
10586 /* Remove FC host and then SCSI host with the physical port */
10587 fc_remove_host(shost);
10588 scsi_remove_host(shost);
d613b6a7 10589
da0436e9
JS
10590 lpfc_cleanup(vport);
10591
10592 /*
10593 * Bring down the SLI Layer. This step disable all interrupts,
10594 * clears the rings, discards all mailbox commands, and resets
10595 * the HBA.
10596 */
10597
48e34d0f 10598 /* HBA interrupt will be disabled after this call */
da0436e9
JS
10599 lpfc_sli_hba_down(phba);
10600 /* Stop kthread signal shall trigger work_done one more time */
10601 kthread_stop(phba->worker_thread);
10602 /* Final cleanup of txcmplq and reset the HBA */
10603 lpfc_sli_brdrestart(phba);
10604
72859909
JS
10605 kfree(phba->vpi_bmask);
10606 kfree(phba->vpi_ids);
10607
da0436e9
JS
10608 lpfc_stop_hba_timers(phba);
10609 spin_lock_irq(&phba->hbalock);
10610 list_del_init(&vport->listentry);
10611 spin_unlock_irq(&phba->hbalock);
10612
10613 lpfc_debugfs_terminate(vport);
10614
912e3acd
JS
10615 /* Disable SR-IOV if enabled */
10616 if (phba->cfg_sriov_nr_virtfn)
10617 pci_disable_sriov(pdev);
10618
da0436e9
JS
10619 /* Disable interrupt */
10620 lpfc_sli_disable_intr(phba);
10621
da0436e9
JS
10622 scsi_host_put(shost);
10623
10624 /*
10625 * Call scsi_free before mem_free since scsi bufs are released to their
10626 * corresponding pools here.
10627 */
10628 lpfc_scsi_free(phba);
10629 lpfc_mem_free_all(phba);
10630
10631 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
10632 phba->hbqslimp.virt, phba->hbqslimp.phys);
10633
10634 /* Free resources associated with SLI2 interface */
10635 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
10636 phba->slim2p.virt, phba->slim2p.phys);
10637
10638 /* unmap adapter SLIM and Control Registers */
10639 iounmap(phba->ctrl_regs_memmap_p);
10640 iounmap(phba->slim_memmap_p);
10641
10642 lpfc_hba_free(phba);
10643
e0c0483c 10644 pci_release_mem_regions(pdev);
da0436e9
JS
10645 pci_disable_device(pdev);
10646}
10647
10648/**
10649 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
10650 * @pdev: pointer to PCI device
10651 * @msg: power management message
10652 *
10653 * This routine is to be called from the kernel's PCI subsystem to support
10654 * system Power Management (PM) to device with SLI-3 interface spec. When
10655 * PM invokes this method, it quiesces the device by stopping the driver's
10656 * worker thread for the device, turning off device's interrupt and DMA,
10657 * and bring the device offline. Note that as the driver implements the
10658 * minimum PM requirements to a power-aware driver's PM support for the
10659 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
10660 * to the suspend() method call will be treated as SUSPEND and the driver will
10661 * fully reinitialize its device during resume() method call, the driver will
10662 * set device to PCI_D3hot state in PCI config space instead of setting it
10663 * according to the @msg provided by the PM.
10664 *
10665 * Return code
10666 * 0 - driver suspended the device
10667 * Error otherwise
10668 **/
10669static int
10670lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
10671{
10672 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10673 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10674
10675 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10676 "0473 PCI device Power Management suspend.\n");
10677
10678 /* Bring down the device */
618a5230 10679 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
da0436e9
JS
10680 lpfc_offline(phba);
10681 kthread_stop(phba->worker_thread);
10682
10683 /* Disable interrupt from device */
10684 lpfc_sli_disable_intr(phba);
10685
10686 /* Save device state to PCI config space */
10687 pci_save_state(pdev);
10688 pci_set_power_state(pdev, PCI_D3hot);
10689
10690 return 0;
10691}
10692
10693/**
10694 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
10695 * @pdev: pointer to PCI device
10696 *
10697 * This routine is to be called from the kernel's PCI subsystem to support
10698 * system Power Management (PM) to device with SLI-3 interface spec. When PM
10699 * invokes this method, it restores the device's PCI config space state and
10700 * fully reinitializes the device and brings it online. Note that as the
10701 * driver implements the minimum PM requirements to a power-aware driver's
10702 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
10703 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
10704 * driver will fully reinitialize its device during resume() method call,
10705 * the device will be set to PCI_D0 directly in PCI config space before
10706 * restoring the state.
10707 *
10708 * Return code
10709 * 0 - driver suspended the device
10710 * Error otherwise
10711 **/
10712static int
10713lpfc_pci_resume_one_s3(struct pci_dev *pdev)
10714{
10715 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10716 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10717 uint32_t intr_mode;
10718 int error;
10719
10720 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10721 "0452 PCI device Power Management resume.\n");
10722
10723 /* Restore device state from PCI config space */
10724 pci_set_power_state(pdev, PCI_D0);
10725 pci_restore_state(pdev);
0d878419 10726
1dfb5a47
JS
10727 /*
10728 * As the new kernel behavior of pci_restore_state() API call clears
10729 * device saved_state flag, need to save the restored state again.
10730 */
10731 pci_save_state(pdev);
10732
da0436e9
JS
10733 if (pdev->is_busmaster)
10734 pci_set_master(pdev);
10735
10736 /* Startup the kernel thread for this host adapter. */
10737 phba->worker_thread = kthread_run(lpfc_do_work, phba,
10738 "lpfc_worker_%d", phba->brd_no);
10739 if (IS_ERR(phba->worker_thread)) {
10740 error = PTR_ERR(phba->worker_thread);
10741 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10742 "0434 PM resume failed to start worker "
10743 "thread: error=x%x.\n", error);
10744 return error;
10745 }
10746
10747 /* Configure and enable interrupt */
10748 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
10749 if (intr_mode == LPFC_INTR_ERROR) {
10750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10751 "0430 PM resume Failed to enable interrupt\n");
10752 return -EIO;
10753 } else
10754 phba->intr_mode = intr_mode;
10755
10756 /* Restart HBA and bring it online */
10757 lpfc_sli_brdrestart(phba);
10758 lpfc_online(phba);
10759
10760 /* Log the current active interrupt mode */
10761 lpfc_log_intr_mode(phba, phba->intr_mode);
10762
10763 return 0;
10764}
10765
891478a2
JS
10766/**
10767 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
10768 * @phba: pointer to lpfc hba data structure.
10769 *
10770 * This routine is called to prepare the SLI3 device for PCI slot recover. It
e2af0d2e 10771 * aborts all the outstanding SCSI I/Os to the pci device.
891478a2
JS
10772 **/
10773static void
10774lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
10775{
10776 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10777 "2723 PCI channel I/O abort preparing for recovery\n");
e2af0d2e
JS
10778
10779 /*
10780 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
10781 * and let the SCSI mid-layer to retry them to recover.
10782 */
db55fba8 10783 lpfc_sli_abort_fcp_rings(phba);
891478a2
JS
10784}
10785
0d878419
JS
10786/**
10787 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
10788 * @phba: pointer to lpfc hba data structure.
10789 *
10790 * This routine is called to prepare the SLI3 device for PCI slot reset. It
10791 * disables the device interrupt and pci device, and aborts the internal FCP
10792 * pending I/Os.
10793 **/
10794static void
10795lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
10796{
0d878419 10797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
891478a2 10798 "2710 PCI channel disable preparing for reset\n");
e2af0d2e 10799
75baf696 10800 /* Block any management I/Os to the device */
618a5230 10801 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
75baf696 10802
e2af0d2e
JS
10803 /* Block all SCSI devices' I/Os on the host */
10804 lpfc_scsi_dev_block(phba);
10805
ea714f3d
JS
10806 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
10807 lpfc_sli_flush_fcp_rings(phba);
10808
e2af0d2e
JS
10809 /* stop all timers */
10810 lpfc_stop_hba_timers(phba);
10811
0d878419
JS
10812 /* Disable interrupt and pci device */
10813 lpfc_sli_disable_intr(phba);
10814 pci_disable_device(phba->pcidev);
0d878419
JS
10815}
10816
10817/**
10818 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
10819 * @phba: pointer to lpfc hba data structure.
10820 *
10821 * This routine is called to prepare the SLI3 device for PCI slot permanently
10822 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
10823 * pending I/Os.
10824 **/
10825static void
75baf696 10826lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
0d878419
JS
10827{
10828 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
891478a2 10829 "2711 PCI channel permanent disable for failure\n");
e2af0d2e
JS
10830 /* Block all SCSI devices' I/Os on the host */
10831 lpfc_scsi_dev_block(phba);
10832
10833 /* stop all timers */
10834 lpfc_stop_hba_timers(phba);
10835
0d878419
JS
10836 /* Clean up all driver's outstanding SCSI I/Os */
10837 lpfc_sli_flush_fcp_rings(phba);
10838}
10839
da0436e9
JS
10840/**
10841 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
10842 * @pdev: pointer to PCI device.
10843 * @state: the current PCI connection state.
10844 *
10845 * This routine is called from the PCI subsystem for I/O error handling to
10846 * device with SLI-3 interface spec. This function is called by the PCI
10847 * subsystem after a PCI bus error affecting this device has been detected.
10848 * When this function is invoked, it will need to stop all the I/Os and
10849 * interrupt(s) to the device. Once that is done, it will return
10850 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
10851 * as desired.
10852 *
10853 * Return codes
0d878419 10854 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
da0436e9
JS
10855 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
10856 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10857 **/
10858static pci_ers_result_t
10859lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
10860{
10861 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10862 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
da0436e9 10863
0d878419
JS
10864 switch (state) {
10865 case pci_channel_io_normal:
891478a2
JS
10866 /* Non-fatal error, prepare for recovery */
10867 lpfc_sli_prep_dev_for_recover(phba);
0d878419
JS
10868 return PCI_ERS_RESULT_CAN_RECOVER;
10869 case pci_channel_io_frozen:
10870 /* Fatal error, prepare for slot reset */
10871 lpfc_sli_prep_dev_for_reset(phba);
10872 return PCI_ERS_RESULT_NEED_RESET;
10873 case pci_channel_io_perm_failure:
10874 /* Permanent failure, prepare for device down */
75baf696 10875 lpfc_sli_prep_dev_for_perm_failure(phba);
da0436e9 10876 return PCI_ERS_RESULT_DISCONNECT;
0d878419
JS
10877 default:
10878 /* Unknown state, prepare and request slot reset */
10879 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10880 "0472 Unknown PCI error state: x%x\n", state);
10881 lpfc_sli_prep_dev_for_reset(phba);
10882 return PCI_ERS_RESULT_NEED_RESET;
da0436e9 10883 }
da0436e9
JS
10884}
10885
10886/**
10887 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
10888 * @pdev: pointer to PCI device.
10889 *
10890 * This routine is called from the PCI subsystem for error handling to
10891 * device with SLI-3 interface spec. This is called after PCI bus has been
10892 * reset to restart the PCI card from scratch, as if from a cold-boot.
10893 * During the PCI subsystem error recovery, after driver returns
10894 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
10895 * recovery and then call this routine before calling the .resume method
10896 * to recover the device. This function will initialize the HBA device,
10897 * enable the interrupt, but it will just put the HBA to offline state
10898 * without passing any I/O traffic.
10899 *
10900 * Return codes
10901 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
10902 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10903 */
10904static pci_ers_result_t
10905lpfc_io_slot_reset_s3(struct pci_dev *pdev)
10906{
10907 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10908 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10909 struct lpfc_sli *psli = &phba->sli;
10910 uint32_t intr_mode;
10911
10912 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
10913 if (pci_enable_device_mem(pdev)) {
10914 printk(KERN_ERR "lpfc: Cannot re-enable "
10915 "PCI device after reset.\n");
10916 return PCI_ERS_RESULT_DISCONNECT;
10917 }
10918
10919 pci_restore_state(pdev);
1dfb5a47
JS
10920
10921 /*
10922 * As the new kernel behavior of pci_restore_state() API call clears
10923 * device saved_state flag, need to save the restored state again.
10924 */
10925 pci_save_state(pdev);
10926
da0436e9
JS
10927 if (pdev->is_busmaster)
10928 pci_set_master(pdev);
10929
10930 spin_lock_irq(&phba->hbalock);
10931 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
10932 spin_unlock_irq(&phba->hbalock);
10933
10934 /* Configure and enable interrupt */
10935 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
10936 if (intr_mode == LPFC_INTR_ERROR) {
10937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10938 "0427 Cannot re-enable interrupt after "
10939 "slot reset.\n");
10940 return PCI_ERS_RESULT_DISCONNECT;
10941 } else
10942 phba->intr_mode = intr_mode;
10943
75baf696 10944 /* Take device offline, it will perform cleanup */
618a5230 10945 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
da0436e9
JS
10946 lpfc_offline(phba);
10947 lpfc_sli_brdrestart(phba);
10948
10949 /* Log the current active interrupt mode */
10950 lpfc_log_intr_mode(phba, phba->intr_mode);
10951
10952 return PCI_ERS_RESULT_RECOVERED;
10953}
10954
10955/**
10956 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
10957 * @pdev: pointer to PCI device
10958 *
10959 * This routine is called from the PCI subsystem for error handling to device
10960 * with SLI-3 interface spec. It is called when kernel error recovery tells
10961 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
10962 * error recovery. After this call, traffic can start to flow from this device
10963 * again.
10964 */
10965static void
10966lpfc_io_resume_s3(struct pci_dev *pdev)
10967{
10968 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10969 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3772a991 10970
e2af0d2e 10971 /* Bring device online, it will be no-op for non-fatal error resume */
da0436e9 10972 lpfc_online(phba);
0d878419
JS
10973
10974 /* Clean up Advanced Error Reporting (AER) if needed */
10975 if (phba->hba_flag & HBA_AER_ENABLED)
10976 pci_cleanup_aer_uncorrect_error_status(pdev);
da0436e9 10977}
3772a991 10978
da0436e9
JS
10979/**
10980 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
10981 * @phba: pointer to lpfc hba data structure.
10982 *
10983 * returns the number of ELS/CT IOCBs to reserve
10984 **/
10985int
10986lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
10987{
10988 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
10989
f1126688
JS
10990 if (phba->sli_rev == LPFC_SLI_REV4) {
10991 if (max_xri <= 100)
6a9c52cf 10992 return 10;
f1126688 10993 else if (max_xri <= 256)
6a9c52cf 10994 return 25;
f1126688 10995 else if (max_xri <= 512)
6a9c52cf 10996 return 50;
f1126688 10997 else if (max_xri <= 1024)
6a9c52cf 10998 return 100;
8a9d2e80 10999 else if (max_xri <= 1536)
6a9c52cf 11000 return 150;
8a9d2e80
JS
11001 else if (max_xri <= 2048)
11002 return 200;
11003 else
11004 return 250;
f1126688
JS
11005 } else
11006 return 0;
3772a991
JS
11007}
11008
895427bd
JS
11009/**
11010 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
11011 * @phba: pointer to lpfc hba data structure.
11012 *
f358dd0c 11013 * returns the number of ELS/CT + NVMET IOCBs to reserve
895427bd
JS
11014 **/
11015int
11016lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
11017{
11018 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
11019
f358dd0c
JS
11020 if (phba->nvmet_support)
11021 max_xri += LPFC_NVMET_BUF_POST;
895427bd
JS
11022 return max_xri;
11023}
11024
11025
52d52440
JS
11026/**
11027 * lpfc_write_firmware - attempt to write a firmware image to the port
52d52440 11028 * @fw: pointer to firmware image returned from request_firmware.
ce396282 11029 * @phba: pointer to lpfc hba data structure.
52d52440 11030 *
52d52440 11031 **/
ce396282
JS
11032static void
11033lpfc_write_firmware(const struct firmware *fw, void *context)
52d52440 11034{
ce396282 11035 struct lpfc_hba *phba = (struct lpfc_hba *)context;
6b5151fd 11036 char fwrev[FW_REV_STR_SIZE];
ce396282 11037 struct lpfc_grp_hdr *image;
52d52440
JS
11038 struct list_head dma_buffer_list;
11039 int i, rc = 0;
11040 struct lpfc_dmabuf *dmabuf, *next;
11041 uint32_t offset = 0, temp_offset = 0;
6b6ef5db 11042 uint32_t magic_number, ftype, fid, fsize;
52d52440 11043
c71ab861 11044 /* It can be null in no-wait mode, sanity check */
ce396282
JS
11045 if (!fw) {
11046 rc = -ENXIO;
11047 goto out;
11048 }
11049 image = (struct lpfc_grp_hdr *)fw->data;
11050
6b6ef5db
JS
11051 magic_number = be32_to_cpu(image->magic_number);
11052 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
11053 fid = bf_get_be32(lpfc_grp_hdr_id, image),
11054 fsize = be32_to_cpu(image->size);
11055
52d52440 11056 INIT_LIST_HEAD(&dma_buffer_list);
6b6ef5db
JS
11057 if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
11058 magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
11059 ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
52d52440
JS
11060 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11061 "3022 Invalid FW image found. "
efe583c6 11062 "Magic:%x Type:%x ID:%x Size %d %zd\n",
6b6ef5db 11063 magic_number, ftype, fid, fsize, fw->size);
ce396282
JS
11064 rc = -EINVAL;
11065 goto release_out;
52d52440
JS
11066 }
11067 lpfc_decode_firmware_rev(phba, fwrev, 1);
88a2cfbb 11068 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
52d52440 11069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
ce396282 11070 "3023 Updating Firmware, Current Version:%s "
52d52440 11071 "New Version:%s\n",
88a2cfbb 11072 fwrev, image->revision);
52d52440
JS
11073 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
11074 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
11075 GFP_KERNEL);
11076 if (!dmabuf) {
11077 rc = -ENOMEM;
ce396282 11078 goto release_out;
52d52440
JS
11079 }
11080 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
11081 SLI4_PAGE_SIZE,
11082 &dmabuf->phys,
11083 GFP_KERNEL);
11084 if (!dmabuf->virt) {
11085 kfree(dmabuf);
11086 rc = -ENOMEM;
ce396282 11087 goto release_out;
52d52440
JS
11088 }
11089 list_add_tail(&dmabuf->list, &dma_buffer_list);
11090 }
11091 while (offset < fw->size) {
11092 temp_offset = offset;
11093 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
079b5c91 11094 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
52d52440
JS
11095 memcpy(dmabuf->virt,
11096 fw->data + temp_offset,
079b5c91
JS
11097 fw->size - temp_offset);
11098 temp_offset = fw->size;
52d52440
JS
11099 break;
11100 }
52d52440
JS
11101 memcpy(dmabuf->virt, fw->data + temp_offset,
11102 SLI4_PAGE_SIZE);
88a2cfbb 11103 temp_offset += SLI4_PAGE_SIZE;
52d52440
JS
11104 }
11105 rc = lpfc_wr_object(phba, &dma_buffer_list,
11106 (fw->size - offset), &offset);
ce396282
JS
11107 if (rc)
11108 goto release_out;
52d52440
JS
11109 }
11110 rc = offset;
11111 }
ce396282
JS
11112
11113release_out:
52d52440
JS
11114 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
11115 list_del(&dmabuf->list);
11116 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
11117 dmabuf->virt, dmabuf->phys);
11118 kfree(dmabuf);
11119 }
ce396282
JS
11120 release_firmware(fw);
11121out:
11122 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
c71ab861 11123 "3024 Firmware update done: %d.\n", rc);
ce396282 11124 return;
52d52440
JS
11125}
11126
c71ab861
JS
11127/**
11128 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
11129 * @phba: pointer to lpfc hba data structure.
11130 *
11131 * This routine is called to perform Linux generic firmware upgrade on device
11132 * that supports such feature.
11133 **/
11134int
11135lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
11136{
11137 uint8_t file_name[ELX_MODEL_NAME_SIZE];
11138 int ret;
11139 const struct firmware *fw;
11140
11141 /* Only supported on SLI4 interface type 2 for now */
11142 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
11143 LPFC_SLI_INTF_IF_TYPE_2)
11144 return -EPERM;
11145
11146 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
11147
11148 if (fw_upgrade == INT_FW_UPGRADE) {
11149 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
11150 file_name, &phba->pcidev->dev,
11151 GFP_KERNEL, (void *)phba,
11152 lpfc_write_firmware);
11153 } else if (fw_upgrade == RUN_FW_UPGRADE) {
11154 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
11155 if (!ret)
11156 lpfc_write_firmware(fw, (void *)phba);
11157 } else {
11158 ret = -EINVAL;
11159 }
11160
11161 return ret;
11162}
11163
3772a991 11164/**
da0436e9 11165 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
3772a991
JS
11166 * @pdev: pointer to PCI device
11167 * @pid: pointer to PCI device identifier
11168 *
da0436e9
JS
11169 * This routine is called from the kernel's PCI subsystem to device with
11170 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
3772a991 11171 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
da0436e9
JS
11172 * information of the device and driver to see if the driver state that it
11173 * can support this kind of device. If the match is successful, the driver
11174 * core invokes this routine. If this routine determines it can claim the HBA,
11175 * it does all the initialization that it needs to do to handle the HBA
11176 * properly.
3772a991
JS
11177 *
11178 * Return code
11179 * 0 - driver can claim the device
11180 * negative value - driver can not claim the device
11181 **/
6f039790 11182static int
da0436e9 11183lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
3772a991
JS
11184{
11185 struct lpfc_hba *phba;
11186 struct lpfc_vport *vport = NULL;
6669f9bb 11187 struct Scsi_Host *shost = NULL;
6c621a22 11188 int error;
3772a991
JS
11189 uint32_t cfg_mode, intr_mode;
11190
11191 /* Allocate memory for HBA structure */
11192 phba = lpfc_hba_alloc(pdev);
11193 if (!phba)
11194 return -ENOMEM;
11195
11196 /* Perform generic PCI device enabling operation */
11197 error = lpfc_enable_pci_dev(phba);
079b5c91 11198 if (error)
3772a991 11199 goto out_free_phba;
3772a991 11200
da0436e9
JS
11201 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
11202 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
3772a991
JS
11203 if (error)
11204 goto out_disable_pci_dev;
11205
da0436e9
JS
11206 /* Set up SLI-4 specific device PCI memory space */
11207 error = lpfc_sli4_pci_mem_setup(phba);
3772a991
JS
11208 if (error) {
11209 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 11210 "1410 Failed to set up pci memory space.\n");
3772a991
JS
11211 goto out_disable_pci_dev;
11212 }
11213
da0436e9
JS
11214 /* Set up SLI-4 Specific device driver resources */
11215 error = lpfc_sli4_driver_resource_setup(phba);
3772a991
JS
11216 if (error) {
11217 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9
JS
11218 "1412 Failed to set up driver resource.\n");
11219 goto out_unset_pci_mem_s4;
3772a991
JS
11220 }
11221
19ca7609 11222 INIT_LIST_HEAD(&phba->active_rrq_list);
7d791df7 11223 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
19ca7609 11224
3772a991
JS
11225 /* Set up common device driver resources */
11226 error = lpfc_setup_driver_resource_phase2(phba);
11227 if (error) {
11228 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 11229 "1414 Failed to set up driver resource.\n");
6c621a22 11230 goto out_unset_driver_resource_s4;
3772a991
JS
11231 }
11232
079b5c91
JS
11233 /* Get the default values for Model Name and Description */
11234 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
11235
3772a991
JS
11236 /* Create SCSI host to the physical port */
11237 error = lpfc_create_shost(phba);
11238 if (error) {
11239 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 11240 "1415 Failed to create scsi host.\n");
3772a991
JS
11241 goto out_unset_driver_resource;
11242 }
9399627f 11243
5b75da2f 11244 /* Configure sysfs attributes */
3772a991
JS
11245 vport = phba->pport;
11246 error = lpfc_alloc_sysfs_attr(vport);
11247 if (error) {
9399627f 11248 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 11249 "1416 Failed to allocate sysfs attr\n");
3772a991 11250 goto out_destroy_shost;
98c9ea5c 11251 }
875fbdfe 11252
6669f9bb 11253 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
3772a991 11254 /* Now, trying to enable interrupt and bring up the device */
5b75da2f 11255 cfg_mode = phba->cfg_use_msi;
5b75da2f 11256
7b15db32
JS
11257 /* Put device to a known state before enabling interrupt */
11258 lpfc_stop_port(phba);
895427bd 11259
7b15db32
JS
11260 /* Configure and enable interrupt */
11261 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
11262 if (intr_mode == LPFC_INTR_ERROR) {
11263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11264 "0426 Failed to enable interrupt.\n");
11265 error = -ENODEV;
11266 goto out_free_sysfs_attr;
11267 }
11268 /* Default to single EQ for non-MSI-X */
895427bd
JS
11269 if (phba->intr_type != MSIX) {
11270 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
11271 phba->cfg_fcp_io_channel = 1;
2d7dbc4c 11272 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
895427bd 11273 phba->cfg_nvme_io_channel = 1;
2d7dbc4c
JS
11274 if (phba->nvmet_support)
11275 phba->cfg_nvmet_mrq = 1;
11276 }
895427bd
JS
11277 phba->io_channel_irqs = 1;
11278 }
11279
7b15db32
JS
11280 /* Set up SLI-4 HBA */
11281 if (lpfc_sli4_hba_setup(phba)) {
11282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11283 "1421 Failed to set up hba\n");
11284 error = -ENODEV;
11285 goto out_disable_intr;
98c9ea5c 11286 }
858c9f6c 11287
7b15db32
JS
11288 /* Log the current active interrupt mode */
11289 phba->intr_mode = intr_mode;
11290 lpfc_log_intr_mode(phba, intr_mode);
11291
3772a991
JS
11292 /* Perform post initialization setup */
11293 lpfc_post_init_setup(phba);
dea3101e 11294
01649561
JS
11295 /* NVME support in FW earlier in the driver load corrects the
11296 * FC4 type making a check for nvme_support unnecessary.
11297 */
11298 if ((phba->nvmet_support == 0) &&
11299 (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
11300 /* Create NVME binding with nvme_fc_transport. This
d1f525aa
JS
11301 * ensures the vport is initialized. If the localport
11302 * create fails, it should not unload the driver to
11303 * support field issues.
01649561
JS
11304 */
11305 error = lpfc_nvme_create_localport(vport);
11306 if (error) {
11307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11308 "6004 NVME registration failed, "
11309 "error x%x\n",
11310 error);
01649561
JS
11311 }
11312 }
895427bd 11313
c71ab861
JS
11314 /* check for firmware upgrade or downgrade */
11315 if (phba->cfg_request_firmware_upgrade)
db6f1c2f 11316 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
52d52440 11317
1c6834a7
JS
11318 /* Check if there are static vports to be created. */
11319 lpfc_create_static_vport(phba);
dea3101e
JB
11320 return 0;
11321
da0436e9
JS
11322out_disable_intr:
11323 lpfc_sli4_disable_intr(phba);
5b75da2f
JS
11324out_free_sysfs_attr:
11325 lpfc_free_sysfs_attr(vport);
3772a991
JS
11326out_destroy_shost:
11327 lpfc_destroy_shost(phba);
11328out_unset_driver_resource:
11329 lpfc_unset_driver_resource_phase2(phba);
da0436e9
JS
11330out_unset_driver_resource_s4:
11331 lpfc_sli4_driver_resource_unset(phba);
11332out_unset_pci_mem_s4:
11333 lpfc_sli4_pci_mem_unset(phba);
3772a991
JS
11334out_disable_pci_dev:
11335 lpfc_disable_pci_dev(phba);
6669f9bb
JS
11336 if (shost)
11337 scsi_host_put(shost);
2e0fef85 11338out_free_phba:
3772a991 11339 lpfc_hba_free(phba);
dea3101e
JB
11340 return error;
11341}
11342
e59058c4 11343/**
da0436e9 11344 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
e59058c4
JS
11345 * @pdev: pointer to PCI device
11346 *
da0436e9
JS
11347 * This routine is called from the kernel's PCI subsystem to device with
11348 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
3772a991
JS
11349 * removed from PCI bus, it performs all the necessary cleanup for the HBA
11350 * device to be removed from the PCI subsystem properly.
e59058c4 11351 **/
6f039790 11352static void
da0436e9 11353lpfc_pci_remove_one_s4(struct pci_dev *pdev)
dea3101e 11354{
da0436e9 11355 struct Scsi_Host *shost = pci_get_drvdata(pdev);
2e0fef85 11356 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
eada272d 11357 struct lpfc_vport **vports;
da0436e9 11358 struct lpfc_hba *phba = vport->phba;
eada272d 11359 int i;
8a4df120 11360
da0436e9 11361 /* Mark the device unloading flag */
549e55cd 11362 spin_lock_irq(&phba->hbalock);
51ef4c26 11363 vport->load_flag |= FC_UNLOADING;
549e55cd 11364 spin_unlock_irq(&phba->hbalock);
2e0fef85 11365
da0436e9 11366 /* Free the HBA sysfs attributes */
858c9f6c
JS
11367 lpfc_free_sysfs_attr(vport);
11368
eada272d
JS
11369 /* Release all the vports against this physical port */
11370 vports = lpfc_create_vport_work_array(phba);
11371 if (vports != NULL)
587a37f6
JS
11372 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
11373 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
11374 continue;
eada272d 11375 fc_vport_terminate(vports[i]->fc_vport);
587a37f6 11376 }
eada272d
JS
11377 lpfc_destroy_vport_work_array(phba, vports);
11378
11379 /* Remove FC host and then SCSI host with the physical port */
858c9f6c
JS
11380 fc_remove_host(shost);
11381 scsi_remove_host(shost);
da0436e9 11382
d613b6a7
JS
11383 /* Perform ndlp cleanup on the physical port. The nvme and nvmet
11384 * localports are destroyed after to cleanup all transport memory.
895427bd 11385 */
87af33fe 11386 lpfc_cleanup(vport);
d613b6a7
JS
11387 lpfc_nvmet_destroy_targetport(phba);
11388 lpfc_nvme_destroy_localport(vport);
87af33fe 11389
2e0fef85 11390 /*
da0436e9 11391 * Bring down the SLI Layer. This step disables all interrupts,
2e0fef85 11392 * clears the rings, discards all mailbox commands, and resets
da0436e9 11393 * the HBA FCoE function.
2e0fef85 11394 */
da0436e9
JS
11395 lpfc_debugfs_terminate(vport);
11396 lpfc_sli4_hba_unset(phba);
a257bf90 11397
858c9f6c
JS
11398 spin_lock_irq(&phba->hbalock);
11399 list_del_init(&vport->listentry);
11400 spin_unlock_irq(&phba->hbalock);
11401
3677a3a7 11402 /* Perform scsi free before driver resource_unset since scsi
da0436e9 11403 * buffers are released to their corresponding pools here.
2e0fef85
JS
11404 */
11405 lpfc_scsi_free(phba);
895427bd 11406 lpfc_nvme_free(phba);
01649561 11407 lpfc_free_iocb_list(phba);
67d12733 11408
da0436e9 11409 lpfc_sli4_driver_resource_unset(phba);
ed957684 11410
da0436e9
JS
11411 /* Unmap adapter Control and Doorbell registers */
11412 lpfc_sli4_pci_mem_unset(phba);
2e0fef85 11413
da0436e9
JS
11414 /* Release PCI resources and disable device's PCI function */
11415 scsi_host_put(shost);
11416 lpfc_disable_pci_dev(phba);
2e0fef85 11417
da0436e9 11418 /* Finally, free the driver's device data structure */
3772a991 11419 lpfc_hba_free(phba);
2e0fef85 11420
da0436e9 11421 return;
dea3101e
JB
11422}
11423
3a55b532 11424/**
da0436e9 11425 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
3a55b532
JS
11426 * @pdev: pointer to PCI device
11427 * @msg: power management message
11428 *
da0436e9
JS
11429 * This routine is called from the kernel's PCI subsystem to support system
11430 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
11431 * this method, it quiesces the device by stopping the driver's worker
11432 * thread for the device, turning off device's interrupt and DMA, and bring
11433 * the device offline. Note that as the driver implements the minimum PM
11434 * requirements to a power-aware driver's PM support for suspend/resume -- all
11435 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
11436 * method call will be treated as SUSPEND and the driver will fully
11437 * reinitialize its device during resume() method call, the driver will set
11438 * device to PCI_D3hot state in PCI config space instead of setting it
3772a991 11439 * according to the @msg provided by the PM.
3a55b532
JS
11440 *
11441 * Return code
3772a991
JS
11442 * 0 - driver suspended the device
11443 * Error otherwise
3a55b532
JS
11444 **/
11445static int
da0436e9 11446lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
3a55b532
JS
11447{
11448 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11449 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11450
11451 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
75baf696 11452 "2843 PCI device Power Management suspend.\n");
3a55b532
JS
11453
11454 /* Bring down the device */
618a5230 11455 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
3a55b532
JS
11456 lpfc_offline(phba);
11457 kthread_stop(phba->worker_thread);
11458
11459 /* Disable interrupt from device */
da0436e9 11460 lpfc_sli4_disable_intr(phba);
5350d872 11461 lpfc_sli4_queue_destroy(phba);
3a55b532
JS
11462
11463 /* Save device state to PCI config space */
11464 pci_save_state(pdev);
11465 pci_set_power_state(pdev, PCI_D3hot);
11466
11467 return 0;
11468}
11469
11470/**
da0436e9 11471 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
3a55b532
JS
11472 * @pdev: pointer to PCI device
11473 *
da0436e9
JS
11474 * This routine is called from the kernel's PCI subsystem to support system
11475 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes
11476 * this method, it restores the device's PCI config space state and fully
11477 * reinitializes the device and brings it online. Note that as the driver
11478 * implements the minimum PM requirements to a power-aware driver's PM for
11479 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
11480 * to the suspend() method call will be treated as SUSPEND and the driver
11481 * will fully reinitialize its device during resume() method call, the device
11482 * will be set to PCI_D0 directly in PCI config space before restoring the
11483 * state.
3a55b532
JS
11484 *
11485 * Return code
3772a991
JS
11486 * 0 - driver suspended the device
11487 * Error otherwise
3a55b532
JS
11488 **/
11489static int
da0436e9 11490lpfc_pci_resume_one_s4(struct pci_dev *pdev)
3a55b532
JS
11491{
11492 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11493 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
5b75da2f 11494 uint32_t intr_mode;
3a55b532
JS
11495 int error;
11496
11497 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
da0436e9 11498 "0292 PCI device Power Management resume.\n");
3a55b532
JS
11499
11500 /* Restore device state from PCI config space */
11501 pci_set_power_state(pdev, PCI_D0);
11502 pci_restore_state(pdev);
1dfb5a47
JS
11503
11504 /*
11505 * As the new kernel behavior of pci_restore_state() API call clears
11506 * device saved_state flag, need to save the restored state again.
11507 */
11508 pci_save_state(pdev);
11509
3a55b532
JS
11510 if (pdev->is_busmaster)
11511 pci_set_master(pdev);
11512
da0436e9 11513 /* Startup the kernel thread for this host adapter. */
3a55b532
JS
11514 phba->worker_thread = kthread_run(lpfc_do_work, phba,
11515 "lpfc_worker_%d", phba->brd_no);
11516 if (IS_ERR(phba->worker_thread)) {
11517 error = PTR_ERR(phba->worker_thread);
11518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 11519 "0293 PM resume failed to start worker "
3a55b532
JS
11520 "thread: error=x%x.\n", error);
11521 return error;
11522 }
11523
5b75da2f 11524 /* Configure and enable interrupt */
da0436e9 11525 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
5b75da2f 11526 if (intr_mode == LPFC_INTR_ERROR) {
3a55b532 11527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 11528 "0294 PM resume Failed to enable interrupt\n");
5b75da2f
JS
11529 return -EIO;
11530 } else
11531 phba->intr_mode = intr_mode;
3a55b532
JS
11532
11533 /* Restart HBA and bring it online */
11534 lpfc_sli_brdrestart(phba);
11535 lpfc_online(phba);
11536
5b75da2f
JS
11537 /* Log the current active interrupt mode */
11538 lpfc_log_intr_mode(phba, phba->intr_mode);
11539
3a55b532
JS
11540 return 0;
11541}
11542
75baf696
JS
11543/**
11544 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
11545 * @phba: pointer to lpfc hba data structure.
11546 *
11547 * This routine is called to prepare the SLI4 device for PCI slot recover. It
11548 * aborts all the outstanding SCSI I/Os to the pci device.
11549 **/
11550static void
11551lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
11552{
75baf696
JS
11553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11554 "2828 PCI channel I/O abort preparing for recovery\n");
11555 /*
11556 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
11557 * and let the SCSI mid-layer to retry them to recover.
11558 */
db55fba8 11559 lpfc_sli_abort_fcp_rings(phba);
75baf696
JS
11560}
11561
11562/**
11563 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
11564 * @phba: pointer to lpfc hba data structure.
11565 *
11566 * This routine is called to prepare the SLI4 device for PCI slot reset. It
11567 * disables the device interrupt and pci device, and aborts the internal FCP
11568 * pending I/Os.
11569 **/
11570static void
11571lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
11572{
11573 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11574 "2826 PCI channel disable preparing for reset\n");
11575
11576 /* Block any management I/Os to the device */
618a5230 11577 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
75baf696
JS
11578
11579 /* Block all SCSI devices' I/Os on the host */
11580 lpfc_scsi_dev_block(phba);
11581
ea714f3d
JS
11582 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
11583 lpfc_sli_flush_fcp_rings(phba);
11584
75baf696
JS
11585 /* stop all timers */
11586 lpfc_stop_hba_timers(phba);
11587
11588 /* Disable interrupt and pci device */
11589 lpfc_sli4_disable_intr(phba);
5350d872 11590 lpfc_sli4_queue_destroy(phba);
75baf696 11591 pci_disable_device(phba->pcidev);
75baf696
JS
11592}
11593
11594/**
11595 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
11596 * @phba: pointer to lpfc hba data structure.
11597 *
11598 * This routine is called to prepare the SLI4 device for PCI slot permanently
11599 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
11600 * pending I/Os.
11601 **/
11602static void
11603lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
11604{
11605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11606 "2827 PCI channel permanent disable for failure\n");
11607
11608 /* Block all SCSI devices' I/Os on the host */
11609 lpfc_scsi_dev_block(phba);
11610
11611 /* stop all timers */
11612 lpfc_stop_hba_timers(phba);
11613
11614 /* Clean up all driver's outstanding SCSI I/Os */
11615 lpfc_sli_flush_fcp_rings(phba);
11616}
11617
8d63f375 11618/**
da0436e9 11619 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
e59058c4
JS
11620 * @pdev: pointer to PCI device.
11621 * @state: the current PCI connection state.
8d63f375 11622 *
da0436e9
JS
11623 * This routine is called from the PCI subsystem for error handling to device
11624 * with SLI-4 interface spec. This function is called by the PCI subsystem
11625 * after a PCI bus error affecting this device has been detected. When this
11626 * function is invoked, it will need to stop all the I/Os and interrupt(s)
11627 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
11628 * for the PCI subsystem to perform proper recovery as desired.
e59058c4
JS
11629 *
11630 * Return codes
3772a991
JS
11631 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
11632 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
e59058c4 11633 **/
3772a991 11634static pci_ers_result_t
da0436e9 11635lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8d63f375 11636{
75baf696
JS
11637 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11638 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11639
11640 switch (state) {
11641 case pci_channel_io_normal:
11642 /* Non-fatal error, prepare for recovery */
11643 lpfc_sli4_prep_dev_for_recover(phba);
11644 return PCI_ERS_RESULT_CAN_RECOVER;
11645 case pci_channel_io_frozen:
11646 /* Fatal error, prepare for slot reset */
11647 lpfc_sli4_prep_dev_for_reset(phba);
11648 return PCI_ERS_RESULT_NEED_RESET;
11649 case pci_channel_io_perm_failure:
11650 /* Permanent failure, prepare for device down */
11651 lpfc_sli4_prep_dev_for_perm_failure(phba);
11652 return PCI_ERS_RESULT_DISCONNECT;
11653 default:
11654 /* Unknown state, prepare and request slot reset */
11655 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11656 "2825 Unknown PCI error state: x%x\n", state);
11657 lpfc_sli4_prep_dev_for_reset(phba);
11658 return PCI_ERS_RESULT_NEED_RESET;
11659 }
8d63f375
LV
11660}
11661
11662/**
da0436e9 11663 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
e59058c4
JS
11664 * @pdev: pointer to PCI device.
11665 *
da0436e9
JS
11666 * This routine is called from the PCI subsystem for error handling to device
11667 * with SLI-4 interface spec. It is called after PCI bus has been reset to
11668 * restart the PCI card from scratch, as if from a cold-boot. During the
11669 * PCI subsystem error recovery, after the driver returns
3772a991 11670 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
da0436e9
JS
11671 * recovery and then call this routine before calling the .resume method to
11672 * recover the device. This function will initialize the HBA device, enable
11673 * the interrupt, but it will just put the HBA to offline state without
11674 * passing any I/O traffic.
8d63f375 11675 *
e59058c4 11676 * Return codes
3772a991
JS
11677 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
11678 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8d63f375 11679 */
3772a991 11680static pci_ers_result_t
da0436e9 11681lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8d63f375 11682{
75baf696
JS
11683 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11684 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11685 struct lpfc_sli *psli = &phba->sli;
11686 uint32_t intr_mode;
11687
11688 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
11689 if (pci_enable_device_mem(pdev)) {
11690 printk(KERN_ERR "lpfc: Cannot re-enable "
11691 "PCI device after reset.\n");
11692 return PCI_ERS_RESULT_DISCONNECT;
11693 }
11694
11695 pci_restore_state(pdev);
0a96e975
JS
11696
11697 /*
11698 * As the new kernel behavior of pci_restore_state() API call clears
11699 * device saved_state flag, need to save the restored state again.
11700 */
11701 pci_save_state(pdev);
11702
75baf696
JS
11703 if (pdev->is_busmaster)
11704 pci_set_master(pdev);
11705
11706 spin_lock_irq(&phba->hbalock);
11707 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
11708 spin_unlock_irq(&phba->hbalock);
11709
11710 /* Configure and enable interrupt */
11711 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
11712 if (intr_mode == LPFC_INTR_ERROR) {
11713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11714 "2824 Cannot re-enable interrupt after "
11715 "slot reset.\n");
11716 return PCI_ERS_RESULT_DISCONNECT;
11717 } else
11718 phba->intr_mode = intr_mode;
11719
11720 /* Log the current active interrupt mode */
11721 lpfc_log_intr_mode(phba, phba->intr_mode);
11722
8d63f375
LV
11723 return PCI_ERS_RESULT_RECOVERED;
11724}
11725
11726/**
da0436e9 11727 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
e59058c4 11728 * @pdev: pointer to PCI device
8d63f375 11729 *
3772a991 11730 * This routine is called from the PCI subsystem for error handling to device
da0436e9 11731 * with SLI-4 interface spec. It is called when kernel error recovery tells
3772a991
JS
11732 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
11733 * error recovery. After this call, traffic can start to flow from this device
11734 * again.
da0436e9 11735 **/
3772a991 11736static void
da0436e9 11737lpfc_io_resume_s4(struct pci_dev *pdev)
8d63f375 11738{
75baf696
JS
11739 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11740 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11741
11742 /*
11743 * In case of slot reset, as function reset is performed through
11744 * mailbox command which needs DMA to be enabled, this operation
11745 * has to be moved to the io resume phase. Taking device offline
11746 * will perform the necessary cleanup.
11747 */
11748 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
11749 /* Perform device reset */
618a5230 11750 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
75baf696
JS
11751 lpfc_offline(phba);
11752 lpfc_sli_brdrestart(phba);
11753 /* Bring the device back online */
11754 lpfc_online(phba);
11755 }
11756
11757 /* Clean up Advanced Error Reporting (AER) if needed */
11758 if (phba->hba_flag & HBA_AER_ENABLED)
11759 pci_cleanup_aer_uncorrect_error_status(pdev);
8d63f375
LV
11760}
11761
3772a991
JS
11762/**
11763 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
11764 * @pdev: pointer to PCI device
11765 * @pid: pointer to PCI device identifier
11766 *
11767 * This routine is to be registered to the kernel's PCI subsystem. When an
11768 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
11769 * at PCI device-specific information of the device and driver to see if the
11770 * driver state that it can support this kind of device. If the match is
11771 * successful, the driver core invokes this routine. This routine dispatches
11772 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
11773 * do all the initialization that it needs to do to handle the HBA device
11774 * properly.
11775 *
11776 * Return code
11777 * 0 - driver can claim the device
11778 * negative value - driver can not claim the device
11779 **/
6f039790 11780static int
3772a991
JS
11781lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
11782{
11783 int rc;
8fa38513 11784 struct lpfc_sli_intf intf;
3772a991 11785
28baac74 11786 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
3772a991
JS
11787 return -ENODEV;
11788
8fa38513 11789 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
28baac74 11790 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
da0436e9 11791 rc = lpfc_pci_probe_one_s4(pdev, pid);
8fa38513 11792 else
3772a991 11793 rc = lpfc_pci_probe_one_s3(pdev, pid);
8fa38513 11794
3772a991
JS
11795 return rc;
11796}
11797
11798/**
11799 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
11800 * @pdev: pointer to PCI device
11801 *
11802 * This routine is to be registered to the kernel's PCI subsystem. When an
11803 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
11804 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
11805 * remove routine, which will perform all the necessary cleanup for the
11806 * device to be removed from the PCI subsystem properly.
11807 **/
6f039790 11808static void
3772a991
JS
11809lpfc_pci_remove_one(struct pci_dev *pdev)
11810{
11811 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11812 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11813
11814 switch (phba->pci_dev_grp) {
11815 case LPFC_PCI_DEV_LP:
11816 lpfc_pci_remove_one_s3(pdev);
11817 break;
da0436e9
JS
11818 case LPFC_PCI_DEV_OC:
11819 lpfc_pci_remove_one_s4(pdev);
11820 break;
3772a991
JS
11821 default:
11822 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11823 "1424 Invalid PCI device group: 0x%x\n",
11824 phba->pci_dev_grp);
11825 break;
11826 }
11827 return;
11828}
11829
11830/**
11831 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
11832 * @pdev: pointer to PCI device
11833 * @msg: power management message
11834 *
11835 * This routine is to be registered to the kernel's PCI subsystem to support
11836 * system Power Management (PM). When PM invokes this method, it dispatches
11837 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
11838 * suspend the device.
11839 *
11840 * Return code
11841 * 0 - driver suspended the device
11842 * Error otherwise
11843 **/
11844static int
11845lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
11846{
11847 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11848 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11849 int rc = -ENODEV;
11850
11851 switch (phba->pci_dev_grp) {
11852 case LPFC_PCI_DEV_LP:
11853 rc = lpfc_pci_suspend_one_s3(pdev, msg);
11854 break;
da0436e9
JS
11855 case LPFC_PCI_DEV_OC:
11856 rc = lpfc_pci_suspend_one_s4(pdev, msg);
11857 break;
3772a991
JS
11858 default:
11859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11860 "1425 Invalid PCI device group: 0x%x\n",
11861 phba->pci_dev_grp);
11862 break;
11863 }
11864 return rc;
11865}
11866
11867/**
11868 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
11869 * @pdev: pointer to PCI device
11870 *
11871 * This routine is to be registered to the kernel's PCI subsystem to support
11872 * system Power Management (PM). When PM invokes this method, it dispatches
11873 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
11874 * resume the device.
11875 *
11876 * Return code
11877 * 0 - driver suspended the device
11878 * Error otherwise
11879 **/
11880static int
11881lpfc_pci_resume_one(struct pci_dev *pdev)
11882{
11883 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11884 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11885 int rc = -ENODEV;
11886
11887 switch (phba->pci_dev_grp) {
11888 case LPFC_PCI_DEV_LP:
11889 rc = lpfc_pci_resume_one_s3(pdev);
11890 break;
da0436e9
JS
11891 case LPFC_PCI_DEV_OC:
11892 rc = lpfc_pci_resume_one_s4(pdev);
11893 break;
3772a991
JS
11894 default:
11895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11896 "1426 Invalid PCI device group: 0x%x\n",
11897 phba->pci_dev_grp);
11898 break;
11899 }
11900 return rc;
11901}
11902
11903/**
11904 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
11905 * @pdev: pointer to PCI device.
11906 * @state: the current PCI connection state.
11907 *
11908 * This routine is registered to the PCI subsystem for error handling. This
11909 * function is called by the PCI subsystem after a PCI bus error affecting
11910 * this device has been detected. When this routine is invoked, it dispatches
11911 * the action to the proper SLI-3 or SLI-4 device error detected handling
11912 * routine, which will perform the proper error detected operation.
11913 *
11914 * Return codes
11915 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
11916 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11917 **/
11918static pci_ers_result_t
11919lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
11920{
11921 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11922 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11923 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
11924
11925 switch (phba->pci_dev_grp) {
11926 case LPFC_PCI_DEV_LP:
11927 rc = lpfc_io_error_detected_s3(pdev, state);
11928 break;
da0436e9
JS
11929 case LPFC_PCI_DEV_OC:
11930 rc = lpfc_io_error_detected_s4(pdev, state);
11931 break;
3772a991
JS
11932 default:
11933 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11934 "1427 Invalid PCI device group: 0x%x\n",
11935 phba->pci_dev_grp);
11936 break;
11937 }
11938 return rc;
11939}
11940
11941/**
11942 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
11943 * @pdev: pointer to PCI device.
11944 *
11945 * This routine is registered to the PCI subsystem for error handling. This
11946 * function is called after PCI bus has been reset to restart the PCI card
11947 * from scratch, as if from a cold-boot. When this routine is invoked, it
11948 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
11949 * routine, which will perform the proper device reset.
11950 *
11951 * Return codes
11952 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
11953 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11954 **/
11955static pci_ers_result_t
11956lpfc_io_slot_reset(struct pci_dev *pdev)
11957{
11958 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11959 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11960 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
11961
11962 switch (phba->pci_dev_grp) {
11963 case LPFC_PCI_DEV_LP:
11964 rc = lpfc_io_slot_reset_s3(pdev);
11965 break;
da0436e9
JS
11966 case LPFC_PCI_DEV_OC:
11967 rc = lpfc_io_slot_reset_s4(pdev);
11968 break;
3772a991
JS
11969 default:
11970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11971 "1428 Invalid PCI device group: 0x%x\n",
11972 phba->pci_dev_grp);
11973 break;
11974 }
11975 return rc;
11976}
11977
11978/**
11979 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
11980 * @pdev: pointer to PCI device
11981 *
11982 * This routine is registered to the PCI subsystem for error handling. It
11983 * is called when kernel error recovery tells the lpfc driver that it is
11984 * OK to resume normal PCI operation after PCI bus error recovery. When
11985 * this routine is invoked, it dispatches the action to the proper SLI-3
11986 * or SLI-4 device io_resume routine, which will resume the device operation.
11987 **/
11988static void
11989lpfc_io_resume(struct pci_dev *pdev)
11990{
11991 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11992 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11993
11994 switch (phba->pci_dev_grp) {
11995 case LPFC_PCI_DEV_LP:
11996 lpfc_io_resume_s3(pdev);
11997 break;
da0436e9
JS
11998 case LPFC_PCI_DEV_OC:
11999 lpfc_io_resume_s4(pdev);
12000 break;
3772a991
JS
12001 default:
12002 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12003 "1429 Invalid PCI device group: 0x%x\n",
12004 phba->pci_dev_grp);
12005 break;
12006 }
12007 return;
12008}
12009
1ba981fd
JS
12010/**
12011 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
12012 * @phba: pointer to lpfc hba data structure.
12013 *
12014 * This routine checks to see if OAS is supported for this adapter. If
12015 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
12016 * the enable oas flag is cleared and the pool created for OAS device data
12017 * is destroyed.
12018 *
12019 **/
12020void
12021lpfc_sli4_oas_verify(struct lpfc_hba *phba)
12022{
12023
12024 if (!phba->cfg_EnableXLane)
12025 return;
12026
12027 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
12028 phba->cfg_fof = 1;
12029 } else {
f38fa0bb 12030 phba->cfg_fof = 0;
1ba981fd
JS
12031 if (phba->device_data_mem_pool)
12032 mempool_destroy(phba->device_data_mem_pool);
12033 phba->device_data_mem_pool = NULL;
12034 }
12035
12036 return;
12037}
12038
12039/**
12040 * lpfc_fof_queue_setup - Set up all the fof queues
12041 * @phba: pointer to lpfc hba data structure.
12042 *
12043 * This routine is invoked to set up all the fof queues for the FC HBA
12044 * operation.
12045 *
12046 * Return codes
12047 * 0 - successful
12048 * -ENOMEM - No available memory
12049 **/
12050int
12051lpfc_fof_queue_setup(struct lpfc_hba *phba)
12052{
895427bd 12053 struct lpfc_sli_ring *pring;
1ba981fd
JS
12054 int rc;
12055
12056 rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
12057 if (rc)
12058 return -ENOMEM;
12059
f38fa0bb 12060 if (phba->cfg_fof) {
1ba981fd
JS
12061
12062 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
12063 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
12064 if (rc)
12065 goto out_oas_cq;
12066
12067 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
12068 phba->sli4_hba.oas_cq, LPFC_FCP);
12069 if (rc)
12070 goto out_oas_wq;
12071
895427bd
JS
12072 /* Bind this CQ/WQ to the NVME ring */
12073 pring = phba->sli4_hba.oas_wq->pring;
12074 pring->sli.sli4.wqp =
12075 (void *)phba->sli4_hba.oas_wq;
12076 phba->sli4_hba.oas_cq->pring = pring;
1ba981fd
JS
12077 }
12078
12079 return 0;
12080
12081out_oas_wq:
f38fa0bb 12082 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
1ba981fd
JS
12083out_oas_cq:
12084 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
12085 return rc;
12086
12087}
12088
12089/**
12090 * lpfc_fof_queue_create - Create all the fof queues
12091 * @phba: pointer to lpfc hba data structure.
12092 *
12093 * This routine is invoked to allocate all the fof queues for the FC HBA
12094 * operation. For each SLI4 queue type, the parameters such as queue entry
12095 * count (queue depth) shall be taken from the module parameter. For now,
12096 * we just use some constant number as place holder.
12097 *
12098 * Return codes
12099 * 0 - successful
12100 * -ENOMEM - No availble memory
12101 * -EIO - The mailbox failed to complete successfully.
12102 **/
12103int
12104lpfc_fof_queue_create(struct lpfc_hba *phba)
12105{
12106 struct lpfc_queue *qdesc;
7e04e21a 12107 uint32_t wqesize;
1ba981fd
JS
12108
12109 /* Create FOF EQ */
12110 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
12111 phba->sli4_hba.eq_ecount);
12112 if (!qdesc)
12113 goto out_error;
12114
12115 phba->sli4_hba.fof_eq = qdesc;
12116
f38fa0bb 12117 if (phba->cfg_fof) {
1ba981fd
JS
12118
12119 /* Create OAS CQ */
12120 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
12121 phba->sli4_hba.cq_ecount);
12122 if (!qdesc)
12123 goto out_error;
12124
12125 phba->sli4_hba.oas_cq = qdesc;
12126
12127 /* Create OAS WQ */
7e04e21a
JS
12128 wqesize = (phba->fcp_embed_io) ?
12129 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
12130 qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
1ba981fd 12131 phba->sli4_hba.wq_ecount);
7e04e21a 12132
1ba981fd
JS
12133 if (!qdesc)
12134 goto out_error;
12135
12136 phba->sli4_hba.oas_wq = qdesc;
895427bd 12137 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
1ba981fd
JS
12138
12139 }
12140 return 0;
12141
12142out_error:
12143 lpfc_fof_queue_destroy(phba);
12144 return -ENOMEM;
12145}
12146
12147/**
12148 * lpfc_fof_queue_destroy - Destroy all the fof queues
12149 * @phba: pointer to lpfc hba data structure.
12150 *
12151 * This routine is invoked to release all the SLI4 queues with the FC HBA
12152 * operation.
12153 *
12154 * Return codes
12155 * 0 - successful
12156 **/
12157int
12158lpfc_fof_queue_destroy(struct lpfc_hba *phba)
12159{
12160 /* Release FOF Event queue */
12161 if (phba->sli4_hba.fof_eq != NULL) {
12162 lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
12163 phba->sli4_hba.fof_eq = NULL;
12164 }
12165
12166 /* Release OAS Completion queue */
12167 if (phba->sli4_hba.oas_cq != NULL) {
12168 lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
12169 phba->sli4_hba.oas_cq = NULL;
12170 }
12171
12172 /* Release OAS Work queue */
12173 if (phba->sli4_hba.oas_wq != NULL) {
12174 lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
12175 phba->sli4_hba.oas_wq = NULL;
12176 }
12177 return 0;
12178}
12179
dea3101e
JB
12180MODULE_DEVICE_TABLE(pci, lpfc_id_table);
12181
a55b2d21 12182static const struct pci_error_handlers lpfc_err_handler = {
8d63f375
LV
12183 .error_detected = lpfc_io_error_detected,
12184 .slot_reset = lpfc_io_slot_reset,
12185 .resume = lpfc_io_resume,
12186};
12187
dea3101e
JB
12188static struct pci_driver lpfc_driver = {
12189 .name = LPFC_DRIVER_NAME,
12190 .id_table = lpfc_id_table,
12191 .probe = lpfc_pci_probe_one,
6f039790 12192 .remove = lpfc_pci_remove_one,
85e8a239 12193 .shutdown = lpfc_pci_remove_one,
3a55b532 12194 .suspend = lpfc_pci_suspend_one,
3772a991 12195 .resume = lpfc_pci_resume_one,
2e0fef85 12196 .err_handler = &lpfc_err_handler,
dea3101e
JB
12197};
12198
3ef6d24c 12199static const struct file_operations lpfc_mgmt_fop = {
858feacd 12200 .owner = THIS_MODULE,
3ef6d24c
JS
12201};
12202
12203static struct miscdevice lpfc_mgmt_dev = {
12204 .minor = MISC_DYNAMIC_MINOR,
12205 .name = "lpfcmgmt",
12206 .fops = &lpfc_mgmt_fop,
12207};
12208
e59058c4 12209/**
3621a710 12210 * lpfc_init - lpfc module initialization routine
e59058c4
JS
12211 *
12212 * This routine is to be invoked when the lpfc module is loaded into the
12213 * kernel. The special kernel macro module_init() is used to indicate the
12214 * role of this routine to the kernel as lpfc module entry point.
12215 *
12216 * Return codes
12217 * 0 - successful
12218 * -ENOMEM - FC attach transport failed
12219 * all others - failed
12220 */
dea3101e
JB
12221static int __init
12222lpfc_init(void)
12223{
12224 int error = 0;
12225
12226 printk(LPFC_MODULE_DESC "\n");
c44ce173 12227 printk(LPFC_COPYRIGHT "\n");
dea3101e 12228
3ef6d24c
JS
12229 error = misc_register(&lpfc_mgmt_dev);
12230 if (error)
12231 printk(KERN_ERR "Could not register lpfcmgmt device, "
12232 "misc_register returned with status %d", error);
12233
458c083e
JS
12234 lpfc_transport_functions.vport_create = lpfc_vport_create;
12235 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
dea3101e
JB
12236 lpfc_transport_template =
12237 fc_attach_transport(&lpfc_transport_functions);
7ee5d43e 12238 if (lpfc_transport_template == NULL)
dea3101e 12239 return -ENOMEM;
458c083e
JS
12240 lpfc_vport_transport_template =
12241 fc_attach_transport(&lpfc_vport_transport_functions);
12242 if (lpfc_vport_transport_template == NULL) {
12243 fc_release_transport(lpfc_transport_template);
12244 return -ENOMEM;
7ee5d43e 12245 }
7bb03bbf
JS
12246
12247 /* Initialize in case vector mapping is needed */
b246de17 12248 lpfc_used_cpu = NULL;
2ea259ee 12249 lpfc_present_cpu = num_present_cpus();
7bb03bbf 12250
dea3101e 12251 error = pci_register_driver(&lpfc_driver);
92d7f7b0 12252 if (error) {
dea3101e 12253 fc_release_transport(lpfc_transport_template);
458c083e 12254 fc_release_transport(lpfc_vport_transport_template);
92d7f7b0 12255 }
dea3101e
JB
12256
12257 return error;
12258}
12259
e59058c4 12260/**
3621a710 12261 * lpfc_exit - lpfc module removal routine
e59058c4
JS
12262 *
12263 * This routine is invoked when the lpfc module is removed from the kernel.
12264 * The special kernel macro module_exit() is used to indicate the role of
12265 * this routine to the kernel as lpfc module exit point.
12266 */
dea3101e
JB
12267static void __exit
12268lpfc_exit(void)
12269{
3ef6d24c 12270 misc_deregister(&lpfc_mgmt_dev);
dea3101e
JB
12271 pci_unregister_driver(&lpfc_driver);
12272 fc_release_transport(lpfc_transport_template);
458c083e 12273 fc_release_transport(lpfc_vport_transport_template);
81301a9b 12274 if (_dump_buf_data) {
6a9c52cf
JS
12275 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
12276 "_dump_buf_data at 0x%p\n",
81301a9b
JS
12277 (1L << _dump_buf_data_order), _dump_buf_data);
12278 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
12279 }
12280
12281 if (_dump_buf_dif) {
6a9c52cf
JS
12282 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
12283 "_dump_buf_dif at 0x%p\n",
81301a9b
JS
12284 (1L << _dump_buf_dif_order), _dump_buf_dif);
12285 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
12286 }
b246de17 12287 kfree(lpfc_used_cpu);
7973967f 12288 idr_destroy(&lpfc_hba_index);
dea3101e
JB
12289}
12290
12291module_init(lpfc_init);
12292module_exit(lpfc_exit);
12293MODULE_LICENSE("GPL");
12294MODULE_DESCRIPTION(LPFC_MODULE_DESC);
d080abe0 12295MODULE_AUTHOR("Broadcom");
dea3101e 12296MODULE_VERSION("0:" LPFC_DRIVER_VERSION);