]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/scsi/lpfc/lpfc_sli.c
scsi: lpfc: Fix NVMe rport deregister and registration during ADISC
[mirror_ubuntu-jammy-kernel.git] / drivers / scsi / lpfc / lpfc_sli.c
CommitLineData
dea3101e
JB
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
c44ce173 3 * Fibre Channel Host Bus Adapters. *
145e5a8a 4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
3e21d1cb 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
50611577 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
c44ce173 7 * EMULEX and SLI are trademarks of Emulex. *
d080abe0 8 * www.broadcom.com *
c44ce173 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea3101e
JB
10 * *
11 * This program is free software; you can redistribute it and/or *
c44ce173
JSEC
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
dea3101e
JB
22 *******************************************************************/
23
dea3101e
JB
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/interrupt.h>
27#include <linux/delay.h>
5a0e3ad6 28#include <linux/slab.h>
1c2ba475 29#include <linux/lockdep.h>
dea3101e 30
91886523 31#include <scsi/scsi.h>
dea3101e
JB
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
f888ba3c 35#include <scsi/scsi_transport_fc.h>
da0436e9 36#include <scsi/fc/fc_fs.h>
0d878419 37#include <linux/aer.h>
1351e69f
JS
38#ifdef CONFIG_X86
39#include <asm/set_memory.h>
40#endif
dea3101e 41
da0436e9 42#include "lpfc_hw4.h"
dea3101e
JB
43#include "lpfc_hw.h"
44#include "lpfc_sli.h"
da0436e9 45#include "lpfc_sli4.h"
ea2151b4 46#include "lpfc_nl.h"
dea3101e 47#include "lpfc_disc.h"
dea3101e 48#include "lpfc.h"
895427bd
JS
49#include "lpfc_scsi.h"
50#include "lpfc_nvme.h"
dea3101e
JB
51#include "lpfc_crtn.h"
52#include "lpfc_logmsg.h"
53#include "lpfc_compat.h"
858c9f6c 54#include "lpfc_debugfs.h"
04c68496 55#include "lpfc_vport.h"
61bda8f7 56#include "lpfc_version.h"
dea3101e
JB
57
58/* There are only four IOCB completion types. */
59typedef enum _lpfc_iocb_type {
60 LPFC_UNKNOWN_IOCB,
61 LPFC_UNSOL_IOCB,
62 LPFC_SOL_IOCB,
63 LPFC_ABORT_IOCB
64} lpfc_iocb_type;
65
4f774513
JS
66
67/* Provide function prototypes local to this module. */
68static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
69 uint32_t);
70static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
45ed1190
JS
71 uint8_t *, uint32_t *);
72static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
73 struct lpfc_iocbq *);
6669f9bb
JS
74static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
75 struct hbq_dmabuf *);
ae9e28f3
JS
76static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
77 struct hbq_dmabuf *dmabuf);
32517fc0
JS
78static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
79 struct lpfc_queue *cq, struct lpfc_cqe *cqe);
895427bd 80static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
8a9d2e80 81 int);
f485c18d 82static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
32517fc0
JS
83 struct lpfc_queue *eq,
84 struct lpfc_eqe *eqe);
e8d3c3b1
JS
85static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
86static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
24c7c0a6
JS
87static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
88static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
89 struct lpfc_queue *cq,
90 struct lpfc_cqe *cqe);
0558056c 91
4f774513
JS
92static IOCB_t *
93lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
94{
95 return &iocbq->iocb;
96}
97
48f8fdb4
JS
98#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
99/**
100 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
101 * @srcp: Source memory pointer.
102 * @destp: Destination memory pointer.
103 * @cnt: Number of words required to be copied.
104 * Must be a multiple of sizeof(uint64_t)
105 *
106 * This function is used for copying data between driver memory
107 * and the SLI WQ. This function also changes the endianness
108 * of each word if native endianness is different from SLI
109 * endianness. This function can be called with or without
110 * lock.
111 **/
d7b761b0 112static void
48f8fdb4
JS
113lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
114{
115 uint64_t *src = srcp;
116 uint64_t *dest = destp;
117 int i;
118
119 for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
120 *dest++ = *src++;
121}
122#else
123#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
124#endif
125
4f774513
JS
126/**
127 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
128 * @q: The Work Queue to operate on.
129 * @wqe: The work Queue Entry to put on the Work queue.
130 *
131 * This routine will copy the contents of @wqe to the next available entry on
132 * the @q. This function will then ring the Work Queue Doorbell to signal the
133 * HBA to start processing the Work Queue Entry. This function returns 0 if
134 * successful. If no entries are available on @q then this function will return
135 * -ENOMEM.
136 * The caller is expected to hold the hbalock when calling this routine.
137 **/
cd22d605 138static int
205e8240 139lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
4f774513 140{
2e90f4b5 141 union lpfc_wqe *temp_wqe;
4f774513
JS
142 struct lpfc_register doorbell;
143 uint32_t host_index;
027140ea 144 uint32_t idx;
1351e69f
JS
145 uint32_t i = 0;
146 uint8_t *tmp;
5cc167dd 147 u32 if_type;
4f774513 148
2e90f4b5
JS
149 /* sanity check on queue memory */
150 if (unlikely(!q))
151 return -ENOMEM;
9afbee3d 152 temp_wqe = lpfc_sli4_qe(q, q->host_index);
2e90f4b5 153
4f774513 154 /* If the host has not yet processed the next entry then we are done */
027140ea
JS
155 idx = ((q->host_index + 1) % q->entry_count);
156 if (idx == q->hba_index) {
b84daac9 157 q->WQ_overflow++;
cd22d605 158 return -EBUSY;
b84daac9
JS
159 }
160 q->WQ_posted++;
4f774513 161 /* set consumption flag every once in a while */
32517fc0 162 if (!((q->host_index + 1) % q->notify_interval))
f0d9bccc 163 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
04673e38
JS
164 else
165 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
fedd3b7b
JS
166 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
167 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
48f8fdb4 168 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
1351e69f
JS
169 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
170 /* write to DPP aperture taking advatage of Combined Writes */
4c06619f
JS
171 tmp = (uint8_t *)temp_wqe;
172#ifdef __raw_writeq
1351e69f 173 for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
4c06619f
JS
174 __raw_writeq(*((uint64_t *)(tmp + i)),
175 q->dpp_regaddr + i);
176#else
177 for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
178 __raw_writel(*((uint32_t *)(tmp + i)),
179 q->dpp_regaddr + i);
180#endif
1351e69f
JS
181 }
182 /* ensure WQE bcopy and DPP flushed before doorbell write */
6b3b3bdb 183 wmb();
4f774513
JS
184
185 /* Update the host index before invoking device */
186 host_index = q->host_index;
027140ea
JS
187
188 q->host_index = idx;
4f774513
JS
189
190 /* Ring Doorbell */
191 doorbell.word0 = 0;
962bc51b 192 if (q->db_format == LPFC_DB_LIST_FORMAT) {
1351e69f
JS
193 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
194 bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
195 bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
196 bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
197 q->dpp_id);
198 bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
199 q->queue_id);
200 } else {
201 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
1351e69f 202 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
5cc167dd
JS
203
204 /* Leave bits <23:16> clear for if_type 6 dpp */
205 if_type = bf_get(lpfc_sli_intf_if_type,
206 &q->phba->sli4_hba.sli_intf);
207 if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
208 bf_set(lpfc_wq_db_list_fm_index, &doorbell,
209 host_index);
1351e69f 210 }
962bc51b
JS
211 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
212 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
213 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
214 } else {
215 return -EINVAL;
216 }
217 writel(doorbell.word0, q->db_regaddr);
4f774513
JS
218
219 return 0;
220}
221
222/**
223 * lpfc_sli4_wq_release - Updates internal hba index for WQ
224 * @q: The Work Queue to operate on.
225 * @index: The index to advance the hba index to.
226 *
227 * This routine will update the HBA index of a queue to reflect consumption of
228 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
229 * an entry the host calls this function to update the queue's internal
1543af38 230 * pointers.
4f774513 231 **/
1543af38 232static void
4f774513
JS
233lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
234{
2e90f4b5
JS
235 /* sanity check on queue memory */
236 if (unlikely(!q))
1543af38 237 return;
2e90f4b5 238
1543af38 239 q->hba_index = index;
4f774513
JS
240}
241
242/**
243 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
244 * @q: The Mailbox Queue to operate on.
245 * @wqe: The Mailbox Queue Entry to put on the Work queue.
246 *
247 * This routine will copy the contents of @mqe to the next available entry on
248 * the @q. This function will then ring the Work Queue Doorbell to signal the
249 * HBA to start processing the Work Queue Entry. This function returns 0 if
250 * successful. If no entries are available on @q then this function will return
251 * -ENOMEM.
252 * The caller is expected to hold the hbalock when calling this routine.
253 **/
254static uint32_t
255lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
256{
2e90f4b5 257 struct lpfc_mqe *temp_mqe;
4f774513 258 struct lpfc_register doorbell;
4f774513 259
2e90f4b5
JS
260 /* sanity check on queue memory */
261 if (unlikely(!q))
262 return -ENOMEM;
9afbee3d 263 temp_mqe = lpfc_sli4_qe(q, q->host_index);
2e90f4b5 264
4f774513
JS
265 /* If the host has not yet processed the next entry then we are done */
266 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
267 return -ENOMEM;
48f8fdb4 268 lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
4f774513
JS
269 /* Save off the mailbox pointer for completion */
270 q->phba->mbox = (MAILBOX_t *)temp_mqe;
271
272 /* Update the host index before invoking device */
4f774513
JS
273 q->host_index = ((q->host_index + 1) % q->entry_count);
274
275 /* Ring Doorbell */
276 doorbell.word0 = 0;
277 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
278 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
279 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
4f774513
JS
280 return 0;
281}
282
283/**
284 * lpfc_sli4_mq_release - Updates internal hba index for MQ
285 * @q: The Mailbox Queue to operate on.
286 *
287 * This routine will update the HBA index of a queue to reflect consumption of
288 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
289 * an entry the host calls this function to update the queue's internal
290 * pointers. This routine returns the number of entries that were consumed by
291 * the HBA.
292 **/
293static uint32_t
294lpfc_sli4_mq_release(struct lpfc_queue *q)
295{
2e90f4b5
JS
296 /* sanity check on queue memory */
297 if (unlikely(!q))
298 return 0;
299
4f774513
JS
300 /* Clear the mailbox pointer for completion */
301 q->phba->mbox = NULL;
302 q->hba_index = ((q->hba_index + 1) % q->entry_count);
303 return 1;
304}
305
306/**
307 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
308 * @q: The Event Queue to get the first valid EQE from
309 *
310 * This routine will get the first valid Event Queue Entry from @q, update
311 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
312 * the Queue (no more work to do), or the Queue is full of EQEs that have been
313 * processed, but not popped back to the HBA then this routine will return NULL.
314 **/
315static struct lpfc_eqe *
316lpfc_sli4_eq_get(struct lpfc_queue *q)
317{
2e90f4b5
JS
318 struct lpfc_eqe *eqe;
319
320 /* sanity check on queue memory */
321 if (unlikely(!q))
322 return NULL;
9afbee3d 323 eqe = lpfc_sli4_qe(q, q->host_index);
4f774513
JS
324
325 /* If the next EQE is not valid then we are done */
7365f6fd 326 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
4f774513 327 return NULL;
27f344eb
JS
328
329 /*
330 * insert barrier for instruction interlock : data from the hardware
331 * must have the valid bit checked before it can be copied and acted
2ea259ee
JS
332 * upon. Speculative instructions were allowing a bcopy at the start
333 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
334 * after our return, to copy data before the valid bit check above
335 * was done. As such, some of the copied data was stale. The barrier
336 * ensures the check is before any data is copied.
27f344eb
JS
337 */
338 mb();
4f774513
JS
339 return eqe;
340}
341
ba20c853
JS
342/**
343 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
344 * @q: The Event Queue to disable interrupts
345 *
346 **/
92f3b327 347void
ba20c853
JS
348lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
349{
350 struct lpfc_register doorbell;
351
352 doorbell.word0 = 0;
353 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
354 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
355 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
356 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
357 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
9dd35425 358 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
ba20c853
JS
359}
360
27d6ac0a
JS
361/**
362 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
363 * @q: The Event Queue to disable interrupts
364 *
365 **/
92f3b327 366void
27d6ac0a
JS
367lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
368{
369 struct lpfc_register doorbell;
370
371 doorbell.word0 = 0;
aad59d5d 372 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
27d6ac0a
JS
373 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
374}
375
4f774513 376/**
32517fc0
JS
377 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
378 * @phba: adapter with EQ
4f774513 379 * @q: The Event Queue that the host has completed processing for.
32517fc0 380 * @count: Number of elements that have been consumed
4f774513
JS
381 * @arm: Indicates whether the host wants to arms this CQ.
382 *
32517fc0
JS
383 * This routine will notify the HBA, by ringing the doorbell, that count
384 * number of EQEs have been processed. The @arm parameter indicates whether
385 * the queue should be rearmed when ringing the doorbell.
4f774513 386 **/
32517fc0
JS
387void
388lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
389 uint32_t count, bool arm)
4f774513 390{
4f774513
JS
391 struct lpfc_register doorbell;
392
2e90f4b5 393 /* sanity check on queue memory */
32517fc0
JS
394 if (unlikely(!q || (count == 0 && !arm)))
395 return;
4f774513
JS
396
397 /* ring doorbell for number popped */
398 doorbell.word0 = 0;
399 if (arm) {
400 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
401 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
402 }
32517fc0 403 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
4f774513 404 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
6b5151fd
JS
405 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
406 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
407 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
9dd35425 408 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
a747c9ce
JS
409 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
410 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
9dd35425 411 readl(q->phba->sli4_hba.EQDBregaddr);
4f774513
JS
412}
413
27d6ac0a 414/**
32517fc0
JS
415 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
416 * @phba: adapter with EQ
27d6ac0a 417 * @q: The Event Queue that the host has completed processing for.
32517fc0 418 * @count: Number of elements that have been consumed
27d6ac0a
JS
419 * @arm: Indicates whether the host wants to arms this CQ.
420 *
32517fc0
JS
421 * This routine will notify the HBA, by ringing the doorbell, that count
422 * number of EQEs have been processed. The @arm parameter indicates whether
423 * the queue should be rearmed when ringing the doorbell.
27d6ac0a 424 **/
32517fc0
JS
425void
426lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
427 uint32_t count, bool arm)
27d6ac0a 428{
27d6ac0a
JS
429 struct lpfc_register doorbell;
430
431 /* sanity check on queue memory */
32517fc0
JS
432 if (unlikely(!q || (count == 0 && !arm)))
433 return;
27d6ac0a
JS
434
435 /* ring doorbell for number popped */
436 doorbell.word0 = 0;
437 if (arm)
438 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
32517fc0 439 bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
27d6ac0a
JS
440 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
441 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
442 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
443 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
444 readl(q->phba->sli4_hba.EQDBregaddr);
32517fc0
JS
445}
446
447static void
448__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
449 struct lpfc_eqe *eqe)
450{
451 if (!phba->sli4_hba.pc_sli4_params.eqav)
452 bf_set_le32(lpfc_eqe_valid, eqe, 0);
453
454 eq->host_index = ((eq->host_index + 1) % eq->entry_count);
455
456 /* if the index wrapped around, toggle the valid bit */
457 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
458 eq->qe_valid = (eq->qe_valid) ? 0 : 1;
459}
460
461static void
24c7c0a6 462lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
32517fc0 463{
24c7c0a6
JS
464 struct lpfc_eqe *eqe = NULL;
465 u32 eq_count = 0, cq_count = 0;
466 struct lpfc_cqe *cqe = NULL;
467 struct lpfc_queue *cq = NULL, *childq = NULL;
468 int cqid = 0;
32517fc0
JS
469
470 /* walk all the EQ entries and drop on the floor */
471 eqe = lpfc_sli4_eq_get(eq);
472 while (eqe) {
24c7c0a6
JS
473 /* Get the reference to the corresponding CQ */
474 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
475 cq = NULL;
476
477 list_for_each_entry(childq, &eq->child_list, list) {
478 if (childq->queue_id == cqid) {
479 cq = childq;
480 break;
481 }
482 }
483 /* If CQ is valid, iterate through it and drop all the CQEs */
484 if (cq) {
485 cqe = lpfc_sli4_cq_get(cq);
486 while (cqe) {
487 __lpfc_sli4_consume_cqe(phba, cq, cqe);
488 cq_count++;
489 cqe = lpfc_sli4_cq_get(cq);
490 }
491 /* Clear and re-arm the CQ */
492 phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
493 LPFC_QUEUE_REARM);
494 cq_count = 0;
495 }
32517fc0 496 __lpfc_sli4_consume_eqe(phba, eq, eqe);
24c7c0a6 497 eq_count++;
32517fc0
JS
498 eqe = lpfc_sli4_eq_get(eq);
499 }
500
501 /* Clear and re-arm the EQ */
24c7c0a6 502 phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
32517fc0
JS
503}
504
505static int
93a4d6f4
JS
506lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
507 uint8_t rearm)
32517fc0
JS
508{
509 struct lpfc_eqe *eqe;
510 int count = 0, consumed = 0;
511
512 if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
513 goto rearm_and_exit;
514
515 eqe = lpfc_sli4_eq_get(eq);
516 while (eqe) {
517 lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
518 __lpfc_sli4_consume_eqe(phba, eq, eqe);
519
520 consumed++;
521 if (!(++count % eq->max_proc_limit))
522 break;
523
524 if (!(count % eq->notify_interval)) {
525 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
526 LPFC_QUEUE_NOARM);
527 consumed = 0;
528 }
529
530 eqe = lpfc_sli4_eq_get(eq);
531 }
532 eq->EQ_processed += count;
533
534 /* Track the max number of EQEs processed in 1 intr */
535 if (count > eq->EQ_max_eqe)
536 eq->EQ_max_eqe = count;
537
164ba8d2 538 xchg(&eq->queue_claimed, 0);
32517fc0
JS
539
540rearm_and_exit:
93a4d6f4
JS
541 /* Always clear the EQ. */
542 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
32517fc0
JS
543
544 return count;
27d6ac0a
JS
545}
546
4f774513
JS
547/**
548 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
549 * @q: The Completion Queue to get the first valid CQE from
550 *
551 * This routine will get the first valid Completion Queue Entry from @q, update
552 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
553 * the Queue (no more work to do), or the Queue is full of CQEs that have been
554 * processed, but not popped back to the HBA then this routine will return NULL.
555 **/
556static struct lpfc_cqe *
557lpfc_sli4_cq_get(struct lpfc_queue *q)
558{
559 struct lpfc_cqe *cqe;
560
2e90f4b5
JS
561 /* sanity check on queue memory */
562 if (unlikely(!q))
563 return NULL;
9afbee3d 564 cqe = lpfc_sli4_qe(q, q->host_index);
2e90f4b5 565
4f774513 566 /* If the next CQE is not valid then we are done */
7365f6fd 567 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
4f774513 568 return NULL;
27f344eb
JS
569
570 /*
571 * insert barrier for instruction interlock : data from the hardware
572 * must have the valid bit checked before it can be copied and acted
2ea259ee
JS
573 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
574 * instructions allowing action on content before valid bit checked,
575 * add barrier here as well. May not be needed as "content" is a
576 * single 32-bit entity here (vs multi word structure for cq's).
27f344eb
JS
577 */
578 mb();
4f774513
JS
579 return cqe;
580}
581
32517fc0
JS
582static void
583__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
584 struct lpfc_cqe *cqe)
585{
586 if (!phba->sli4_hba.pc_sli4_params.cqav)
587 bf_set_le32(lpfc_cqe_valid, cqe, 0);
588
589 cq->host_index = ((cq->host_index + 1) % cq->entry_count);
590
591 /* if the index wrapped around, toggle the valid bit */
592 if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
593 cq->qe_valid = (cq->qe_valid) ? 0 : 1;
594}
595
4f774513 596/**
32517fc0
JS
597 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
598 * @phba: the adapter with the CQ
4f774513 599 * @q: The Completion Queue that the host has completed processing for.
32517fc0 600 * @count: the number of elements that were consumed
4f774513
JS
601 * @arm: Indicates whether the host wants to arms this CQ.
602 *
32517fc0
JS
603 * This routine will notify the HBA, by ringing the doorbell, that the
604 * CQEs have been processed. The @arm parameter specifies whether the
605 * queue should be rearmed when ringing the doorbell.
4f774513 606 **/
32517fc0
JS
607void
608lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
609 uint32_t count, bool arm)
4f774513 610{
4f774513
JS
611 struct lpfc_register doorbell;
612
2e90f4b5 613 /* sanity check on queue memory */
32517fc0
JS
614 if (unlikely(!q || (count == 0 && !arm)))
615 return;
4f774513
JS
616
617 /* ring doorbell for number popped */
618 doorbell.word0 = 0;
619 if (arm)
620 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
32517fc0 621 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
4f774513 622 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
6b5151fd
JS
623 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
624 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
625 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
9dd35425 626 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
4f774513
JS
627}
628
27d6ac0a 629/**
32517fc0
JS
630 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
631 * @phba: the adapter with the CQ
27d6ac0a 632 * @q: The Completion Queue that the host has completed processing for.
32517fc0 633 * @count: the number of elements that were consumed
27d6ac0a
JS
634 * @arm: Indicates whether the host wants to arms this CQ.
635 *
32517fc0
JS
636 * This routine will notify the HBA, by ringing the doorbell, that the
637 * CQEs have been processed. The @arm parameter specifies whether the
638 * queue should be rearmed when ringing the doorbell.
27d6ac0a 639 **/
32517fc0
JS
640void
641lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
642 uint32_t count, bool arm)
27d6ac0a 643{
27d6ac0a
JS
644 struct lpfc_register doorbell;
645
646 /* sanity check on queue memory */
32517fc0
JS
647 if (unlikely(!q || (count == 0 && !arm)))
648 return;
27d6ac0a
JS
649
650 /* ring doorbell for number popped */
651 doorbell.word0 = 0;
652 if (arm)
653 bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
32517fc0 654 bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
27d6ac0a
JS
655 bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
656 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
27d6ac0a
JS
657}
658
4f774513
JS
659/**
660 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
661 * @q: The Header Receive Queue to operate on.
662 * @wqe: The Receive Queue Entry to put on the Receive queue.
663 *
664 * This routine will copy the contents of @wqe to the next available entry on
665 * the @q. This function will then ring the Receive Queue Doorbell to signal the
666 * HBA to start processing the Receive Queue Entry. This function returns the
667 * index that the rqe was copied to if successful. If no entries are available
668 * on @q then this function will return -ENOMEM.
669 * The caller is expected to hold the hbalock when calling this routine.
670 **/
895427bd 671int
4f774513
JS
672lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
673 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
674{
2e90f4b5
JS
675 struct lpfc_rqe *temp_hrqe;
676 struct lpfc_rqe *temp_drqe;
4f774513 677 struct lpfc_register doorbell;
cbc5de1b
JS
678 int hq_put_index;
679 int dq_put_index;
4f774513 680
2e90f4b5
JS
681 /* sanity check on queue memory */
682 if (unlikely(!hq) || unlikely(!dq))
683 return -ENOMEM;
cbc5de1b
JS
684 hq_put_index = hq->host_index;
685 dq_put_index = dq->host_index;
9afbee3d
JS
686 temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
687 temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
2e90f4b5 688
4f774513
JS
689 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
690 return -EINVAL;
cbc5de1b 691 if (hq_put_index != dq_put_index)
4f774513
JS
692 return -EINVAL;
693 /* If the host has not yet processed the next entry then we are done */
cbc5de1b 694 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
4f774513 695 return -EBUSY;
48f8fdb4
JS
696 lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
697 lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
4f774513
JS
698
699 /* Update the host index to point to the next slot */
cbc5de1b
JS
700 hq->host_index = ((hq_put_index + 1) % hq->entry_count);
701 dq->host_index = ((dq_put_index + 1) % dq->entry_count);
61f3d4bf 702 hq->RQ_buf_posted++;
4f774513
JS
703
704 /* Ring The Header Receive Queue Doorbell */
32517fc0 705 if (!(hq->host_index % hq->notify_interval)) {
4f774513 706 doorbell.word0 = 0;
962bc51b
JS
707 if (hq->db_format == LPFC_DB_RING_FORMAT) {
708 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
32517fc0 709 hq->notify_interval);
962bc51b
JS
710 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
711 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
712 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
32517fc0 713 hq->notify_interval);
962bc51b
JS
714 bf_set(lpfc_rq_db_list_fm_index, &doorbell,
715 hq->host_index);
716 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
717 } else {
718 return -EINVAL;
719 }
720 writel(doorbell.word0, hq->db_regaddr);
4f774513 721 }
cbc5de1b 722 return hq_put_index;
4f774513
JS
723}
724
725/**
726 * lpfc_sli4_rq_release - Updates internal hba index for RQ
727 * @q: The Header Receive Queue to operate on.
728 *
729 * This routine will update the HBA index of a queue to reflect consumption of
730 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
731 * consumed an entry the host calls this function to update the queue's
732 * internal pointers. This routine returns the number of entries that were
733 * consumed by the HBA.
734 **/
735static uint32_t
736lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
737{
2e90f4b5
JS
738 /* sanity check on queue memory */
739 if (unlikely(!hq) || unlikely(!dq))
740 return 0;
741
4f774513
JS
742 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
743 return 0;
744 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
745 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
746 return 1;
747}
748
e59058c4 749/**
3621a710 750 * lpfc_cmd_iocb - Get next command iocb entry in the ring
e59058c4
JS
751 * @phba: Pointer to HBA context object.
752 * @pring: Pointer to driver SLI ring object.
753 *
754 * This function returns pointer to next command iocb entry
755 * in the command ring. The caller must hold hbalock to prevent
756 * other threads consume the next command iocb.
757 * SLI-2/SLI-3 provide different sized iocbs.
758 **/
ed957684
JS
759static inline IOCB_t *
760lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
761{
7e56aa25
JS
762 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
763 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
ed957684
JS
764}
765
e59058c4 766/**
3621a710 767 * lpfc_resp_iocb - Get next response iocb entry in the ring
e59058c4
JS
768 * @phba: Pointer to HBA context object.
769 * @pring: Pointer to driver SLI ring object.
770 *
771 * This function returns pointer to next response iocb entry
772 * in the response ring. The caller must hold hbalock to make sure
773 * that no other thread consume the next response iocb.
774 * SLI-2/SLI-3 provide different sized iocbs.
775 **/
ed957684
JS
776static inline IOCB_t *
777lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
778{
7e56aa25
JS
779 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
780 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
ed957684
JS
781}
782
e59058c4 783/**
3621a710 784 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
e59058c4
JS
785 * @phba: Pointer to HBA context object.
786 *
787 * This function is called with hbalock held. This function
788 * allocates a new driver iocb object from the iocb pool. If the
789 * allocation is successful, it returns pointer to the newly
790 * allocated iocb object else it returns NULL.
791 **/
4f2e66c6 792struct lpfc_iocbq *
2e0fef85 793__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
0bd4ca25
JSEC
794{
795 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
796 struct lpfc_iocbq * iocbq = NULL;
797
1c2ba475
JT
798 lockdep_assert_held(&phba->hbalock);
799
0bd4ca25 800 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
2a9bf3d0
JS
801 if (iocbq)
802 phba->iocb_cnt++;
803 if (phba->iocb_cnt > phba->iocb_max)
804 phba->iocb_max = phba->iocb_cnt;
0bd4ca25
JSEC
805 return iocbq;
806}
807
da0436e9
JS
808/**
809 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
810 * @phba: Pointer to HBA context object.
811 * @xritag: XRI value.
812 *
813 * This function clears the sglq pointer from the array of acive
814 * sglq's. The xritag that is passed in is used to index into the
815 * array. Before the xritag can be used it needs to be adjusted
816 * by subtracting the xribase.
817 *
818 * Returns sglq ponter = success, NULL = Failure.
819 **/
895427bd 820struct lpfc_sglq *
da0436e9
JS
821__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
822{
da0436e9 823 struct lpfc_sglq *sglq;
6d368e53
JS
824
825 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
826 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
da0436e9
JS
827 return sglq;
828}
829
830/**
831 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
832 * @phba: Pointer to HBA context object.
833 * @xritag: XRI value.
834 *
835 * This function returns the sglq pointer from the array of acive
836 * sglq's. The xritag that is passed in is used to index into the
837 * array. Before the xritag can be used it needs to be adjusted
838 * by subtracting the xribase.
839 *
840 * Returns sglq ponter = success, NULL = Failure.
841 **/
0f65ff68 842struct lpfc_sglq *
da0436e9
JS
843__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
844{
da0436e9 845 struct lpfc_sglq *sglq;
6d368e53
JS
846
847 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
da0436e9
JS
848 return sglq;
849}
850
19ca7609 851/**
1151e3ec 852 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
19ca7609
JS
853 * @phba: Pointer to HBA context object.
854 * @xritag: xri used in this exchange.
855 * @rrq: The RRQ to be cleared.
856 *
19ca7609 857 **/
1151e3ec
JS
858void
859lpfc_clr_rrq_active(struct lpfc_hba *phba,
860 uint16_t xritag,
861 struct lpfc_node_rrq *rrq)
19ca7609 862{
1151e3ec 863 struct lpfc_nodelist *ndlp = NULL;
19ca7609 864
1151e3ec
JS
865 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
866 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
19ca7609
JS
867
868 /* The target DID could have been swapped (cable swap)
869 * we should use the ndlp from the findnode if it is
870 * available.
871 */
1151e3ec 872 if ((!ndlp) && rrq->ndlp)
19ca7609
JS
873 ndlp = rrq->ndlp;
874
1151e3ec
JS
875 if (!ndlp)
876 goto out;
877
cff261f6 878 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
19ca7609
JS
879 rrq->send_rrq = 0;
880 rrq->xritag = 0;
881 rrq->rrq_stop_time = 0;
882 }
1151e3ec 883out:
19ca7609
JS
884 mempool_free(rrq, phba->rrq_pool);
885}
886
887/**
888 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV.
889 * @phba: Pointer to HBA context object.
890 *
891 * This function is called with hbalock held. This function
892 * Checks if stop_time (ratov from setting rrq active) has
893 * been reached, if it has and the send_rrq flag is set then
894 * it will call lpfc_send_rrq. If the send_rrq flag is not set
895 * then it will just call the routine to clear the rrq and
896 * free the rrq resource.
897 * The timer is set to the next rrq that is going to expire before
898 * leaving the routine.
899 *
900 **/
901void
902lpfc_handle_rrq_active(struct lpfc_hba *phba)
903{
904 struct lpfc_node_rrq *rrq;
905 struct lpfc_node_rrq *nextrrq;
906 unsigned long next_time;
907 unsigned long iflags;
1151e3ec 908 LIST_HEAD(send_rrq);
19ca7609
JS
909
910 spin_lock_irqsave(&phba->hbalock, iflags);
911 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
256ec0d0 912 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
19ca7609 913 list_for_each_entry_safe(rrq, nextrrq,
1151e3ec
JS
914 &phba->active_rrq_list, list) {
915 if (time_after(jiffies, rrq->rrq_stop_time))
916 list_move(&rrq->list, &send_rrq);
917 else if (time_before(rrq->rrq_stop_time, next_time))
19ca7609
JS
918 next_time = rrq->rrq_stop_time;
919 }
920 spin_unlock_irqrestore(&phba->hbalock, iflags);
06918ac5
JS
921 if ((!list_empty(&phba->active_rrq_list)) &&
922 (!(phba->pport->load_flag & FC_UNLOADING)))
19ca7609 923 mod_timer(&phba->rrq_tmr, next_time);
1151e3ec
JS
924 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
925 list_del(&rrq->list);
ffd43814 926 if (!rrq->send_rrq) {
1151e3ec 927 /* this call will free the rrq */
ffd43814
BVA
928 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
929 } else if (lpfc_send_rrq(phba, rrq)) {
1151e3ec
JS
930 /* if we send the rrq then the completion handler
931 * will clear the bit in the xribitmap.
932 */
933 lpfc_clr_rrq_active(phba, rrq->xritag,
934 rrq);
935 }
936 }
19ca7609
JS
937}
938
939/**
940 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
941 * @vport: Pointer to vport context object.
942 * @xri: The xri used in the exchange.
943 * @did: The targets DID for this exchange.
944 *
945 * returns NULL = rrq not found in the phba->active_rrq_list.
946 * rrq = rrq for this xri and target.
947 **/
948struct lpfc_node_rrq *
949lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
950{
951 struct lpfc_hba *phba = vport->phba;
952 struct lpfc_node_rrq *rrq;
953 struct lpfc_node_rrq *nextrrq;
954 unsigned long iflags;
955
956 if (phba->sli_rev != LPFC_SLI_REV4)
957 return NULL;
958 spin_lock_irqsave(&phba->hbalock, iflags);
959 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
960 if (rrq->vport == vport && rrq->xritag == xri &&
961 rrq->nlp_DID == did){
962 list_del(&rrq->list);
963 spin_unlock_irqrestore(&phba->hbalock, iflags);
964 return rrq;
965 }
966 }
967 spin_unlock_irqrestore(&phba->hbalock, iflags);
968 return NULL;
969}
970
971/**
972 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
973 * @vport: Pointer to vport context object.
1151e3ec
JS
974 * @ndlp: Pointer to the lpfc_node_list structure.
975 * If ndlp is NULL Remove all active RRQs for this vport from the
976 * phba->active_rrq_list and clear the rrq.
977 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
19ca7609
JS
978 **/
979void
1151e3ec 980lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
19ca7609
JS
981
982{
983 struct lpfc_hba *phba = vport->phba;
984 struct lpfc_node_rrq *rrq;
985 struct lpfc_node_rrq *nextrrq;
986 unsigned long iflags;
1151e3ec 987 LIST_HEAD(rrq_list);
19ca7609
JS
988
989 if (phba->sli_rev != LPFC_SLI_REV4)
990 return;
1151e3ec
JS
991 if (!ndlp) {
992 lpfc_sli4_vport_delete_els_xri_aborted(vport);
993 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
19ca7609 994 }
1151e3ec
JS
995 spin_lock_irqsave(&phba->hbalock, iflags);
996 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
997 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
998 list_move(&rrq->list, &rrq_list);
19ca7609 999 spin_unlock_irqrestore(&phba->hbalock, iflags);
1151e3ec
JS
1000
1001 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
1002 list_del(&rrq->list);
1003 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1004 }
19ca7609
JS
1005}
1006
19ca7609 1007/**
1151e3ec 1008 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
19ca7609
JS
1009 * @phba: Pointer to HBA context object.
1010 * @ndlp: Targets nodelist pointer for this exchange.
1011 * @xritag the xri in the bitmap to test.
1012 *
e2a8be56
JS
1013 * This function returns:
1014 * 0 = rrq not active for this xri
1015 * 1 = rrq is valid for this xri.
19ca7609 1016 **/
1151e3ec
JS
1017int
1018lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
19ca7609
JS
1019 uint16_t xritag)
1020{
19ca7609
JS
1021 if (!ndlp)
1022 return 0;
cff261f6
JS
1023 if (!ndlp->active_rrqs_xri_bitmap)
1024 return 0;
1025 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
258f84fa 1026 return 1;
19ca7609
JS
1027 else
1028 return 0;
1029}
1030
1031/**
1032 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
1033 * @phba: Pointer to HBA context object.
1034 * @ndlp: nodelist pointer for this target.
1035 * @xritag: xri used in this exchange.
1036 * @rxid: Remote Exchange ID.
1037 * @send_rrq: Flag used to determine if we should send rrq els cmd.
1038 *
1039 * This function takes the hbalock.
1040 * The active bit is always set in the active rrq xri_bitmap even
1041 * if there is no slot avaiable for the other rrq information.
1042 *
1043 * returns 0 rrq actived for this xri
1044 * < 0 No memory or invalid ndlp.
1045 **/
1046int
1047lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
b42c07c8 1048 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
19ca7609 1049{
19ca7609 1050 unsigned long iflags;
b42c07c8
JS
1051 struct lpfc_node_rrq *rrq;
1052 int empty;
1053
1054 if (!ndlp)
1055 return -EINVAL;
1056
1057 if (!phba->cfg_enable_rrq)
1058 return -EINVAL;
19ca7609
JS
1059
1060 spin_lock_irqsave(&phba->hbalock, iflags);
b42c07c8
JS
1061 if (phba->pport->load_flag & FC_UNLOADING) {
1062 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1063 goto out;
1064 }
1065
1066 /*
1067 * set the active bit even if there is no mem available.
1068 */
1069 if (NLP_CHK_FREE_REQ(ndlp))
1070 goto out;
1071
1072 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
1073 goto out;
1074
cff261f6
JS
1075 if (!ndlp->active_rrqs_xri_bitmap)
1076 goto out;
1077
1078 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
b42c07c8
JS
1079 goto out;
1080
19ca7609 1081 spin_unlock_irqrestore(&phba->hbalock, iflags);
b42c07c8
JS
1082 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
1083 if (!rrq) {
1084 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1085 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
1086 " DID:0x%x Send:%d\n",
1087 xritag, rxid, ndlp->nlp_DID, send_rrq);
1088 return -EINVAL;
1089 }
e5771b4d
JS
1090 if (phba->cfg_enable_rrq == 1)
1091 rrq->send_rrq = send_rrq;
1092 else
1093 rrq->send_rrq = 0;
b42c07c8 1094 rrq->xritag = xritag;
256ec0d0
JS
1095 rrq->rrq_stop_time = jiffies +
1096 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
b42c07c8
JS
1097 rrq->ndlp = ndlp;
1098 rrq->nlp_DID = ndlp->nlp_DID;
1099 rrq->vport = ndlp->vport;
1100 rrq->rxid = rxid;
b42c07c8
JS
1101 spin_lock_irqsave(&phba->hbalock, iflags);
1102 empty = list_empty(&phba->active_rrq_list);
1103 list_add_tail(&rrq->list, &phba->active_rrq_list);
1104 phba->hba_flag |= HBA_RRQ_ACTIVE;
1105 if (empty)
1106 lpfc_worker_wake_up(phba);
1107 spin_unlock_irqrestore(&phba->hbalock, iflags);
1108 return 0;
1109out:
1110 spin_unlock_irqrestore(&phba->hbalock, iflags);
1111 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1112 "2921 Can't set rrq active xri:0x%x rxid:0x%x"
1113 " DID:0x%x Send:%d\n",
1114 xritag, rxid, ndlp->nlp_DID, send_rrq);
1115 return -EINVAL;
19ca7609
JS
1116}
1117
da0436e9 1118/**
895427bd 1119 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
da0436e9 1120 * @phba: Pointer to HBA context object.
19ca7609 1121 * @piocb: Pointer to the iocbq.
da0436e9 1122 *
e2a8be56
JS
1123 * The driver calls this function with either the nvme ls ring lock
1124 * or the fc els ring lock held depending on the iocb usage. This function
1125 * gets a new driver sglq object from the sglq list. If the list is not empty
1126 * then it is successful, it returns pointer to the newly allocated sglq
1127 * object else it returns NULL.
da0436e9
JS
1128 **/
1129static struct lpfc_sglq *
895427bd 1130__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
da0436e9 1131{
895427bd 1132 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
da0436e9 1133 struct lpfc_sglq *sglq = NULL;
19ca7609 1134 struct lpfc_sglq *start_sglq = NULL;
c490850a 1135 struct lpfc_io_buf *lpfc_cmd;
19ca7609 1136 struct lpfc_nodelist *ndlp;
e2a8be56 1137 struct lpfc_sli_ring *pring = NULL;
19ca7609
JS
1138 int found = 0;
1139
e2a8be56
JS
1140 if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
1141 pring = phba->sli4_hba.nvmels_wq->pring;
1142 else
1143 pring = lpfc_phba_elsring(phba);
1144
1145 lockdep_assert_held(&pring->ring_lock);
1c2ba475 1146
19ca7609 1147 if (piocbq->iocb_flag & LPFC_IO_FCP) {
c490850a 1148 lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
19ca7609 1149 ndlp = lpfc_cmd->rdata->pnode;
be858b65 1150 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
6c7cf486 1151 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
19ca7609 1152 ndlp = piocbq->context_un.ndlp;
6c7cf486
JS
1153 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
1154 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
1155 ndlp = NULL;
1156 else
1157 ndlp = piocbq->context_un.ndlp;
1158 } else {
19ca7609 1159 ndlp = piocbq->context1;
6c7cf486 1160 }
19ca7609 1161
895427bd
JS
1162 spin_lock(&phba->sli4_hba.sgl_list_lock);
1163 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
19ca7609
JS
1164 start_sglq = sglq;
1165 while (!found) {
1166 if (!sglq)
d11f54b7 1167 break;
895427bd
JS
1168 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
1169 test_bit(sglq->sli4_lxritag,
1170 ndlp->active_rrqs_xri_bitmap)) {
19ca7609
JS
1171 /* This xri has an rrq outstanding for this DID.
1172 * put it back in the list and get another xri.
1173 */
895427bd 1174 list_add_tail(&sglq->list, lpfc_els_sgl_list);
19ca7609 1175 sglq = NULL;
895427bd 1176 list_remove_head(lpfc_els_sgl_list, sglq,
19ca7609
JS
1177 struct lpfc_sglq, list);
1178 if (sglq == start_sglq) {
14041bd1 1179 list_add_tail(&sglq->list, lpfc_els_sgl_list);
19ca7609
JS
1180 sglq = NULL;
1181 break;
1182 } else
1183 continue;
1184 }
1185 sglq->ndlp = ndlp;
1186 found = 1;
6d368e53 1187 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
19ca7609
JS
1188 sglq->state = SGL_ALLOCATED;
1189 }
895427bd 1190 spin_unlock(&phba->sli4_hba.sgl_list_lock);
da0436e9
JS
1191 return sglq;
1192}
1193
f358dd0c
JS
1194/**
1195 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
1196 * @phba: Pointer to HBA context object.
1197 * @piocb: Pointer to the iocbq.
1198 *
1199 * This function is called with the sgl_list lock held. This function
1200 * gets a new driver sglq object from the sglq list. If the
1201 * list is not empty then it is successful, it returns pointer to the newly
1202 * allocated sglq object else it returns NULL.
1203 **/
1204struct lpfc_sglq *
1205__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1206{
1207 struct list_head *lpfc_nvmet_sgl_list;
1208 struct lpfc_sglq *sglq = NULL;
1209
1210 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1211
1212 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1213
1214 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1215 if (!sglq)
1216 return NULL;
1217 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1218 sglq->state = SGL_ALLOCATED;
da0436e9
JS
1219 return sglq;
1220}
1221
e59058c4 1222/**
3621a710 1223 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
e59058c4
JS
1224 * @phba: Pointer to HBA context object.
1225 *
1226 * This function is called with no lock held. This function
1227 * allocates a new driver iocb object from the iocb pool. If the
1228 * allocation is successful, it returns pointer to the newly
1229 * allocated iocb object else it returns NULL.
1230 **/
2e0fef85
JS
1231struct lpfc_iocbq *
1232lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1233{
1234 struct lpfc_iocbq * iocbq = NULL;
1235 unsigned long iflags;
1236
1237 spin_lock_irqsave(&phba->hbalock, iflags);
1238 iocbq = __lpfc_sli_get_iocbq(phba);
1239 spin_unlock_irqrestore(&phba->hbalock, iflags);
1240 return iocbq;
1241}
1242
4f774513
JS
1243/**
1244 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1245 * @phba: Pointer to HBA context object.
1246 * @iocbq: Pointer to driver iocb object.
1247 *
88acb4d9
DK
1248 * This function is called to release the driver iocb object
1249 * to the iocb pool. The iotag in the iocb object
4f774513
JS
1250 * does not change for each use of the iocb object. This function
1251 * clears all other fields of the iocb object when it is freed.
1252 * The sqlq structure that holds the xritag and phys and virtual
1253 * mappings for the scatter gather list is retrieved from the
1254 * active array of sglq. The get of the sglq pointer also clears
1255 * the entry in the array. If the status of the IO indiactes that
1256 * this IO was aborted then the sglq entry it put on the
1257 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1258 * IO has good status or fails for any other reason then the sglq
88acb4d9
DK
1259 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
1260 * asserted held in the code path calling this routine.
4f774513
JS
1261 **/
1262static void
1263__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1264{
1265 struct lpfc_sglq *sglq;
1266 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
2a9bf3d0 1267 unsigned long iflag = 0;
895427bd 1268 struct lpfc_sli_ring *pring;
4f774513
JS
1269
1270 if (iocbq->sli4_xritag == NO_XRI)
1271 sglq = NULL;
1272 else
6d368e53
JS
1273 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1274
0e9bb8d7 1275
4f774513 1276 if (sglq) {
f358dd0c
JS
1277 if (iocbq->iocb_flag & LPFC_IO_NVMET) {
1278 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1279 iflag);
1280 sglq->state = SGL_FREED;
1281 sglq->ndlp = NULL;
1282 list_add_tail(&sglq->list,
1283 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1284 spin_unlock_irqrestore(
1285 &phba->sli4_hba.sgl_list_lock, iflag);
1286 goto out;
1287 }
1288
895427bd 1289 pring = phba->sli4_hba.els_wq->pring;
0f65ff68
JS
1290 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1291 (sglq->state != SGL_XRI_ABORTED)) {
895427bd
JS
1292 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1293 iflag);
4f774513 1294 list_add(&sglq->list,
895427bd 1295 &phba->sli4_hba.lpfc_abts_els_sgl_list);
4f774513 1296 spin_unlock_irqrestore(
895427bd 1297 &phba->sli4_hba.sgl_list_lock, iflag);
0f65ff68 1298 } else {
895427bd
JS
1299 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1300 iflag);
0f65ff68 1301 sglq->state = SGL_FREED;
19ca7609 1302 sglq->ndlp = NULL;
fedd3b7b 1303 list_add_tail(&sglq->list,
895427bd
JS
1304 &phba->sli4_hba.lpfc_els_sgl_list);
1305 spin_unlock_irqrestore(
1306 &phba->sli4_hba.sgl_list_lock, iflag);
2a9bf3d0
JS
1307
1308 /* Check if TXQ queue needs to be serviced */
0e9bb8d7 1309 if (!list_empty(&pring->txq))
2a9bf3d0 1310 lpfc_worker_wake_up(phba);
0f65ff68 1311 }
4f774513
JS
1312 }
1313
f358dd0c 1314out:
4f774513
JS
1315 /*
1316 * Clean all volatile data fields, preserve iotag and node struct.
1317 */
1318 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
6d368e53 1319 iocbq->sli4_lxritag = NO_XRI;
4f774513 1320 iocbq->sli4_xritag = NO_XRI;
f358dd0c
JS
1321 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
1322 LPFC_IO_NVME_LS);
4f774513
JS
1323 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1324}
1325
2a9bf3d0 1326
e59058c4 1327/**
3772a991 1328 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
e59058c4
JS
1329 * @phba: Pointer to HBA context object.
1330 * @iocbq: Pointer to driver iocb object.
1331 *
88acb4d9
DK
1332 * This function is called to release the driver iocb object to the
1333 * iocb pool. The iotag in the iocb object does not change for each
1334 * use of the iocb object. This function clears all other fields of
1335 * the iocb object when it is freed. The hbalock is asserted held in
1336 * the code path calling this routine.
e59058c4 1337 **/
a6ababd2 1338static void
3772a991 1339__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
604a3e30 1340{
2e0fef85 1341 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
604a3e30
JB
1342
1343 /*
1344 * Clean all volatile data fields, preserve iotag and node struct.
1345 */
1346 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
3772a991 1347 iocbq->sli4_xritag = NO_XRI;
604a3e30
JB
1348 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1349}
1350
3772a991
JS
1351/**
1352 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1353 * @phba: Pointer to HBA context object.
1354 * @iocbq: Pointer to driver iocb object.
1355 *
1356 * This function is called with hbalock held to release driver
1357 * iocb object to the iocb pool. The iotag in the iocb object
1358 * does not change for each use of the iocb object. This function
1359 * clears all other fields of the iocb object when it is freed.
1360 **/
1361static void
1362__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1363{
1c2ba475
JT
1364 lockdep_assert_held(&phba->hbalock);
1365
3772a991 1366 phba->__lpfc_sli_release_iocbq(phba, iocbq);
2a9bf3d0 1367 phba->iocb_cnt--;
3772a991
JS
1368}
1369
e59058c4 1370/**
3621a710 1371 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
e59058c4
JS
1372 * @phba: Pointer to HBA context object.
1373 * @iocbq: Pointer to driver iocb object.
1374 *
1375 * This function is called with no lock held to release the iocb to
1376 * iocb pool.
1377 **/
2e0fef85
JS
1378void
1379lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1380{
1381 unsigned long iflags;
1382
1383 /*
1384 * Clean all volatile data fields, preserve iotag and node struct.
1385 */
1386 spin_lock_irqsave(&phba->hbalock, iflags);
1387 __lpfc_sli_release_iocbq(phba, iocbq);
1388 spin_unlock_irqrestore(&phba->hbalock, iflags);
1389}
1390
a257bf90
JS
1391/**
1392 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1393 * @phba: Pointer to HBA context object.
1394 * @iocblist: List of IOCBs.
1395 * @ulpstatus: ULP status in IOCB command field.
1396 * @ulpWord4: ULP word-4 in IOCB command field.
1397 *
1398 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1399 * on the list by invoking the complete callback function associated with the
1400 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1401 * fields.
1402 **/
1403void
1404lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1405 uint32_t ulpstatus, uint32_t ulpWord4)
1406{
1407 struct lpfc_iocbq *piocb;
1408
1409 while (!list_empty(iocblist)) {
1410 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
84f2ddf8
JS
1411 if (!piocb->iocb_cmpl) {
1412 if (piocb->iocb_flag & LPFC_IO_NVME)
1413 lpfc_nvme_cancel_iocb(phba, piocb);
1414 else
1415 lpfc_sli_release_iocbq(phba, piocb);
1416 } else {
a257bf90
JS
1417 piocb->iocb.ulpStatus = ulpstatus;
1418 piocb->iocb.un.ulpWord[4] = ulpWord4;
1419 (piocb->iocb_cmpl) (phba, piocb, piocb);
1420 }
1421 }
1422 return;
1423}
1424
e59058c4 1425/**
3621a710
JS
1426 * lpfc_sli_iocb_cmd_type - Get the iocb type
1427 * @iocb_cmnd: iocb command code.
e59058c4
JS
1428 *
1429 * This function is called by ring event handler function to get the iocb type.
1430 * This function translates the iocb command to an iocb command type used to
1431 * decide the final disposition of each completed IOCB.
1432 * The function returns
1433 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1434 * LPFC_SOL_IOCB if it is a solicited iocb completion
1435 * LPFC_ABORT_IOCB if it is an abort iocb
1436 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
1437 *
1438 * The caller is not required to hold any lock.
1439 **/
dea3101e
JB
1440static lpfc_iocb_type
1441lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1442{
1443 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1444
1445 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1446 return 0;
1447
1448 switch (iocb_cmnd) {
1449 case CMD_XMIT_SEQUENCE_CR:
1450 case CMD_XMIT_SEQUENCE_CX:
1451 case CMD_XMIT_BCAST_CN:
1452 case CMD_XMIT_BCAST_CX:
1453 case CMD_ELS_REQUEST_CR:
1454 case CMD_ELS_REQUEST_CX:
1455 case CMD_CREATE_XRI_CR:
1456 case CMD_CREATE_XRI_CX:
1457 case CMD_GET_RPI_CN:
1458 case CMD_XMIT_ELS_RSP_CX:
1459 case CMD_GET_RPI_CR:
1460 case CMD_FCP_IWRITE_CR:
1461 case CMD_FCP_IWRITE_CX:
1462 case CMD_FCP_IREAD_CR:
1463 case CMD_FCP_IREAD_CX:
1464 case CMD_FCP_ICMND_CR:
1465 case CMD_FCP_ICMND_CX:
f5603511
JS
1466 case CMD_FCP_TSEND_CX:
1467 case CMD_FCP_TRSP_CX:
1468 case CMD_FCP_TRECEIVE_CX:
1469 case CMD_FCP_AUTO_TRSP_CX:
dea3101e
JB
1470 case CMD_ADAPTER_MSG:
1471 case CMD_ADAPTER_DUMP:
1472 case CMD_XMIT_SEQUENCE64_CR:
1473 case CMD_XMIT_SEQUENCE64_CX:
1474 case CMD_XMIT_BCAST64_CN:
1475 case CMD_XMIT_BCAST64_CX:
1476 case CMD_ELS_REQUEST64_CR:
1477 case CMD_ELS_REQUEST64_CX:
1478 case CMD_FCP_IWRITE64_CR:
1479 case CMD_FCP_IWRITE64_CX:
1480 case CMD_FCP_IREAD64_CR:
1481 case CMD_FCP_IREAD64_CX:
1482 case CMD_FCP_ICMND64_CR:
1483 case CMD_FCP_ICMND64_CX:
f5603511
JS
1484 case CMD_FCP_TSEND64_CX:
1485 case CMD_FCP_TRSP64_CX:
1486 case CMD_FCP_TRECEIVE64_CX:
dea3101e
JB
1487 case CMD_GEN_REQUEST64_CR:
1488 case CMD_GEN_REQUEST64_CX:
1489 case CMD_XMIT_ELS_RSP64_CX:
da0436e9
JS
1490 case DSSCMD_IWRITE64_CR:
1491 case DSSCMD_IWRITE64_CX:
1492 case DSSCMD_IREAD64_CR:
1493 case DSSCMD_IREAD64_CX:
c93764a6 1494 case CMD_SEND_FRAME:
dea3101e
JB
1495 type = LPFC_SOL_IOCB;
1496 break;
1497 case CMD_ABORT_XRI_CN:
1498 case CMD_ABORT_XRI_CX:
1499 case CMD_CLOSE_XRI_CN:
1500 case CMD_CLOSE_XRI_CX:
1501 case CMD_XRI_ABORTED_CX:
1502 case CMD_ABORT_MXRI64_CN:
6669f9bb 1503 case CMD_XMIT_BLS_RSP64_CX:
dea3101e
JB
1504 type = LPFC_ABORT_IOCB;
1505 break;
1506 case CMD_RCV_SEQUENCE_CX:
1507 case CMD_RCV_ELS_REQ_CX:
1508 case CMD_RCV_SEQUENCE64_CX:
1509 case CMD_RCV_ELS_REQ64_CX:
57127f15 1510 case CMD_ASYNC_STATUS:
ed957684
JS
1511 case CMD_IOCB_RCV_SEQ64_CX:
1512 case CMD_IOCB_RCV_ELS64_CX:
1513 case CMD_IOCB_RCV_CONT64_CX:
3163f725 1514 case CMD_IOCB_RET_XRI64_CX:
dea3101e
JB
1515 type = LPFC_UNSOL_IOCB;
1516 break;
3163f725
JS
1517 case CMD_IOCB_XMIT_MSEQ64_CR:
1518 case CMD_IOCB_XMIT_MSEQ64_CX:
1519 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1520 case CMD_IOCB_RCV_ELS_LIST64_CX:
1521 case CMD_IOCB_CLOSE_EXTENDED_CN:
1522 case CMD_IOCB_ABORT_EXTENDED_CN:
1523 case CMD_IOCB_RET_HBQE64_CN:
1524 case CMD_IOCB_FCP_IBIDIR64_CR:
1525 case CMD_IOCB_FCP_IBIDIR64_CX:
1526 case CMD_IOCB_FCP_ITASKMGT64_CX:
1527 case CMD_IOCB_LOGENTRY_CN:
1528 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1529 printk("%s - Unhandled SLI-3 Command x%x\n",
cadbd4a5 1530 __func__, iocb_cmnd);
3163f725
JS
1531 type = LPFC_UNKNOWN_IOCB;
1532 break;
dea3101e
JB
1533 default:
1534 type = LPFC_UNKNOWN_IOCB;
1535 break;
1536 }
1537
1538 return type;
1539}
1540
e59058c4 1541/**
3621a710 1542 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
e59058c4
JS
1543 * @phba: Pointer to HBA context object.
1544 *
1545 * This function is called from SLI initialization code
1546 * to configure every ring of the HBA's SLI interface. The
1547 * caller is not required to hold any lock. This function issues
1548 * a config_ring mailbox command for each ring.
1549 * This function returns zero if successful else returns a negative
1550 * error code.
1551 **/
dea3101e 1552static int
ed957684 1553lpfc_sli_ring_map(struct lpfc_hba *phba)
dea3101e
JB
1554{
1555 struct lpfc_sli *psli = &phba->sli;
ed957684
JS
1556 LPFC_MBOXQ_t *pmb;
1557 MAILBOX_t *pmbox;
1558 int i, rc, ret = 0;
dea3101e 1559
ed957684
JS
1560 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1561 if (!pmb)
1562 return -ENOMEM;
04c68496 1563 pmbox = &pmb->u.mb;
ed957684 1564 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 1565 for (i = 0; i < psli->num_rings; i++) {
dea3101e
JB
1566 lpfc_config_ring(phba, i, pmb);
1567 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1568 if (rc != MBX_SUCCESS) {
92d7f7b0 1569 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 1570 "0446 Adapter failed to init (%d), "
dea3101e
JB
1571 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1572 "ring %d\n",
e8b62011
JS
1573 rc, pmbox->mbxCommand,
1574 pmbox->mbxStatus, i);
2e0fef85 1575 phba->link_state = LPFC_HBA_ERROR;
ed957684
JS
1576 ret = -ENXIO;
1577 break;
dea3101e
JB
1578 }
1579 }
ed957684
JS
1580 mempool_free(pmb, phba->mbox_mem_pool);
1581 return ret;
dea3101e
JB
1582}
1583
e59058c4 1584/**
3621a710 1585 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
e59058c4
JS
1586 * @phba: Pointer to HBA context object.
1587 * @pring: Pointer to driver SLI ring object.
1588 * @piocb: Pointer to the driver iocb object.
1589 *
e2a8be56
JS
1590 * The driver calls this function with the hbalock held for SLI3 ports or
1591 * the ring lock held for SLI4 ports. The function adds the
e59058c4
JS
1592 * new iocb to the txcmplq of the given ring. This function always returns
1593 * 0. If this function is called for the ELS ring, this function checks if
1594 * there is a vport associated with the ELS command. This function also
1595 * starts els_tmofunc timer if this is an ELS command.
1596 **/
dea3101e 1597static int
2e0fef85
JS
1598lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1599 struct lpfc_iocbq *piocb)
dea3101e 1600{
e2a8be56
JS
1601 if (phba->sli_rev == LPFC_SLI_REV4)
1602 lockdep_assert_held(&pring->ring_lock);
1603 else
1604 lockdep_assert_held(&phba->hbalock);
1c2ba475 1605
2319f847 1606 BUG_ON(!piocb);
22466da5 1607
dea3101e 1608 list_add_tail(&piocb->list, &pring->txcmplq);
4f2e66c6 1609 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
c490850a 1610 pring->txcmplq_cnt++;
2a9bf3d0 1611
92d7f7b0
JS
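	/* For ELS ring commands other than abort/close, (re)arm the ELS timeout timer unless the vport is unloading. */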
1612 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1613 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2319f847
MFO
1614 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1615 BUG_ON(!piocb->vport);
1616 if (!(piocb->vport->load_flag & FC_UNLOADING))
1617 mod_timer(&piocb->vport->els_tmofunc,
1618 jiffies +
1619 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1620 }
dea3101e 1621
2e0fef85 1622 return 0;
dea3101e
JB
1623}
1624
e59058c4 1625/**
3621a710 1626 * lpfc_sli_ringtx_get - Get first element of the txq
e59058c4
JS
1627 * @phba: Pointer to HBA context object.
1628 * @pring: Pointer to driver SLI ring object.
1629 *
1630 * This function is called with hbalock held to get next
1631 * iocb in txq of the given ring. If there is any iocb in
1632 * the txq, the function returns first iocb in the list after
1633 * removing the iocb from the list, else it returns NULL.
1634 **/
2a9bf3d0 1635struct lpfc_iocbq *
2e0fef85 1636lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1637{
dea3101e
JB
1638 struct lpfc_iocbq *cmd_iocb;
1639
1c2ba475
JT
1640 lockdep_assert_held(&phba->hbalock);
1641
858c9f6c 1642 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
2e0fef85 1643 return cmd_iocb;
dea3101e
JB
1644}
1645
e59058c4 1646/**
3621a710 1647 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
e59058c4
JS
1648 * @phba: Pointer to HBA context object.
1649 * @pring: Pointer to driver SLI ring object.
1650 *
1651 * This function is called with hbalock held and the caller must post the
1652 * iocb without releasing the lock. If the caller releases the lock,
1653 * the iocb slot returned by the function is not guaranteed to be available.
1654 * The function returns a pointer to the next available iocb slot if there
1655 * is a slot available in the ring, else it returns NULL.
1656 * If the get index of the ring is ahead of the put index, the function
1657 * will post an error attention event to the worker thread to take the
1658 * HBA to offline state.
1659 **/
dea3101e
JB
1660static IOCB_t *
1661lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1662{
34b02dcd 1663 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
7e56aa25 1664 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
1c2ba475
JT
1665
1666 lockdep_assert_held(&phba->hbalock);
1667
7e56aa25
JS
1668 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1669 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1670 pring->sli.sli3.next_cmdidx = 0;
dea3101e 1671
7e56aa25
JS
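	/* Ring looks full: refresh the local copy of the port's cmdGetInx before deciding whether a slot is really available. */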
1672 if (unlikely(pring->sli.sli3.local_getidx ==
1673 pring->sli.sli3.next_cmdidx)) {
dea3101e 1674
7e56aa25 1675 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 1676
7e56aa25 1677 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
dea3101e 1678 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 1679 "0315 Ring %d issue: portCmdGet %d "
025dfdaf 1680 "is bigger than cmd ring %d\n",
e8b62011 1681 pring->ringno,
7e56aa25
JS
1682 pring->sli.sli3.local_getidx,
1683 max_cmd_idx);
dea3101e 1684
2e0fef85 1685 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
1686 /*
1687 * All error attention handlers are posted to
1688 * worker thread
1689 */
1690 phba->work_ha |= HA_ERATT;
1691 phba->work_hs = HS_FFER3;
92d7f7b0 1692
5e9d9b82 1693 lpfc_worker_wake_up(phba);
dea3101e
JB
1694
1695 return NULL;
1696 }
1697
7e56aa25 1698 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
dea3101e
JB
1699 return NULL;
1700 }
1701
ed957684 1702 return lpfc_cmd_iocb(phba, pring);
dea3101e
JB
1703}
1704
e59058c4 1705/**
3621a710 1706 * lpfc_sli_next_iotag - Get an iotag for the iocb
e59058c4
JS
1707 * @phba: Pointer to HBA context object.
1708 * @iocbq: Pointer to driver iocb object.
1709 *
1710 * This function gets an iotag for the iocb. If there is no unused iotag and
1711 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1712 * array and assigns a new iotag.
1713 * The function returns the allocated iotag if successful, else returns zero.
1714 * Zero is not a valid iotag.
1715 * The caller is not required to hold any lock.
1716 **/
604a3e30 1717uint16_t
2e0fef85 1718lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
dea3101e 1719{
2e0fef85
JS
1720 struct lpfc_iocbq **new_arr;
1721 struct lpfc_iocbq **old_arr;
604a3e30
JB
1722 size_t new_len;
1723 struct lpfc_sli *psli = &phba->sli;
1724 uint16_t iotag;
dea3101e 1725
2e0fef85 1726 spin_lock_irq(&phba->hbalock);
604a3e30
JB
1727 iotag = psli->last_iotag;
1728 if(++iotag < psli->iocbq_lookup_len) {
1729 psli->last_iotag = iotag;
1730 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1731 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1732 iocbq->iotag = iotag;
1733 return iotag;
2e0fef85 1734 } else if (psli->iocbq_lookup_len < (0xffff
604a3e30
JB
1735 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1736 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
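		/* Drop the hbalock while allocating a larger lookup array; the length is re-checked under the lock below in case another context grew it first. */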
2e0fef85 1737 spin_unlock_irq(&phba->hbalock);
6396bb22 1738 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
604a3e30
JB
1739 GFP_KERNEL);
1740 if (new_arr) {
2e0fef85 1741 spin_lock_irq(&phba->hbalock);
604a3e30
JB
1742 old_arr = psli->iocbq_lookup;
1743 if (new_len <= psli->iocbq_lookup_len) {
1744 /* highly improbable case */
1745 kfree(new_arr);
1746 iotag = psli->last_iotag;
1747 if(++iotag < psli->iocbq_lookup_len) {
1748 psli->last_iotag = iotag;
1749 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1750 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1751 iocbq->iotag = iotag;
1752 return iotag;
1753 }
2e0fef85 1754 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1755 return 0;
1756 }
1757 if (psli->iocbq_lookup)
1758 memcpy(new_arr, old_arr,
1759 ((psli->last_iotag + 1) *
311464ec 1760 sizeof (struct lpfc_iocbq *)));
604a3e30
JB
1761 psli->iocbq_lookup = new_arr;
1762 psli->iocbq_lookup_len = new_len;
1763 psli->last_iotag = iotag;
1764 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1765 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1766 iocbq->iotag = iotag;
1767 kfree(old_arr);
1768 return iotag;
1769 }
8f6d98d2 1770 } else
2e0fef85 1771 spin_unlock_irq(&phba->hbalock);
dea3101e 1772
bc73905a 1773 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
1774 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1775 psli->last_iotag);
dea3101e 1776
604a3e30 1777 return 0;
dea3101e
JB
1778}
1779
e59058c4 1780/**
3621a710 1781 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
e59058c4
JS
1782 * @phba: Pointer to HBA context object.
1783 * @pring: Pointer to driver SLI ring object.
1784 * @iocb: Pointer to iocb slot in the ring.
1785 * @nextiocb: Pointer to driver iocb object which need to be
1786 * posted to firmware.
1787 *
88acb4d9
DK
1788 * This function is called to post a new iocb to the firmware. This
1789 * function copies the new iocb to the ring iocb slot and updates the
1790 * ring pointers. It adds the new iocb to the txcmplq if there is
1791 * a completion callback for this iocb; else the function will free the
88acb4d9
DK
1792 * iocb object. The hbalock is asserted held in the code path calling
1793 * this routine.
e59058c4 1794 **/
dea3101e
JB
1795static void
1796lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1797 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1798{
1799 /*
604a3e30 1800 * Set up an iotag
dea3101e 1801 */
604a3e30 1802 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
dea3101e 1803
e2a0a9d6 1804
a58cbd52
JS
1805 if (pring->ringno == LPFC_ELS_RING) {
1806 lpfc_debugfs_slow_ring_trc(phba,
1807 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1808 *(((uint32_t *) &nextiocb->iocb) + 4),
1809 *(((uint32_t *) &nextiocb->iocb) + 6),
1810 *(((uint32_t *) &nextiocb->iocb) + 7));
1811 }
1812
dea3101e
JB
1813 /*
1814 * Issue iocb command to adapter
1815 */
92d7f7b0 1816 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
dea3101e
JB
1817 wmb();
1818 pring->stats.iocb_cmd++;
1819
1820 /*
1821 * If there is no completion routine to call, we can release the
1822 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1823 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1824 */
1825 if (nextiocb->iocb_cmpl)
1826 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
604a3e30 1827 else
2e0fef85 1828 __lpfc_sli_release_iocbq(phba, nextiocb);
dea3101e
JB
1829
1830 /*
1831 * Let the HBA know what IOCB slot will be the next one the
1832 * driver will put a command into.
1833 */
7e56aa25
JS
1834 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1835 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
dea3101e
JB
1836}
1837
e59058c4 1838/**
3621a710 1839 * lpfc_sli_update_full_ring - Update the chip attention register
e59058c4
JS
1840 * @phba: Pointer to HBA context object.
1841 * @pring: Pointer to driver SLI ring object.
1842 *
1843 * The caller is not required to hold any lock for calling this function.
1844 * This function updates the chip attention bits for the ring to inform firmware
1845 * that there is pending work to be done for this ring and requests an
1846 * interrupt when there is space available in the ring. This function is
1847 * called when the driver is unable to post more iocbs to the ring due
1848 * to unavailability of space in the ring.
1849 **/
dea3101e 1850static void
2e0fef85 1851lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e
JB
1852{
1853 int ringno = pring->ringno;
1854
1855 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1856
1857 wmb();
1858
1859 /*
1860 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1861 * The HBA will tell us when an IOCB entry is available.
1862 */
1863 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1864 readl(phba->CAregaddr); /* flush */
1865
1866 pring->stats.iocb_cmd_full++;
1867}
1868
e59058c4 1869/**
3621a710 1870 * lpfc_sli_update_ring - Update chip attention register
e59058c4
JS
1871 * @phba: Pointer to HBA context object.
1872 * @pring: Pointer to driver SLI ring object.
1873 *
1874 * This function updates the chip attention register bit for the
1875 * given ring to inform HBA that there is more work to be done
1876 * in this ring. The caller is not required to hold any lock.
1877 **/
dea3101e 1878static void
2e0fef85 1879lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e
JB
1880{
1881 int ringno = pring->ringno;
1882
1883 /*
1884 * Tell the HBA that there is work to do in this ring.
1885 */
34b02dcd
JS
1886 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1887 wmb();
1888 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1889 readl(phba->CAregaddr); /* flush */
1890 }
dea3101e
JB
1891}
1892
e59058c4 1893/**
3621a710 1894 * lpfc_sli_resume_iocb - Process iocbs in the txq
e59058c4
JS
1895 * @phba: Pointer to HBA context object.
1896 * @pring: Pointer to driver SLI ring object.
1897 *
1898 * This function is called with hbalock held to post pending iocbs
1899 * in the txq to the firmware. This function is called when the driver
1900 * detects space available in the ring.
1901 **/
dea3101e 1902static void
2e0fef85 1903lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e
JB
1904{
1905 IOCB_t *iocb;
1906 struct lpfc_iocbq *nextiocb;
1907
1c2ba475
JT
1908 lockdep_assert_held(&phba->hbalock);
1909
dea3101e
JB
1910 /*
1911 * Check to see if:
1912 * (a) there is anything on the txq to send
1913 * (b) link is up
1914 * (c) link attention events can be processed (fcp ring only)
1915 * (d) IOCB processing is not blocked by the outstanding mbox command.
1916 */
0e9bb8d7
JS
1917
1918 if (lpfc_is_link_up(phba) &&
1919 (!list_empty(&pring->txq)) &&
895427bd 1920 (pring->ringno != LPFC_FCP_RING ||
0b727fea 1921 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea3101e
JB
1922
1923 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1924 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1925 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1926
1927 if (iocb)
1928 lpfc_sli_update_ring(phba, pring);
1929 else
1930 lpfc_sli_update_full_ring(phba, pring);
1931 }
1932
1933 return;
1934}
1935
e59058c4 1936/**
3621a710 1937 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
e59058c4
JS
1938 * @phba: Pointer to HBA context object.
1939 * @hbqno: HBQ number.
1940 *
1941 * This function is called with hbalock held to get the next
1942 * available slot for the given HBQ. If there is a free slot
1943 * available for the HBQ, it will return a pointer to the next available
1944 * HBQ entry; else it will return NULL.
1945 **/
a6ababd2 1946static struct lpfc_hbq_entry *
ed957684
JS
1947lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1948{
1949 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1950
1c2ba475
JT
1951 lockdep_assert_held(&phba->hbalock);
1952
ed957684
JS
1953 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1954 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1955 hbqp->next_hbqPutIdx = 0;
1956
1957 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
92d7f7b0 1958 uint32_t raw_index = phba->hbq_get[hbqno];
ed957684
JS
1959 uint32_t getidx = le32_to_cpu(raw_index);
1960
1961 hbqp->local_hbqGetIdx = getidx;
1962
1963 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1964 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 1965 LOG_SLI | LOG_VPORT,
e8b62011 1966 "1802 HBQ %d: local_hbqGetIdx "
ed957684 1967 "%u is > than hbqp->entry_count %u\n",
e8b62011 1968 hbqno, hbqp->local_hbqGetIdx,
ed957684
JS
1969 hbqp->entry_count);
1970
1971 phba->link_state = LPFC_HBA_ERROR;
1972 return NULL;
1973 }
1974
1975 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1976 return NULL;
1977 }
1978
51ef4c26
JS
1979 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1980 hbqp->hbqPutIdx;
ed957684
JS
1981}
1982
e59058c4 1983/**
3621a710 1984 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
e59058c4
JS
1985 * @phba: Pointer to HBA context object.
1986 *
1987 * This function is called with no lock held to free all the
1988 * hbq buffers while uninitializing the SLI interface. It also
1989 * frees the HBQ buffers returned by the firmware but not yet
1990 * processed by the upper layers.
1991 **/
ed957684
JS
1992void
1993lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1994{
92d7f7b0
JS
1995 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1996 struct hbq_dmabuf *hbq_buf;
3163f725 1997 unsigned long flags;
51ef4c26 1998 int i, hbq_count;
ed957684 1999
51ef4c26 2000 hbq_count = lpfc_sli_hbq_count();
ed957684 2001 /* Return all memory used by all HBQs */
3163f725 2002 spin_lock_irqsave(&phba->hbalock, flags);
51ef4c26
JS
2003 for (i = 0; i < hbq_count; ++i) {
2004 list_for_each_entry_safe(dmabuf, next_dmabuf,
2005 &phba->hbqs[i].hbq_buffer_list, list) {
2006 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2007 list_del(&hbq_buf->dbuf.list);
2008 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2009 }
a8adb832 2010 phba->hbqs[i].buffer_count = 0;
ed957684 2011 }
3163f725
JS
2012
2013 /* Mark the HBQs not in use */
2014 phba->hbq_in_use = 0;
2015 spin_unlock_irqrestore(&phba->hbalock, flags);
ed957684
JS
2016}
2017
e59058c4 2018/**
3621a710 2019 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
e59058c4
JS
2020 * @phba: Pointer to HBA context object.
2021 * @hbqno: HBQ number.
2022 * @hbq_buf: Pointer to HBQ buffer.
2023 *
2024 * This function is called with the hbalock held to post a
2025 * hbq buffer to the firmware. If the function finds an empty
2026 * slot in the HBQ, it will post the buffer. The function returns
2027 * zero if it successfully posts the buffer, else it returns an
2028 * error.
2029 **/
3772a991 2030static int
ed957684 2031lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
92d7f7b0 2032 struct hbq_dmabuf *hbq_buf)
3772a991 2033{
1c2ba475 2034 lockdep_assert_held(&phba->hbalock);
3772a991
JS
2035 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2036}
2037
2038/**
2039 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2040 * @phba: Pointer to HBA context object.
2041 * @hbqno: HBQ number.
2042 * @hbq_buf: Pointer to HBQ buffer.
2043 *
2044 * This function is called with the hbalock held to post a hbq buffer to the
2045 * firmware. If the function finds an empty slot in the HBQ, it will post the
2046 * buffer and place it on the hbq_buffer_list. The function will return zero if
2047 * it successfully posts the buffer; else it will return an error.
2048 **/
2049static int
2050lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2051 struct hbq_dmabuf *hbq_buf)
ed957684
JS
2052{
2053 struct lpfc_hbq_entry *hbqe;
92d7f7b0 2054 dma_addr_t physaddr = hbq_buf->dbuf.phys;
ed957684 2055
1c2ba475 2056 lockdep_assert_held(&phba->hbalock);
ed957684
JS
2057 /* Get next HBQ entry slot to use */
2058 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2059 if (hbqe) {
2060 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2061
92d7f7b0
JS
2062 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2063 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
895427bd 2064 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
ed957684 2065 hbqe->bde.tus.f.bdeFlags = 0;
92d7f7b0
JS
2066 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2067 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2068 /* Sync SLIM */
ed957684
JS
2069 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2070 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
92d7f7b0 2071 /* flush */
ed957684 2072 readl(phba->hbq_put + hbqno);
51ef4c26 2073 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
3772a991
JS
2074 return 0;
2075 } else
2076 return -ENOMEM;
ed957684
JS
2077}
2078
4f774513
JS
2079/**
2080 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2081 * @phba: Pointer to HBA context object.
2082 * @hbqno: HBQ number.
2083 * @hbq_buf: Pointer to HBQ buffer.
2084 *
2085 * This function is called with the hbalock held to post an RQE to the SLI4
2086 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2087 * the hbq_buffer_list and return zero, otherwise it will return an error.
2088 **/
2089static int
2090lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2091 struct hbq_dmabuf *hbq_buf)
2092{
2093 int rc;
2094 struct lpfc_rqe hrqe;
2095 struct lpfc_rqe drqe;
895427bd
JS
2096 struct lpfc_queue *hrq;
2097 struct lpfc_queue *drq;
2098
2099 if (hbqno != LPFC_ELS_HBQ)
2100 return 1;
2101 hrq = phba->sli4_hba.hdr_rq;
2102 drq = phba->sli4_hba.dat_rq;
4f774513 2103
1c2ba475 2104 lockdep_assert_held(&phba->hbalock);
4f774513
JS
2105 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2106 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2107 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2108 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
895427bd 2109 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
4f774513
JS
2110 if (rc < 0)
2111 return rc;
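	/* On success, the low bits of the tag carry the index returned by lpfc_sli4_rq_put() and the HBQ number is encoded in the upper 16 bits. */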
895427bd 2112 hbq_buf->tag = (rc | (hbqno << 16));
4f774513
JS
2113 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2114 return 0;
2115}
2116
e59058c4 2117/* HBQ for ELS and CT traffic. */
92d7f7b0
JS
2118static struct lpfc_hbq_init lpfc_els_hbq = {
2119 .rn = 1,
def9c7a9 2120 .entry_count = 256,
92d7f7b0
JS
2121 .mask_count = 0,
2122 .profile = 0,
51ef4c26 2123 .ring_mask = (1 << LPFC_ELS_RING),
92d7f7b0 2124 .buffer_count = 0,
a257bf90
JS
2125 .init_count = 40,
2126 .add_count = 40,
92d7f7b0 2127};
ed957684 2128
e59058c4 2129/* Array of HBQs */
78b2d852 2130struct lpfc_hbq_init *lpfc_hbq_defs[] = {
92d7f7b0
JS
2131 &lpfc_els_hbq,
2132};
ed957684 2133
e59058c4 2134/**
3621a710 2135 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
e59058c4
JS
2136 * @phba: Pointer to HBA context object.
2137 * @hbqno: HBQ number.
2138 * @count: Number of HBQ buffers to be posted.
2139 *
d7c255b2
JS
2140 * This function is called with no lock held to post more hbq buffers to the
2141 * given HBQ. The function returns the number of HBQ buffers successfully
2142 * posted.
e59058c4 2143 **/
311464ec 2144static int
92d7f7b0 2145lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
ed957684 2146{
d7c255b2 2147 uint32_t i, posted = 0;
3163f725 2148 unsigned long flags;
92d7f7b0 2149 struct hbq_dmabuf *hbq_buffer;
d7c255b2 2150 LIST_HEAD(hbq_buf_list);
eafe1df9 2151 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
51ef4c26 2152 return 0;
51ef4c26 2153
d7c255b2
JS
2154 if ((phba->hbqs[hbqno].buffer_count + count) >
2155 lpfc_hbq_defs[hbqno]->entry_count)
2156 count = lpfc_hbq_defs[hbqno]->entry_count -
2157 phba->hbqs[hbqno].buffer_count;
2158 if (!count)
2159 return 0;
2160 /* Allocate HBQ entries */
2161 for (i = 0; i < count; i++) {
2162 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2163 if (!hbq_buffer)
2164 break;
2165 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2166 }
3163f725
JS
2167 /* Check whether HBQ is still in use */
2168 spin_lock_irqsave(&phba->hbalock, flags);
eafe1df9 2169 if (!phba->hbq_in_use)
d7c255b2
JS
2170 goto err;
2171 while (!list_empty(&hbq_buf_list)) {
2172 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2173 dbuf.list);
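		/* The tag encodes the HBQ number in the upper 16 bits and the current buffer count in the lower bits. */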
2174 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2175 (hbqno << 16));
3772a991 2176 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
a8adb832 2177 phba->hbqs[hbqno].buffer_count++;
d7c255b2
JS
2178 posted++;
2179 } else
51ef4c26 2180 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684 2181 }
3163f725 2182 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
2183 return posted;
2184err:
eafe1df9 2185 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
2186 while (!list_empty(&hbq_buf_list)) {
2187 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2188 dbuf.list);
2189 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2190 }
2191 return 0;
ed957684
JS
2192}
2193
e59058c4 2194/**
3621a710 2195 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
e59058c4
JS
2196 * @phba: Pointer to HBA context object.
2197 * @qno: HBQ number.
2198 *
2199 * This function posts more buffers to the HBQ. This function
d7c255b2
JS
2200 * is called with no lock held. The function returns the number of HBQ entries
2201 * successfully allocated.
e59058c4 2202 **/
92d7f7b0
JS
2203int
2204lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
ed957684 2205{
def9c7a9
JS
2206 if (phba->sli_rev == LPFC_SLI_REV4)
2207 return 0;
2208 else
2209 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2210 lpfc_hbq_defs[qno]->add_count);
92d7f7b0 2211}
ed957684 2212
e59058c4 2213/**
3621a710 2214 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
e59058c4
JS
2215 * @phba: Pointer to HBA context object.
2216 * @qno: HBQ queue number.
2217 *
2218 * This function is called from SLI initialization code path with
2219 * no lock held to post initial HBQ buffers to firmware. The
d7c255b2 2220 * function returns the number of HBQ entries successfully allocated.
e59058c4 2221 **/
a6ababd2 2222static int
92d7f7b0
JS
2223lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2224{
def9c7a9
JS
2225 if (phba->sli_rev == LPFC_SLI_REV4)
2226 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
73d91e50 2227 lpfc_hbq_defs[qno]->entry_count);
def9c7a9
JS
2228 else
2229 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2230 lpfc_hbq_defs[qno]->init_count);
ed957684
JS
2231}
2232
3772a991
JS
2233/**
2234 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2235 * @rb_list: Pointer to the hbq buffer list to remove the
2236 * first buffer from.
2237 *
2238 * This function removes the first hbq buffer on an hbq list and returns a
2239 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2240 **/
2241static struct hbq_dmabuf *
2242lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2243{
2244 struct lpfc_dmabuf *d_buf;
2245
2246 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2247 if (!d_buf)
2248 return NULL;
2249 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2250}
2251
2d7dbc4c
JS
2252/**
2253 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2254 * @phba: Pointer to HBA context object.
2255 * @hrq: Pointer to the header receive queue.
2256 *
2257 * This function removes the first RQ buffer on an RQ buffer list and returns a
2258 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2259 **/
2260static struct rqb_dmabuf *
2261lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2262{
2263 struct lpfc_dmabuf *h_buf;
2264 struct lpfc_rqb *rqbp;
2265
2266 rqbp = hrq->rqbp;
2267 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2268 struct lpfc_dmabuf, list);
2269 if (!h_buf)
2270 return NULL;
2271 rqbp->buffer_count--;
2272 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2273}
2274
e59058c4 2275/**
3621a710 2276 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
e59058c4
JS
2277 * @phba: Pointer to HBA context object.
2278 * @tag: Tag of the hbq buffer.
2279 *
71892418
SH
2280 * This function searches for the hbq buffer associated with the given tag in
2281 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2282 * otherwise it returns NULL.
e59058c4 2283 **/
a6ababd2 2284static struct hbq_dmabuf *
92d7f7b0 2285lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
ed957684 2286{
92d7f7b0
JS
2287 struct lpfc_dmabuf *d_buf;
2288 struct hbq_dmabuf *hbq_buf;
51ef4c26
JS
2289 uint32_t hbqno;
2290
2291 hbqno = tag >> 16;
a0a74e45 2292 if (hbqno >= LPFC_MAX_HBQS)
51ef4c26 2293 return NULL;
ed957684 2294
3772a991 2295 spin_lock_irq(&phba->hbalock);
51ef4c26 2296 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
92d7f7b0 2297 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
51ef4c26 2298 if (hbq_buf->tag == tag) {
3772a991 2299 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2300 return hbq_buf;
ed957684
JS
2301 }
2302 }
3772a991 2303 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2304 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
e8b62011 2305 "1803 Bad hbq tag. Data: x%x x%x\n",
a8adb832 2306 tag, phba->hbqs[tag >> 16].buffer_count);
92d7f7b0 2307 return NULL;
ed957684
JS
2308}
2309
e59058c4 2310/**
3621a710 2311 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
e59058c4
JS
2312 * @phba: Pointer to HBA context object.
2313 * @hbq_buffer: Pointer to HBQ buffer.
2314 *
2315 * This function is called with the hbalock held. It gives back
2316 * the hbq buffer to the firmware. If the HBQ does not have space to
2317 * post the buffer, it will free the buffer.
2318 **/
ed957684 2319void
51ef4c26 2320lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
ed957684
JS
2321{
2322 uint32_t hbqno;
2323
51ef4c26
JS
2324 if (hbq_buffer) {
2325 hbqno = hbq_buffer->tag >> 16;
3772a991 2326 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
51ef4c26 2327 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684
JS
2328 }
2329}
2330
e59058c4 2331/**
3621a710 2332 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
e59058c4
JS
2333 * @mbxCommand: mailbox command code.
2334 *
2335 * This function is called by the mailbox event handler function to verify
2336 * that the completed mailbox command is a legitimate mailbox command. If the
2337 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2338 * and the mailbox event handler will take the HBA offline.
2339 **/
dea3101e
JB
2340static int
2341lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2342{
2343 uint8_t ret;
2344
2345 switch (mbxCommand) {
2346 case MBX_LOAD_SM:
2347 case MBX_READ_NV:
2348 case MBX_WRITE_NV:
a8adb832 2349 case MBX_WRITE_VPARMS:
dea3101e
JB
2350 case MBX_RUN_BIU_DIAG:
2351 case MBX_INIT_LINK:
2352 case MBX_DOWN_LINK:
2353 case MBX_CONFIG_LINK:
2354 case MBX_CONFIG_RING:
2355 case MBX_RESET_RING:
2356 case MBX_READ_CONFIG:
2357 case MBX_READ_RCONFIG:
2358 case MBX_READ_SPARM:
2359 case MBX_READ_STATUS:
2360 case MBX_READ_RPI:
2361 case MBX_READ_XRI:
2362 case MBX_READ_REV:
2363 case MBX_READ_LNK_STAT:
2364 case MBX_REG_LOGIN:
2365 case MBX_UNREG_LOGIN:
dea3101e
JB
2366 case MBX_CLEAR_LA:
2367 case MBX_DUMP_MEMORY:
2368 case MBX_DUMP_CONTEXT:
2369 case MBX_RUN_DIAGS:
2370 case MBX_RESTART:
2371 case MBX_UPDATE_CFG:
2372 case MBX_DOWN_LOAD:
2373 case MBX_DEL_LD_ENTRY:
2374 case MBX_RUN_PROGRAM:
2375 case MBX_SET_MASK:
09372820 2376 case MBX_SET_VARIABLE:
dea3101e 2377 case MBX_UNREG_D_ID:
41415862 2378 case MBX_KILL_BOARD:
dea3101e 2379 case MBX_CONFIG_FARP:
41415862 2380 case MBX_BEACON:
dea3101e
JB
2381 case MBX_LOAD_AREA:
2382 case MBX_RUN_BIU_DIAG64:
2383 case MBX_CONFIG_PORT:
2384 case MBX_READ_SPARM64:
2385 case MBX_READ_RPI64:
2386 case MBX_REG_LOGIN64:
76a95d75 2387 case MBX_READ_TOPOLOGY:
09372820 2388 case MBX_WRITE_WWN:
dea3101e
JB
2389 case MBX_SET_DEBUG:
2390 case MBX_LOAD_EXP_ROM:
57127f15 2391 case MBX_ASYNCEVT_ENABLE:
92d7f7b0
JS
2392 case MBX_REG_VPI:
2393 case MBX_UNREG_VPI:
858c9f6c 2394 case MBX_HEARTBEAT:
84774a4d
JS
2395 case MBX_PORT_CAPABILITIES:
2396 case MBX_PORT_IOV_CONTROL:
04c68496
JS
2397 case MBX_SLI4_CONFIG:
2398 case MBX_SLI4_REQ_FTRS:
2399 case MBX_REG_FCFI:
2400 case MBX_UNREG_FCFI:
2401 case MBX_REG_VFI:
2402 case MBX_UNREG_VFI:
2403 case MBX_INIT_VPI:
2404 case MBX_INIT_VFI:
2405 case MBX_RESUME_RPI:
c7495937
JS
2406 case MBX_READ_EVENT_LOG_STATUS:
2407 case MBX_READ_EVENT_LOG:
dcf2a4e0
JS
2408 case MBX_SECURITY_MGMT:
2409 case MBX_AUTH_PORT:
940eb687 2410 case MBX_ACCESS_VDATA:
dea3101e
JB
2411 ret = mbxCommand;
2412 break;
2413 default:
2414 ret = MBX_SHUTDOWN;
2415 break;
2416 }
2e0fef85 2417 return ret;
dea3101e 2418}
e59058c4
JS
2419
2420/**
3621a710 2421 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
e59058c4
JS
2422 * @phba: Pointer to HBA context object.
2423 * @pmboxq: Pointer to mailbox command.
2424 *
2425 * This is completion handler function for mailbox commands issued from
2426 * lpfc_sli_issue_mbox_wait function. This function is called by the
2427 * mailbox event handler function with no lock held. This function
2428 * will wake up thread waiting on the wait queue pointed by context1
2429 * of the mailbox.
2430 **/
04c68496 2431void
2e0fef85 2432lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea3101e 2433{
858c9f6c 2434 unsigned long drvr_flag;
e29d74f8 2435 struct completion *pmbox_done;
dea3101e
JB
2436
2437 /*
e29d74f8 2438 * If pmbox_done is empty, the driver thread gave up waiting and
dea3101e
JB
2439 * continued running.
2440 */
7054a606 2441 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
858c9f6c 2442 spin_lock_irqsave(&phba->hbalock, drvr_flag);
e29d74f8
JS
2443 pmbox_done = (struct completion *)pmboxq->context3;
2444 if (pmbox_done)
2445 complete(pmbox_done);
858c9f6c 2446 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e
JB
2447 return;
2448}
2449
b95b2119
JS
2450static void
2451__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2452{
2453 unsigned long iflags;
2454
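	/* If the RPI was flagged for release, free it and reset the node's RPI bookkeeping under the ndlp lock, then clear the unreg-in-progress flag. */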
2455 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2456 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2457 spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
2458 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2459 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2460 spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
2461 }
2462 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2463}
e59058c4
JS
2464
2465/**
3621a710 2466 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
e59058c4
JS
2467 * @phba: Pointer to HBA context object.
2468 * @pmb: Pointer to mailbox object.
2469 *
2470 * This function is the default mailbox completion handler. It
2471 * frees the memory resources associated with the completed mailbox
2472 * command. If the completed command is a REG_LOGIN mailbox command,
2473 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2474 **/
dea3101e 2475void
2e0fef85 2476lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 2477{
d439d286 2478 struct lpfc_vport *vport = pmb->vport;
dea3101e 2479 struct lpfc_dmabuf *mp;
d439d286 2480 struct lpfc_nodelist *ndlp;
5af5eee7 2481 struct Scsi_Host *shost;
04c68496 2482 uint16_t rpi, vpi;
7054a606
JS
2483 int rc;
2484
3e1f0718 2485 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
7054a606 2486
dea3101e
JB
2487 if (mp) {
2488 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2489 kfree(mp);
2490 }
7054a606
JS
2491
2492 /*
2493 * If a REG_LOGIN succeeded after the node was destroyed or the node
2494 * is in re-discovery, the driver needs to clean up the RPI.
2495 */
2e0fef85 2496 if (!(phba->pport->load_flag & FC_UNLOADING) &&
04c68496
JS
2497 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2498 !pmb->u.mb.mbxStatus) {
2499 rpi = pmb->u.mb.un.varWords[0];
6d368e53 2500 vpi = pmb->u.mb.un.varRegLogin.vpi;
38503943
JS
2501 if (phba->sli_rev == LPFC_SLI_REV4)
2502 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
04c68496 2503 lpfc_unreg_login(phba, vpi, rpi, pmb);
de96e9c5 2504 pmb->vport = vport;
92d7f7b0 2505 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7054a606
JS
2506 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2507 if (rc != MBX_NOT_FINISHED)
2508 return;
2509 }
2510
695a814e
JS
2511 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2512 !(phba->pport->load_flag & FC_UNLOADING) &&
2513 !pmb->u.mb.mbxStatus) {
5af5eee7
JS
2514 shost = lpfc_shost_from_vport(vport);
2515 spin_lock_irq(shost->host_lock);
2516 vport->vpi_state |= LPFC_VPI_REGISTERED;
2517 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2518 spin_unlock_irq(shost->host_lock);
695a814e
JS
2519 }
2520
d439d286 2521 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 2522 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
d439d286 2523 lpfc_nlp_put(ndlp);
dea16bda
JS
2524 pmb->ctx_buf = NULL;
2525 pmb->ctx_ndlp = NULL;
2526 }
2527
2528 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2529 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2530
2531 /* Check to see if there are any deferred events to process */
2532 if (ndlp) {
2533 lpfc_printf_vlog(
2534 vport,
2535 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2536 "1438 UNREG cmpl deferred mbox x%x "
32350664 2537 "on NPort x%x Data: x%x x%x %px\n",
dea16bda
JS
2538 ndlp->nlp_rpi, ndlp->nlp_DID,
2539 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2540
2541 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2542 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
00292e03 2543 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda
JS
2544 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2545 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
00292e03 2546 } else {
b95b2119 2547 __lpfc_sli_rpi_release(vport, ndlp);
dea16bda 2548 }
97acd001
JS
2549 if (vport->load_flag & FC_UNLOADING)
2550 lpfc_nlp_put(ndlp);
9b164068 2551 pmb->ctx_ndlp = NULL;
dea16bda 2552 }
d439d286
JS
2553 }
2554
dcf2a4e0
JS
2555 /* Check security permission status on INIT_LINK mailbox command */
2556 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2557 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2558 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2559 "2860 SLI authentication is required "
2560 "for INIT_LINK but has not done yet\n");
2561
04c68496
JS
2562 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2563 lpfc_sli4_mbox_cmd_free(phba, pmb);
2564 else
2565 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 2566}
be6bb941
JS
2567 /**
2568 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2569 * @phba: Pointer to HBA context object.
2570 * @pmb: Pointer to mailbox object.
2571 *
2572 * This function is the unreg rpi mailbox completion handler. It
2573 * frees the memory resources associated with the completed mailbox
2574 * command. An additional reference is put on the ndlp to prevent
2575 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2576 * the unreg mailbox command completes; this routine puts that
2577 * reference back.
2578 *
2579 **/
2580void
2581lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2582{
2583 struct lpfc_vport *vport = pmb->vport;
2584 struct lpfc_nodelist *ndlp;
2585
3e1f0718 2586 ndlp = pmb->ctx_ndlp;
be6bb941
JS
2587 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2588 if (phba->sli_rev == LPFC_SLI_REV4 &&
2589 (bf_get(lpfc_sli_intf_if_type,
27d6ac0a 2590 &phba->sli4_hba.sli_intf) >=
be6bb941
JS
2591 LPFC_SLI_INTF_IF_TYPE_2)) {
2592 if (ndlp) {
dea16bda
JS
2593 lpfc_printf_vlog(
2594 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2595 "0010 UNREG_LOGIN vpi:%x "
2596 "rpi:%x DID:%x defer x%x flg x%x "
32350664 2597 "map:%x %px\n",
dea16bda
JS
2598 vport->vpi, ndlp->nlp_rpi,
2599 ndlp->nlp_DID, ndlp->nlp_defer_did,
2600 ndlp->nlp_flag,
2601 ndlp->nlp_usg_map, ndlp);
7c5e518c 2602 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
be6bb941 2603 lpfc_nlp_put(ndlp);
dea16bda
JS
2604
2605 /* Check to see if there are any deferred
2606 * events to process
2607 */
2608 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2609 (ndlp->nlp_defer_did !=
2610 NLP_EVT_NOTHING_PENDING)) {
2611 lpfc_printf_vlog(
2612 vport, KERN_INFO, LOG_DISCOVERY,
2613 "4111 UNREG cmpl deferred "
2614 "clr x%x on "
32350664 2615 "NPort x%x Data: x%x x%px\n",
dea16bda
JS
2616 ndlp->nlp_rpi, ndlp->nlp_DID,
2617 ndlp->nlp_defer_did, ndlp);
00292e03 2618 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda
JS
2619 ndlp->nlp_defer_did =
2620 NLP_EVT_NOTHING_PENDING;
2621 lpfc_issue_els_plogi(
2622 vport, ndlp->nlp_DID, 0);
00292e03 2623 } else {
b95b2119 2624 __lpfc_sli_rpi_release(vport, ndlp);
dea16bda 2625 }
be6bb941
JS
2626 }
2627 }
2628 }
2629
2630 mempool_free(pmb, phba->mbox_mem_pool);
2631}
dea3101e 2632
e59058c4 2633/**
3621a710 2634 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
e59058c4
JS
2635 * @phba: Pointer to HBA context object.
2636 *
2637 * This function is called with no lock held. This function processes all
2638 * the completed mailbox commands and gives them to the upper layers. The interrupt
2639 * service routine processes mailbox completion interrupt and adds completed
2640 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2641 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2642 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2643 * function returns the mailbox commands to the upper layer by calling the
2644 * completion handler function of each mailbox.
2645 **/
dea3101e 2646int
2e0fef85 2647lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea3101e 2648{
92d7f7b0 2649 MAILBOX_t *pmbox;
dea3101e 2650 LPFC_MBOXQ_t *pmb;
92d7f7b0
JS
2651 int rc;
2652 LIST_HEAD(cmplq);
dea3101e
JB
2653
2654 phba->sli.slistat.mbox_event++;
2655
92d7f7b0
JS
2656 /* Get all completed mailbox buffers into the cmplq */
2657 spin_lock_irq(&phba->hbalock);
2658 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2659 spin_unlock_irq(&phba->hbalock);
dea3101e 2660
92d7f7b0
JS
2661 /* Get a Mailbox buffer to setup mailbox commands for callback */
2662 do {
2663 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2664 if (pmb == NULL)
2665 break;
2e0fef85 2666
04c68496 2667 pmbox = &pmb->u.mb;
dea3101e 2668
858c9f6c
JS
2669 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2670 if (pmb->vport) {
2671 lpfc_debugfs_disc_trc(pmb->vport,
2672 LPFC_DISC_TRC_MBOX_VPORT,
2673 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2674 (uint32_t)pmbox->mbxCommand,
2675 pmbox->un.varWords[0],
2676 pmbox->un.varWords[1]);
2677 }
2678 else {
2679 lpfc_debugfs_disc_trc(phba->pport,
2680 LPFC_DISC_TRC_MBOX,
2681 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2682 (uint32_t)pmbox->mbxCommand,
2683 pmbox->un.varWords[0],
2684 pmbox->un.varWords[1]);
2685 }
2686 }
2687
dea3101e
JB
2688 /*
2689 * It is a fatal error if unknown mbox command completion.
2690 */
2691 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2692 MBX_SHUTDOWN) {
af901ca1 2693 /* Unknown mailbox command compl */
92d7f7b0 2694 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
e8b62011 2695 "(%d):0323 Unknown Mailbox command "
a183a15f 2696 "x%x (x%x/x%x) Cmpl\n",
43bfea1b
JS
2697 pmb->vport ? pmb->vport->vpi :
2698 LPFC_VPORT_UNKNOWN,
04c68496 2699 pmbox->mbxCommand,
a183a15f
JS
2700 lpfc_sli_config_mbox_subsys_get(phba,
2701 pmb),
2702 lpfc_sli_config_mbox_opcode_get(phba,
2703 pmb));
2e0fef85 2704 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
2705 phba->work_hs = HS_FFER3;
2706 lpfc_handle_eratt(phba);
92d7f7b0 2707 continue;
dea3101e
JB
2708 }
2709
dea3101e
JB
2710 if (pmbox->mbxStatus) {
2711 phba->sli.slistat.mbox_stat_err++;
2712 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2713 /* Mbox cmd cmpl error - RETRYing */
92d7f7b0 2714 lpfc_printf_log(phba, KERN_INFO,
a183a15f
JS
2715 LOG_MBOX | LOG_SLI,
2716 "(%d):0305 Mbox cmd cmpl "
2717 "error - RETRYing Data: x%x "
2718 "(x%x/x%x) x%x x%x x%x\n",
43bfea1b
JS
2719 pmb->vport ? pmb->vport->vpi :
2720 LPFC_VPORT_UNKNOWN,
a183a15f
JS
2721 pmbox->mbxCommand,
2722 lpfc_sli_config_mbox_subsys_get(phba,
2723 pmb),
2724 lpfc_sli_config_mbox_opcode_get(phba,
2725 pmb),
2726 pmbox->mbxStatus,
2727 pmbox->un.varWords[0],
43bfea1b
JS
2728 pmb->vport ? pmb->vport->port_state :
2729 LPFC_VPORT_UNKNOWN);
dea3101e
JB
2730 pmbox->mbxStatus = 0;
2731 pmbox->mbxOwner = OWN_HOST;
dea3101e 2732 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
04c68496 2733 if (rc != MBX_NOT_FINISHED)
92d7f7b0 2734 continue;
dea3101e
JB
2735 }
2736 }
2737
2738 /* Mailbox cmd <cmd> Cmpl <cmpl> */
92d7f7b0 2739 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2d44d165 2740 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
e74c03c8
JS
2741 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2742 "x%x x%x x%x\n",
92d7f7b0 2743 pmb->vport ? pmb->vport->vpi : 0,
dea3101e 2744 pmbox->mbxCommand,
a183a15f
JS
2745 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2746 lpfc_sli_config_mbox_opcode_get(phba, pmb),
dea3101e
JB
2747 pmb->mbox_cmpl,
2748 *((uint32_t *) pmbox),
2749 pmbox->un.varWords[0],
2750 pmbox->un.varWords[1],
2751 pmbox->un.varWords[2],
2752 pmbox->un.varWords[3],
2753 pmbox->un.varWords[4],
2754 pmbox->un.varWords[5],
2755 pmbox->un.varWords[6],
e74c03c8
JS
2756 pmbox->un.varWords[7],
2757 pmbox->un.varWords[8],
2758 pmbox->un.varWords[9],
2759 pmbox->un.varWords[10]);
dea3101e 2760
92d7f7b0 2761 if (pmb->mbox_cmpl)
dea3101e 2762 pmb->mbox_cmpl(phba,pmb);
92d7f7b0
JS
2763 } while (1);
2764 return 0;
2765}
dea3101e 2766
e59058c4 2767/**
3621a710 2768 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
e59058c4
JS
2769 * @phba: Pointer to HBA context object.
2770 * @pring: Pointer to driver SLI ring object.
2771 * @tag: buffer tag.
2772 *
2773 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
2774 * is set in the tag, the buffer was posted for a particular exchange and
2775 * the function will return the buffer without replacing it.
2776 * If the buffer is for unsolicited ELS or CT traffic, this function
2777 * returns the buffer and also posts another buffer to the firmware.
2778 **/
76bb24ef
JS
2779static struct lpfc_dmabuf *
2780lpfc_sli_get_buff(struct lpfc_hba *phba,
9f1e1b50
JS
2781 struct lpfc_sli_ring *pring,
2782 uint32_t tag)
76bb24ef 2783{
9f1e1b50
JS
2784 struct hbq_dmabuf *hbq_entry;
2785
76bb24ef
JS
2786 if (tag & QUE_BUFTAG_BIT)
2787 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
9f1e1b50
JS
2788 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2789 if (!hbq_entry)
2790 return NULL;
2791 return &hbq_entry->dbuf;
76bb24ef 2792}
57127f15 2793
3a8070c5
JS
2794/**
2795 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
2796 * containing a NVME LS request.
2797 * @phba: pointer to lpfc hba data structure.
2798 * @piocb: pointer to the iocbq struct representing the sequence starting
2799 * frame.
2800 *
2801 * This routine initially validates the NVME LS, validates there is a login
2802 * with the port that sent the LS, and then calls the appropriate nvme host
2803 * or target LS request handler.
2804 **/
2805static void
2806lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2807{
2808 struct lpfc_nodelist *ndlp;
2809 struct lpfc_dmabuf *d_buf;
2810 struct hbq_dmabuf *nvmebuf;
2811 struct fc_frame_header *fc_hdr;
2812 struct lpfc_async_xchg_ctx *axchg = NULL;
2813 char *failwhy = NULL;
2814 uint32_t oxid, sid, did, fctl, size;
4e57e0b9 2815 int ret = 1;
3a8070c5
JS
2816
2817 d_buf = piocb->context2;
2818
2819 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2820 fc_hdr = nvmebuf->hbuf.virt;
2821 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2822 sid = sli4_sid_from_fc_hdr(fc_hdr);
2823 did = sli4_did_from_fc_hdr(fc_hdr);
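	/* Reassemble the 24-bit F_CTL field from its three header bytes. */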
2824 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2825 fc_hdr->fh_f_ctl[1] << 8 |
2826 fc_hdr->fh_f_ctl[2]);
2827 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2828
2829 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
2830 oxid, size, sid);
2831
2832 if (phba->pport->load_flag & FC_UNLOADING) {
2833 failwhy = "Driver Unloading";
2834 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2835 failwhy = "NVME FC4 Disabled";
2836 } else if (!phba->nvmet_support && !phba->pport->localport) {
2837 failwhy = "No Localport";
2838 } else if (phba->nvmet_support && !phba->targetport) {
2839 failwhy = "No Targetport";
2840 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2841 failwhy = "Bad NVME LS R_CTL";
2842 } else if (unlikely((fctl & 0x00FF0000) !=
2843 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2844 failwhy = "Bad NVME LS F_CTL";
2845 } else {
2846 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2847 if (!axchg)
2848 failwhy = "No CTX memory";
2849 }
2850
2851 if (unlikely(failwhy)) {
2852 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
2853 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2854 sid, oxid, failwhy);
2855 goto out_fail;
2856 }
2857
2858 /* validate the source of the LS is logged in */
2859 ndlp = lpfc_findnode_did(phba->pport, sid);
2860 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2861 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2862 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2863 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2864 "6216 NVME Unsol rcv: No ndlp: "
2865 "NPort_ID x%x oxid x%x\n",
2866 sid, oxid);
2867 goto out_fail;
2868 }
2869
2870 axchg->phba = phba;
2871 axchg->ndlp = ndlp;
2872 axchg->size = size;
2873 axchg->oxid = oxid;
2874 axchg->sid = sid;
2875 axchg->wqeq = NULL;
2876 axchg->state = LPFC_NVME_STE_LS_RCV;
2877 axchg->entry_cnt = 1;
2878 axchg->rqb_buffer = (void *)nvmebuf;
2879 axchg->hdwq = &phba->sli4_hba.hdwq[0];
2880 axchg->payload = nvmebuf->dbuf.virt;
2881 INIT_LIST_HEAD(&axchg->list);
2882
2883 if (phba->nvmet_support)
2884 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
2885 else
2886 ret = lpfc_nvme_handle_lsreq(phba, axchg);
2887
2888 /* if zero, LS was successfully handled. If non-zero, LS not handled */
2889 if (!ret)
2890 return;
2891
2892 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
2893 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
2894 "NVMe%s handler failed %d\n",
2895 did, sid, oxid,
2896 (phba->nvmet_support) ? "T" : "I", ret);
2897
2898out_fail:
3a8070c5
JS
2899
2900 /* recycle receive buffer */
2901 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2902
2903 /* If start of new exchange, abort it */
4e57e0b9
JS
2904 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
2905 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
2906
2907 if (ret)
2908 kfree(axchg);
3a8070c5
JS
2909}
2910
3772a991
JS
2911/**
2912 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2913 * @phba: Pointer to HBA context object.
2914 * @pring: Pointer to driver SLI ring object.
2915 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2916 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2917 * @fch_type: the type for the first frame of the sequence.
2918 *
2919 * This function is called with no lock held. This function uses the r_ctl and
2920 * type of the received sequence to find the correct callback function to call
2921 * to process the sequence.
2922 **/
2923static int
2924lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2925 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2926 uint32_t fch_type)
2927{
2928 int i;
2929
f358dd0c
JS
2930 switch (fch_type) {
2931 case FC_TYPE_NVME:
3a8070c5 2932 lpfc_nvme_unsol_ls_handler(phba, saveq);
f358dd0c
JS
2933 return 1;
2934 default:
2935 break;
2936 }
2937
3772a991
JS
2938 /* unSolicited Responses */
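	/* If the first rctl/type mask entry has a profile, its handler receives every unsolicited event on this ring. */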
2939 if (pring->prt[0].profile) {
2940 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2941 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2942 saveq);
2943 return 1;
2944 }
2945 /* We must search, based on rctl / type
2946 for the right routine */
2947 for (i = 0; i < pring->num_mask; i++) {
2948 if ((pring->prt[i].rctl == fch_r_ctl) &&
2949 (pring->prt[i].type == fch_type)) {
2950 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2951 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2952 (phba, pring, saveq);
2953 return 1;
2954 }
2955 }
2956 return 0;
2957}
e59058c4
JS
2958
2959/**
3621a710 2960 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
e59058c4
JS
2961 * @phba: Pointer to HBA context object.
2962 * @pring: Pointer to driver SLI ring object.
2963 * @saveq: Pointer to the unsolicited iocb.
2964 *
2965 * This function is called with no lock held by the ring event handler
2966 * when there is an unsolicited iocb posted to the response ring by the
2967 * firmware. This function gets the buffer associated with the iocbs
2968 * and calls the event handler for the ring. This function handles both
2969 * qring buffers and hbq buffers.
2970 * When the function returns 1 the caller can free the iocb object otherwise
2971 * upper layer functions will free the iocb objects.
2972 **/
dea3101e
JB
2973static int
2974lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2975 struct lpfc_iocbq *saveq)
2976{
2977 IOCB_t * irsp;
2978 WORD5 * w5p;
2979 uint32_t Rctl, Type;
76bb24ef 2980 struct lpfc_iocbq *iocbq;
3163f725 2981 struct lpfc_dmabuf *dmzbuf;
dea3101e 2982
dea3101e 2983 irsp = &(saveq->iocb);
57127f15
JS
2984
2985 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2986 if (pring->lpfc_sli_rcv_async_status)
2987 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2988 else
2989 lpfc_printf_log(phba,
2990 KERN_WARNING,
2991 LOG_SLI,
2992 "0316 Ring %d handler: unexpected "
2993 "ASYNC_STATUS iocb received evt_code "
2994 "0x%x\n",
2995 pring->ringno,
2996 irsp->un.asyncstat.evt_code);
2997 return 1;
2998 }
2999
3163f725
JS
3000 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3001 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3002 if (irsp->ulpBdeCount > 0) {
3003 dmzbuf = lpfc_sli_get_buff(phba, pring,
3004 irsp->un.ulpWord[3]);
3005 lpfc_in_buf_free(phba, dmzbuf);
3006 }
3007
3008 if (irsp->ulpBdeCount > 1) {
3009 dmzbuf = lpfc_sli_get_buff(phba, pring,
3010 irsp->unsli3.sli3Words[3]);
3011 lpfc_in_buf_free(phba, dmzbuf);
3012 }
3013
3014 if (irsp->ulpBdeCount > 2) {
3015 dmzbuf = lpfc_sli_get_buff(phba, pring,
3016 irsp->unsli3.sli3Words[7]);
3017 lpfc_in_buf_free(phba, dmzbuf);
3018 }
3019
3020 return 1;
3021 }
3022
92d7f7b0 3023 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
76bb24ef
JS
3024 if (irsp->ulpBdeCount != 0) {
3025 saveq->context2 = lpfc_sli_get_buff(phba, pring,
3026 irsp->un.ulpWord[3]);
3027 if (!saveq->context2)
3028 lpfc_printf_log(phba,
3029 KERN_ERR,
3030 LOG_SLI,
3031 "0341 Ring %d Cannot find buffer for "
3032 "an unsolicited iocb. tag 0x%x\n",
3033 pring->ringno,
3034 irsp->un.ulpWord[3]);
76bb24ef
JS
3035 }
3036 if (irsp->ulpBdeCount == 2) {
3037 saveq->context3 = lpfc_sli_get_buff(phba, pring,
3038 irsp->unsli3.sli3Words[7]);
3039 if (!saveq->context3)
3040 lpfc_printf_log(phba,
3041 KERN_ERR,
3042 LOG_SLI,
3043 "0342 Ring %d Cannot find buffer for an"
3044 " unsolicited iocb. tag 0x%x\n",
3045 pring->ringno,
3046 irsp->unsli3.sli3Words[7]);
3047 }
3048 list_for_each_entry(iocbq, &saveq->list, list) {
76bb24ef 3049 irsp = &(iocbq->iocb);
76bb24ef
JS
3050 if (irsp->ulpBdeCount != 0) {
3051 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3052 irsp->un.ulpWord[3]);
9c2face6 3053 if (!iocbq->context2)
76bb24ef
JS
3054 lpfc_printf_log(phba,
3055 KERN_ERR,
3056 LOG_SLI,
3057 "0343 Ring %d Cannot find "
3058 "buffer for an unsolicited iocb"
3059 ". tag 0x%x\n", pring->ringno,
92d7f7b0 3060 irsp->un.ulpWord[3]);
76bb24ef
JS
3061 }
3062 if (irsp->ulpBdeCount == 2) {
3063 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
51ef4c26 3064 irsp->unsli3.sli3Words[7]);
9c2face6 3065 if (!iocbq->context3)
76bb24ef
JS
3066 lpfc_printf_log(phba,
3067 KERN_ERR,
3068 LOG_SLI,
3069 "0344 Ring %d Cannot find "
3070 "buffer for an unsolicited "
3071 "iocb. tag 0x%x\n",
3072 pring->ringno,
3073 irsp->unsli3.sli3Words[7]);
3074 }
3075 }
92d7f7b0 3076 }
9c2face6
JS
3077 if (irsp->ulpBdeCount != 0 &&
3078 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3079 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3080 int found = 0;
3081
3082 /* search continue save q for same XRI */
3083 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
7851fe2c
JS
3084 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3085 saveq->iocb.unsli3.rcvsli3.ox_id) {
9c2face6
JS
3086 list_add_tail(&saveq->list, &iocbq->list);
3087 found = 1;
3088 break;
3089 }
3090 }
3091 if (!found)
3092 list_add_tail(&saveq->clist,
3093 &pring->iocb_continue_saveq);
3094 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3095 list_del_init(&iocbq->clist);
3096 saveq = iocbq;
3097 irsp = &(saveq->iocb);
3098 } else
3099 return 0;
3100 }
3101 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3102 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3103 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
6a9c52cf
JS
3104 Rctl = FC_RCTL_ELS_REQ;
3105 Type = FC_TYPE_ELS;
9c2face6
JS
3106 } else {
3107 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3108 Rctl = w5p->hcsw.Rctl;
3109 Type = w5p->hcsw.Type;
3110
3111 /* Firmware Workaround */
3112 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3113 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3114 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6a9c52cf
JS
3115 Rctl = FC_RCTL_ELS_REQ;
3116 Type = FC_TYPE_ELS;
9c2face6
JS
3117 w5p->hcsw.Rctl = Rctl;
3118 w5p->hcsw.Type = Type;
3119 }
3120 }
92d7f7b0 3121
3772a991 3122 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
92d7f7b0 3123 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 3124 "0313 Ring %d handler: unexpected Rctl x%x "
92d7f7b0 3125 "Type x%x received\n",
e8b62011 3126 pring->ringno, Rctl, Type);
3772a991 3127
92d7f7b0 3128 return 1;
dea3101e
JB
3129}
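/*
 * Illustrative sketch, not part of the driver: the continuation handling
 * above collects intermediate responses that share an exchange id (ox_id)
 * on a pending list and only hands the complete chain to the upper layer
 * once the final, non-intermediate response arrives. A simplified,
 * hypothetical form of that pattern (all example_* names are invented):
 */
struct example_frag {
	struct list_head pend;		/* link on the pending list */
	struct list_head frags;		/* follow-on fragments (valid on a head) */
	struct list_head list;		/* link on a head's frags list */
	unsigned short ox_id;		/* exchange id shared by all fragments */
	bool final;			/* true on the last fragment */
};

static LIST_HEAD(example_pending);

/* Returns the completed chain head, or NULL while more fragments are due */
static struct example_frag *example_collect(struct example_frag *frag)
{
	struct example_frag *head;

	list_for_each_entry(head, &example_pending, pend) {
		if (head->ox_id != frag->ox_id)
			continue;
		list_add_tail(&frag->list, &head->frags);
		if (!frag->final)
			return NULL;
		list_del_init(&head->pend);
		return head;
	}

	/* First fragment of a new exchange */
	INIT_LIST_HEAD(&frag->frags);
	if (frag->final)
		return frag;		/* single-fragment exchange */
	list_add_tail(&frag->pend, &example_pending);
	return NULL;
}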
3130
e59058c4 3131/**
3621a710 3132 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
e59058c4
JS
3133 * @phba: Pointer to HBA context object.
3134 * @pring: Pointer to driver SLI ring object.
3135 * @prspiocb: Pointer to response iocb object.
3136 *
3137 * This function looks up the iocb_lookup table to get the command iocb
3138 * corresponding to the given response iocb using the iotag of the
e2a8be56
JS
3139 * response iocb. The driver calls this function with the hbalock held
3140 * for SLI3 ports or the ring lock held for SLI4 ports.
e59058c4
JS
3141 * This function returns the command iocb object if it finds the command
3142 * iocb else returns NULL.
3143 **/
dea3101e 3144static struct lpfc_iocbq *
2e0fef85
JS
3145lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3146 struct lpfc_sli_ring *pring,
3147 struct lpfc_iocbq *prspiocb)
dea3101e 3148{
dea3101e
JB
3149 struct lpfc_iocbq *cmd_iocb = NULL;
3150 uint16_t iotag;
e2a8be56
JS
3151 spinlock_t *temp_lock = NULL;
3152 unsigned long iflag = 0;
3153
3154 if (phba->sli_rev == LPFC_SLI_REV4)
3155 temp_lock = &pring->ring_lock;
3156 else
3157 temp_lock = &phba->hbalock;
dea3101e 3158
e2a8be56 3159 spin_lock_irqsave(temp_lock, iflag);
604a3e30
JB
3160 iotag = prspiocb->iocb.ulpIoTag;
3161
3162 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3163 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6 3164 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
89533e9b
JS
3165 /* remove from txcmpl queue list */
3166 list_del_init(&cmd_iocb->list);
4f2e66c6 3167 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
c490850a 3168 pring->txcmplq_cnt--;
e2a8be56 3169 spin_unlock_irqrestore(temp_lock, iflag);
89533e9b 3170 return cmd_iocb;
2a9bf3d0 3171 }
dea3101e
JB
3172 }
3173
e2a8be56 3174 spin_unlock_irqrestore(temp_lock, iflag);
dea3101e 3175 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
89533e9b 3176 "0317 iotag x%x is out of "
604a3e30 3177 "range: max iotag x%x wd0 x%x\n",
e8b62011 3178 iotag, phba->sli.last_iotag,
604a3e30 3179 *(((uint32_t *) &prspiocb->iocb) + 7));
dea3101e
JB
3180 return NULL;
3181}
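/*
 * Illustrative sketch, not part of the driver: the lookup above works
 * because every outstanding command carries a small integer iotag that
 * directly indexes a pointer table, so matching a response to its command
 * is O(1). A minimal, hypothetical version of that idea (the example_*
 * names are invented):
 */
#define EXAMPLE_MAX_IOTAG	1024

struct example_cmd {
	unsigned short iotag;
	/* ... per-command state ... */
};

static struct example_cmd *example_lookup[EXAMPLE_MAX_IOTAG + 1];

static struct example_cmd *example_find_cmd(unsigned short iotag)
{
	/* Tag 0 is never assigned; anything past the table size is bogus */
	if (iotag == 0 || iotag > EXAMPLE_MAX_IOTAG)
		return NULL;
	return example_lookup[iotag];
}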
3182
3772a991
JS
3183/**
3184 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3185 * @phba: Pointer to HBA context object.
3186 * @pring: Pointer to driver SLI ring object.
3187 * @iotag: IOCB tag.
3188 *
3189 * This function looks up the iocb_lookup table to get the command iocb
e2a8be56
JS
3190 * corresponding to the given iotag. The driver calls this function with
3191 * the ring lock held because this function is an SLI4 port only helper.
3772a991
JS
3192 * This function returns the command iocb object if it finds the command
3193 * iocb else returns NULL.
3194 **/
3195static struct lpfc_iocbq *
3196lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3197 struct lpfc_sli_ring *pring, uint16_t iotag)
3198{
895427bd 3199 struct lpfc_iocbq *cmd_iocb = NULL;
e2a8be56
JS
3200 spinlock_t *temp_lock = NULL;
3201 unsigned long iflag = 0;
3772a991 3202
e2a8be56
JS
3203 if (phba->sli_rev == LPFC_SLI_REV4)
3204 temp_lock = &pring->ring_lock;
3205 else
3206 temp_lock = &phba->hbalock;
3207
3208 spin_lock_irqsave(temp_lock, iflag);
3772a991
JS
3209 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3210 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6
JS
3211 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3212 /* remove from txcmpl queue list */
3213 list_del_init(&cmd_iocb->list);
3214 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
c490850a 3215 pring->txcmplq_cnt--;
e2a8be56 3216 spin_unlock_irqrestore(temp_lock, iflag);
4f2e66c6 3217 return cmd_iocb;
2a9bf3d0 3218 }
3772a991 3219 }
89533e9b 3220
e2a8be56 3221 spin_unlock_irqrestore(temp_lock, iflag);
3772a991 3222 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd
JS
3223 "0372 iotag x%x lookup error: max iotag (x%x) "
3224 "iocb_flag x%x\n",
3225 iotag, phba->sli.last_iotag,
3226 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3772a991
JS
3227 return NULL;
3228}
3229
e59058c4 3230/**
3621a710 3231 * lpfc_sli_process_sol_iocb - process solicited iocb completion
e59058c4
JS
3232 * @phba: Pointer to HBA context object.
3233 * @pring: Pointer to driver SLI ring object.
3234 * @saveq: Pointer to the response iocb to be processed.
3235 *
3236 * This function is called by the ring event handler for non-fcp
3237 * rings when there is a new response iocb in the response ring.
3238 * The caller is not required to hold any locks. This function
3239 * gets the command iocb associated with the response iocb and
3240 * calls the completion handler for the command iocb. If there
3241 * is no completion handler, the function will free the resources
3242 * associated with command iocb. If the response iocb is for
3243 * an already aborted command iocb, the status of the completion
3244 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3245 * This function always returns 1.
3246 **/
dea3101e 3247static int
2e0fef85 3248lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea3101e
JB
3249 struct lpfc_iocbq *saveq)
3250{
2e0fef85 3251 struct lpfc_iocbq *cmdiocbp;
dea3101e
JB
3252 int rc = 1;
3253 unsigned long iflag;
3254
604a3e30 3255 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
dea3101e
JB
3256 if (cmdiocbp) {
3257 if (cmdiocbp->iocb_cmpl) {
ea2151b4
JS
3258 /*
3259 * If an ELS command failed send an event to mgmt
3260 * application.
3261 */
3262 if (saveq->iocb.ulpStatus &&
3263 (pring->ringno == LPFC_ELS_RING) &&
3264 (cmdiocbp->iocb.ulpCommand ==
3265 CMD_ELS_REQUEST64_CR))
3266 lpfc_send_els_failure_event(phba,
3267 cmdiocbp, saveq);
3268
dea3101e
JB
3269 /*
3270 * Post all ELS completions to the worker thread.
3271 * All other are passed to the completion callback.
3272 */
3273 if (pring->ringno == LPFC_ELS_RING) {
341af102
JS
3274 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3275 (cmdiocbp->iocb_flag &
3276 LPFC_DRIVER_ABORTED)) {
3277 spin_lock_irqsave(&phba->hbalock,
3278 iflag);
07951076
JS
3279 cmdiocbp->iocb_flag &=
3280 ~LPFC_DRIVER_ABORTED;
341af102
JS
3281 spin_unlock_irqrestore(&phba->hbalock,
3282 iflag);
07951076
JS
3283 saveq->iocb.ulpStatus =
3284 IOSTAT_LOCAL_REJECT;
3285 saveq->iocb.un.ulpWord[4] =
3286 IOERR_SLI_ABORTED;
0ff10d46
JS
3287
3288 /* Firmware could still be in progress
3289 * of DMAing payload, so don't free data
3290 * buffer till after a hbeat.
3291 */
341af102
JS
3292 spin_lock_irqsave(&phba->hbalock,
3293 iflag);
0ff10d46 3294 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
341af102
JS
3295 spin_unlock_irqrestore(&phba->hbalock,
3296 iflag);
3297 }
0f65ff68
JS
3298 if (phba->sli_rev == LPFC_SLI_REV4) {
3299 if (saveq->iocb_flag &
3300 LPFC_EXCHANGE_BUSY) {
3301 /* Set cmdiocb flag for the
3302 * exchange busy so sgl (xri)
3303 * will not be released until
3304 * the abort xri is received
3305 * from hba.
3306 */
3307 spin_lock_irqsave(
3308 &phba->hbalock, iflag);
3309 cmdiocbp->iocb_flag |=
3310 LPFC_EXCHANGE_BUSY;
3311 spin_unlock_irqrestore(
3312 &phba->hbalock, iflag);
3313 }
3314 if (cmdiocbp->iocb_flag &
3315 LPFC_DRIVER_ABORTED) {
3316 /*
3317 * Clear LPFC_DRIVER_ABORTED
3318 * bit in case it was driver
3319 * initiated abort.
3320 */
3321 spin_lock_irqsave(
3322 &phba->hbalock, iflag);
3323 cmdiocbp->iocb_flag &=
3324 ~LPFC_DRIVER_ABORTED;
3325 spin_unlock_irqrestore(
3326 &phba->hbalock, iflag);
3327 cmdiocbp->iocb.ulpStatus =
3328 IOSTAT_LOCAL_REJECT;
3329 cmdiocbp->iocb.un.ulpWord[4] =
3330 IOERR_ABORT_REQUESTED;
3331 /*
3332 * For SLI4, irsiocb contains
3333 * NO_XRI in sli_xritag, it
3334 * shall not affect releasing
3335 * sgl (xri) process.
3336 */
3337 saveq->iocb.ulpStatus =
3338 IOSTAT_LOCAL_REJECT;
3339 saveq->iocb.un.ulpWord[4] =
3340 IOERR_SLI_ABORTED;
3341 spin_lock_irqsave(
3342 &phba->hbalock, iflag);
3343 saveq->iocb_flag |=
3344 LPFC_DELAY_MEM_FREE;
3345 spin_unlock_irqrestore(
3346 &phba->hbalock, iflag);
3347 }
07951076 3348 }
dea3101e 3349 }
2e0fef85 3350 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
604a3e30
JB
3351 } else
3352 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea3101e
JB
3353 } else {
3354 /*
3355 * Unknown initiating command based on the response iotag.
3356 * This could be the case on the ELS ring because of
3357 * lpfc_els_abort().
3358 */
3359 if (pring->ringno != LPFC_ELS_RING) {
3360 /*
3361 * Ring <ringno> handler: unexpected completion IoTag
3362 * <IoTag>
3363 */
a257bf90 3364 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
3365 "0322 Ring %d handler: "
3366 "unexpected completion IoTag x%x "
3367 "Data: x%x x%x x%x x%x\n",
3368 pring->ringno,
3369 saveq->iocb.ulpIoTag,
3370 saveq->iocb.ulpStatus,
3371 saveq->iocb.un.ulpWord[4],
3372 saveq->iocb.ulpCommand,
3373 saveq->iocb.ulpContext);
dea3101e
JB
3374 }
3375 }
68876920 3376
dea3101e
JB
3377 return rc;
3378}
3379
e59058c4 3380/**
3621a710 3381 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
e59058c4
JS
3382 * @phba: Pointer to HBA context object.
3383 * @pring: Pointer to driver SLI ring object.
3384 *
3385 * This function is called from the iocb ring event handlers when
3386 * the put pointer is ahead of the get pointer for a ring. This function signals
3387 * an error attention condition to the worker thread and the worker
3388 * thread will transition the HBA to offline state.
3389 **/
2e0fef85
JS
3390static void
3391lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
875fbdfe 3392{
34b02dcd 3393 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
875fbdfe 3394 /*
025dfdaf 3395 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
875fbdfe
JSEC
3396 * rsp ring <portRspMax>
3397 */
3398 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3399 "0312 Ring %d handler: portRspPut %d "
025dfdaf 3400 "is bigger than rsp ring %d\n",
e8b62011 3401 pring->ringno, le32_to_cpu(pgp->rspPutInx),
7e56aa25 3402 pring->sli.sli3.numRiocb);
875fbdfe 3403
2e0fef85 3404 phba->link_state = LPFC_HBA_ERROR;
875fbdfe
JSEC
3405
3406 /*
3407 * All error attention handlers are posted to
3408 * worker thread
3409 */
3410 phba->work_ha |= HA_ERATT;
3411 phba->work_hs = HS_FFER3;
92d7f7b0 3412
5e9d9b82 3413 lpfc_worker_wake_up(phba);
875fbdfe
JSEC
3414
3415 return;
3416}
3417
9399627f 3418/**
3621a710 3419 * lpfc_poll_eratt - Error attention polling timer timeout handler
9399627f
JS
3420 * @t: Pointer to the timer_list structure embedded in the HBA context object.
3421 *
3422 * This function is invoked by the Error Attention polling timer when the
3423 * timer times out. It will check the SLI Error Attention register for
3424 * possible attention events. If so, it will post an Error Attention event
3425 * and wake up the worker thread to process it. Otherwise, it will set up the
3426 * Error Attention polling timer for the next poll.
3427 **/
f22eb4d3 3428void lpfc_poll_eratt(struct timer_list *t)
9399627f
JS
3429{
3430 struct lpfc_hba *phba;
eb016566 3431 uint32_t eratt = 0;
aa6fbb75 3432 uint64_t sli_intr, cnt;
9399627f 3433
f22eb4d3 3434 phba = from_timer(phba, t, eratt_poll);
9399627f 3435
aa6fbb75
JS
3436 /* Here we will also keep track of interrupts per sec of the hba */
3437 sli_intr = phba->sli.slistat.sli_intr;
3438
3439 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3440 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3441 sli_intr);
3442 else
3443 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3444
65791f1f
JS
3445 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3446 do_div(cnt, phba->eratt_poll_interval);
aa6fbb75
JS
3447 phba->sli.slistat.sli_ips = cnt;
3448
3449 phba->sli.slistat.sli_prev_intr = sli_intr;
3450
9399627f
JS
3451 /* Check chip HA register for error event */
3452 eratt = lpfc_sli_check_eratt(phba);
3453
3454 if (eratt)
3455 /* Tell the worker thread there is work to do */
3456 lpfc_worker_wake_up(phba);
3457 else
3458 /* Restart the timer for next eratt poll */
256ec0d0
JS
3459 mod_timer(&phba->eratt_poll,
3460 jiffies +
65791f1f 3461 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
9399627f
JS
3462 return;
3463}
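/*
 * Illustrative sketch, not part of the driver: the interrupts-per-interval
 * figure above is computed with wrap-aware 64-bit arithmetic and do_div(),
 * since a plain 64-bit divide is not available on 32-bit kernels. The same
 * arithmetic as a standalone, hypothetical helper:
 */
static uint64_t example_rate_per_interval(uint64_t prev, uint64_t curr,
					  uint32_t interval)
{
	uint64_t delta;

	if (prev > curr)	/* counter wrapped past the 64-bit maximum */
		delta = ((uint64_t)(-1) - prev) + curr;
	else
		delta = curr - prev;

	do_div(delta, interval);	/* divides delta in place */
	return delta;
}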
3464
875fbdfe 3465
e59058c4 3466/**
3621a710 3467 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
e59058c4
JS
3468 * @phba: Pointer to HBA context object.
3469 * @pring: Pointer to driver SLI ring object.
3470 * @mask: Host attention register mask for this ring.
3471 *
3472 * This function is called from the interrupt context when there is a ring
3473 * event for the fcp ring. The caller does not hold any lock.
3474 * The function processes each response iocb in the response ring until it
25985edc 3475 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
e59058c4
JS
3476 * LE bit set. The function will call the completion handler of the command iocb
3477 * if the response iocb indicates a completion for a command iocb or it is
3478 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3479 * function if this is an unsolicited iocb.
dea3101e 3480 * This routine presumes LPFC_FCP_RING handling and doesn't bother
45ed1190
JS
3481 * to check it explicitly.
3482 */
3483int
2e0fef85
JS
3484lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3485 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3486{
34b02dcd 3487 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea3101e 3488 IOCB_t *irsp = NULL;
87f6eaff 3489 IOCB_t *entry = NULL;
dea3101e
JB
3490 struct lpfc_iocbq *cmdiocbq = NULL;
3491 struct lpfc_iocbq rspiocbq;
dea3101e
JB
3492 uint32_t status;
3493 uint32_t portRspPut, portRspMax;
3494 int rc = 1;
3495 lpfc_iocb_type type;
3496 unsigned long iflag;
3497 uint32_t rsp_cmpl = 0;
dea3101e 3498
2e0fef85 3499 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e
JB
3500 pring->stats.iocb_event++;
3501
dea3101e
JB
3502 /*
3503 * The next available response entry should never exceed the maximum
3504 * entries. If it does, treat it as an adapter hardware error.
3505 */
7e56aa25 3506 portRspMax = pring->sli.sli3.numRiocb;
dea3101e
JB
3507 portRspPut = le32_to_cpu(pgp->rspPutInx);
3508 if (unlikely(portRspPut >= portRspMax)) {
875fbdfe 3509 lpfc_sli_rsp_pointers_error(phba, pring);
2e0fef85 3510 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e
JB
3511 return 1;
3512 }
45ed1190
JS
3513 if (phba->fcp_ring_in_use) {
3514 spin_unlock_irqrestore(&phba->hbalock, iflag);
3515 return 1;
3516 } else
3517 phba->fcp_ring_in_use = 1;
dea3101e
JB
3518
3519 rmb();
7e56aa25 3520 while (pring->sli.sli3.rspidx != portRspPut) {
87f6eaff
JSEC
3521 /*
3522 * Fetch an entry off the ring and copy it into a local data
3523 * structure. The copy involves a byte-swap since the
3524 * network byte order and pci byte orders are different.
3525 */
ed957684 3526 entry = lpfc_resp_iocb(phba, pring);
858c9f6c 3527 phba->last_completion_time = jiffies;
875fbdfe 3528
7e56aa25
JS
3529 if (++pring->sli.sli3.rspidx >= portRspMax)
3530 pring->sli.sli3.rspidx = 0;
875fbdfe 3531
87f6eaff
JSEC
3532 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3533 (uint32_t *) &rspiocbq.iocb,
ed957684 3534 phba->iocb_rsp_size);
a4bc3379 3535 INIT_LIST_HEAD(&(rspiocbq.list));
87f6eaff
JSEC
3536 irsp = &rspiocbq.iocb;
3537
dea3101e
JB
3538 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3539 pring->stats.iocb_rsp++;
3540 rsp_cmpl++;
3541
3542 if (unlikely(irsp->ulpStatus)) {
92d7f7b0
JS
3543 /*
3544 * If resource errors reported from HBA, reduce
3545 * queuedepths of the SCSI device.
3546 */
3547 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3548 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3549 IOERR_NO_RESOURCES)) {
92d7f7b0 3550 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3551 phba->lpfc_rampdown_queue_depth(phba);
92d7f7b0
JS
3552 spin_lock_irqsave(&phba->hbalock, iflag);
3553 }
3554
dea3101e
JB
3555 /* Rsp ring <ringno> error: IOCB */
3556 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 3557 "0336 Rsp Ring %d error: IOCB Data: "
92d7f7b0 3558 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
e8b62011 3559 pring->ringno,
92d7f7b0
JS
3560 irsp->un.ulpWord[0],
3561 irsp->un.ulpWord[1],
3562 irsp->un.ulpWord[2],
3563 irsp->un.ulpWord[3],
3564 irsp->un.ulpWord[4],
3565 irsp->un.ulpWord[5],
d7c255b2
JS
3566 *(uint32_t *)&irsp->un1,
3567 *((uint32_t *)&irsp->un1 + 1));
dea3101e
JB
3568 }
3569
3570 switch (type) {
3571 case LPFC_ABORT_IOCB:
3572 case LPFC_SOL_IOCB:
3573 /*
3574 * Idle exchange closed via ABTS from port. No iocb
3575 * resources need to be recovered.
3576 */
3577 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
dca9479b 3578 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3579 "0333 IOCB cmd 0x%x"
dca9479b 3580 " processed. Skipping"
92d7f7b0 3581 " completion\n",
dca9479b 3582 irsp->ulpCommand);
dea3101e
JB
3583 break;
3584 }
3585
e2a8be56 3586 spin_unlock_irqrestore(&phba->hbalock, iflag);
604a3e30
JB
3587 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3588 &rspiocbq);
e2a8be56 3589 spin_lock_irqsave(&phba->hbalock, iflag);
0f65ff68
JS
3590 if (unlikely(!cmdiocbq))
3591 break;
3592 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3593 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3594 if (cmdiocbq->iocb_cmpl) {
3595 spin_unlock_irqrestore(&phba->hbalock, iflag);
3596 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3597 &rspiocbq);
3598 spin_lock_irqsave(&phba->hbalock, iflag);
3599 }
dea3101e 3600 break;
a4bc3379 3601 case LPFC_UNSOL_IOCB:
2e0fef85 3602 spin_unlock_irqrestore(&phba->hbalock, iflag);
a4bc3379 3603 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2e0fef85 3604 spin_lock_irqsave(&phba->hbalock, iflag);
a4bc3379 3605 break;
dea3101e
JB
3606 default:
3607 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3608 char adaptermsg[LPFC_MAX_ADPTMSG];
3609 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3610 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3611 MAX_MSG_DATA);
898eb71c
JP
3612 dev_warn(&((phba->pcidev)->dev),
3613 "lpfc%d: %s\n",
dea3101e
JB
3614 phba->brd_no, adaptermsg);
3615 } else {
3616 /* Unknown IOCB command */
3617 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3618 "0334 Unknown IOCB command "
92d7f7b0 3619 "Data: x%x, x%x x%x x%x x%x\n",
e8b62011 3620 type, irsp->ulpCommand,
92d7f7b0
JS
3621 irsp->ulpStatus,
3622 irsp->ulpIoTag,
3623 irsp->ulpContext);
dea3101e
JB
3624 }
3625 break;
3626 }
3627
3628 /*
3629 * The response IOCB has been processed. Update the ring
3630 * pointer in SLIM. If the port response put pointer has not
3631 * been updated, sync the pgp->rspPutInx and fetch the new port
3632 * response put pointer.
3633 */
7e56aa25
JS
3634 writel(pring->sli.sli3.rspidx,
3635 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3636
7e56aa25 3637 if (pring->sli.sli3.rspidx == portRspPut)
dea3101e
JB
3638 portRspPut = le32_to_cpu(pgp->rspPutInx);
3639 }
3640
3641 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3642 pring->stats.iocb_rsp_full++;
3643 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3644 writel(status, phba->CAregaddr);
3645 readl(phba->CAregaddr);
3646 }
3647 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3648 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3649 pring->stats.iocb_cmd_empty++;
3650
3651 /* Force update of the local copy of cmdGetInx */
7e56aa25 3652 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e
JB
3653 lpfc_sli_resume_iocb(phba, pring);
3654
3655 if ((pring->lpfc_sli_cmd_available))
3656 (pring->lpfc_sli_cmd_available) (phba, pring);
3657
3658 }
3659
45ed1190 3660 phba->fcp_ring_in_use = 0;
2e0fef85 3661 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e
JB
3662 return rc;
3663}
3664
e59058c4 3665/**
3772a991
JS
3666 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3667 * @phba: Pointer to HBA context object.
3668 * @pring: Pointer to driver SLI ring object.
3669 * @rspiocbp: Pointer to driver response IOCB object.
3670 *
3671 * This function is called from the worker thread when there is a slow-path
3672 * response IOCB to process. This function chains all the response iocbs until
3673 * seeing the iocb with the LE bit set. The function will call
3674 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3675 * completion of a command iocb. The function will call the
3676 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3677 * The function frees the resources or calls the completion handler if this
3678 * iocb is an abort completion. The function returns NULL when the response
3679 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3680 * this function shall chain the iocb on to the iocb_continueq and return the
3681 * response iocb passed in.
3682 **/
3683static struct lpfc_iocbq *
3684lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3685 struct lpfc_iocbq *rspiocbp)
3686{
3687 struct lpfc_iocbq *saveq;
3688 struct lpfc_iocbq *cmdiocbp;
3689 struct lpfc_iocbq *next_iocb;
3690 IOCB_t *irsp = NULL;
3691 uint32_t free_saveq;
3692 uint8_t iocb_cmd_type;
3693 lpfc_iocb_type type;
3694 unsigned long iflag;
3695 int rc;
3696
3697 spin_lock_irqsave(&phba->hbalock, iflag);
3698 /* First add the response iocb to the continueq list */
3699 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3700 pring->iocb_continueq_cnt++;
3701
70f23fd6 3702 /* Now, determine whether the list is completed for processing */
3772a991
JS
3703 irsp = &rspiocbp->iocb;
3704 if (irsp->ulpLe) {
3705 /*
3706 * By default, the driver expects to free all resources
3707 * associated with this iocb completion.
3708 */
3709 free_saveq = 1;
3710 saveq = list_get_first(&pring->iocb_continueq,
3711 struct lpfc_iocbq, list);
3712 irsp = &(saveq->iocb);
3713 list_del_init(&pring->iocb_continueq);
3714 pring->iocb_continueq_cnt = 0;
3715
3716 pring->stats.iocb_rsp++;
3717
3718 /*
3719 * If resource errors reported from HBA, reduce
3720 * queuedepths of the SCSI device.
3721 */
3722 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3723 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3724 IOERR_NO_RESOURCES)) {
3772a991
JS
3725 spin_unlock_irqrestore(&phba->hbalock, iflag);
3726 phba->lpfc_rampdown_queue_depth(phba);
3727 spin_lock_irqsave(&phba->hbalock, iflag);
3728 }
3729
3730 if (irsp->ulpStatus) {
3731 /* Rsp ring <ringno> error: IOCB */
3732 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3733 "0328 Rsp Ring %d error: "
3734 "IOCB Data: "
3735 "x%x x%x x%x x%x "
3736 "x%x x%x x%x x%x "
3737 "x%x x%x x%x x%x "
3738 "x%x x%x x%x x%x\n",
3739 pring->ringno,
3740 irsp->un.ulpWord[0],
3741 irsp->un.ulpWord[1],
3742 irsp->un.ulpWord[2],
3743 irsp->un.ulpWord[3],
3744 irsp->un.ulpWord[4],
3745 irsp->un.ulpWord[5],
3746 *(((uint32_t *) irsp) + 6),
3747 *(((uint32_t *) irsp) + 7),
3748 *(((uint32_t *) irsp) + 8),
3749 *(((uint32_t *) irsp) + 9),
3750 *(((uint32_t *) irsp) + 10),
3751 *(((uint32_t *) irsp) + 11),
3752 *(((uint32_t *) irsp) + 12),
3753 *(((uint32_t *) irsp) + 13),
3754 *(((uint32_t *) irsp) + 14),
3755 *(((uint32_t *) irsp) + 15));
3756 }
3757
3758 /*
3759 * Fetch the IOCB command type and call the correct completion
3760 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3761 * get freed back to the lpfc_iocb_list by the discovery
3762 * kernel thread.
3763 */
3764 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3765 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3766 switch (type) {
3767 case LPFC_SOL_IOCB:
3768 spin_unlock_irqrestore(&phba->hbalock, iflag);
3769 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3770 spin_lock_irqsave(&phba->hbalock, iflag);
3771 break;
3772
3773 case LPFC_UNSOL_IOCB:
3774 spin_unlock_irqrestore(&phba->hbalock, iflag);
3775 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3776 spin_lock_irqsave(&phba->hbalock, iflag);
3777 if (!rc)
3778 free_saveq = 0;
3779 break;
3780
3781 case LPFC_ABORT_IOCB:
3782 cmdiocbp = NULL;
e2a8be56
JS
3783 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3784 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991
JS
3785 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3786 saveq);
e2a8be56
JS
3787 spin_lock_irqsave(&phba->hbalock, iflag);
3788 }
3772a991
JS
3789 if (cmdiocbp) {
3790 /* Call the specified completion routine */
3791 if (cmdiocbp->iocb_cmpl) {
3792 spin_unlock_irqrestore(&phba->hbalock,
3793 iflag);
3794 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3795 saveq);
3796 spin_lock_irqsave(&phba->hbalock,
3797 iflag);
3798 } else
3799 __lpfc_sli_release_iocbq(phba,
3800 cmdiocbp);
3801 }
3802 break;
3803
3804 case LPFC_UNKNOWN_IOCB:
3805 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3806 char adaptermsg[LPFC_MAX_ADPTMSG];
3807 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3808 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3809 MAX_MSG_DATA);
3810 dev_warn(&((phba->pcidev)->dev),
3811 "lpfc%d: %s\n",
3812 phba->brd_no, adaptermsg);
3813 } else {
3814 /* Unknown IOCB command */
3815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3816 "0335 Unknown IOCB "
3817 "command Data: x%x "
3818 "x%x x%x x%x\n",
3819 irsp->ulpCommand,
3820 irsp->ulpStatus,
3821 irsp->ulpIoTag,
3822 irsp->ulpContext);
3823 }
3824 break;
3825 }
3826
3827 if (free_saveq) {
3828 list_for_each_entry_safe(rspiocbp, next_iocb,
3829 &saveq->list, list) {
61f35bff 3830 list_del_init(&rspiocbp->list);
3772a991
JS
3831 __lpfc_sli_release_iocbq(phba, rspiocbp);
3832 }
3833 __lpfc_sli_release_iocbq(phba, saveq);
3834 }
3835 rspiocbp = NULL;
3836 }
3837 spin_unlock_irqrestore(&phba->hbalock, iflag);
3838 return rspiocbp;
3839}
3840
3841/**
3842 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
e59058c4
JS
3843 * @phba: Pointer to HBA context object.
3844 * @pring: Pointer to driver SLI ring object.
3845 * @mask: Host attention register mask for this ring.
3846 *
3772a991
JS
3847 * This routine wraps the actual slow_ring event process routine from the
3848 * API jump table function pointer from the lpfc_hba struct.
e59058c4 3849 **/
3772a991 3850void
2e0fef85
JS
3851lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3852 struct lpfc_sli_ring *pring, uint32_t mask)
3772a991
JS
3853{
3854 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3855}
3856
3857/**
3858 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3859 * @phba: Pointer to HBA context object.
3860 * @pring: Pointer to driver SLI ring object.
3861 * @mask: Host attention register mask for this ring.
3862 *
3863 * This function is called from the worker thread when there is a ring event
3864 * for non-fcp rings. The caller does not hold any lock. The function will
3865 * remove each response iocb from the response ring and call the handle
3866 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3867 **/
3868static void
3869lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3870 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3871{
34b02dcd 3872 struct lpfc_pgp *pgp;
dea3101e
JB
3873 IOCB_t *entry;
3874 IOCB_t *irsp = NULL;
3875 struct lpfc_iocbq *rspiocbp = NULL;
dea3101e 3876 uint32_t portRspPut, portRspMax;
dea3101e 3877 unsigned long iflag;
3772a991 3878 uint32_t status;
dea3101e 3879
34b02dcd 3880 pgp = &phba->port_gp[pring->ringno];
2e0fef85 3881 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e
JB
3882 pring->stats.iocb_event++;
3883
dea3101e
JB
3884 /*
3885 * The next available response entry should never exceed the maximum
3886 * entries. If it does, treat it as an adapter hardware error.
3887 */
7e56aa25 3888 portRspMax = pring->sli.sli3.numRiocb;
dea3101e
JB
3889 portRspPut = le32_to_cpu(pgp->rspPutInx);
3890 if (portRspPut >= portRspMax) {
3891 /*
025dfdaf 3892 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea3101e
JB
3893 * rsp ring <portRspMax>
3894 */
ed957684 3895 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3896 "0303 Ring %d handler: portRspPut %d "
025dfdaf 3897 "is bigger than rsp ring %d\n",
e8b62011 3898 pring->ringno, portRspPut, portRspMax);
dea3101e 3899
2e0fef85
JS
3900 phba->link_state = LPFC_HBA_ERROR;
3901 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e
JB
3902
3903 phba->work_hs = HS_FFER3;
3904 lpfc_handle_eratt(phba);
3905
3772a991 3906 return;
dea3101e
JB
3907 }
3908
3909 rmb();
7e56aa25 3910 while (pring->sli.sli3.rspidx != portRspPut) {
dea3101e
JB
3911 /*
3912 * Build a completion list and call the appropriate handler.
3913 * The process is to get the next available response iocb, get
3914 * a free iocb from the list, copy the response data into the
3915 * free iocb, insert to the continuation list, and update the
3916 * next response index to slim. This process makes response
3917 * iocb's in the ring available to DMA as fast as possible but
3918 * pays a penalty for a copy operation. Since the iocb is
3919 * only 32 bytes, this penalty is considered small relative to
3920 * the PCI reads for register values and a slim write. When
3921 * the ulpLe field is set, the entire Command has been
3922 * received.
3923 */
ed957684
JS
3924 entry = lpfc_resp_iocb(phba, pring);
3925
858c9f6c 3926 phba->last_completion_time = jiffies;
2e0fef85 3927 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e
JB
3928 if (rspiocbp == NULL) {
3929 printk(KERN_ERR "%s: out of buffers! Failing "
cadbd4a5 3930 "completion.\n", __func__);
dea3101e
JB
3931 break;
3932 }
3933
ed957684
JS
3934 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3935 phba->iocb_rsp_size);
dea3101e
JB
3936 irsp = &rspiocbp->iocb;
3937
7e56aa25
JS
3938 if (++pring->sli.sli3.rspidx >= portRspMax)
3939 pring->sli.sli3.rspidx = 0;
dea3101e 3940
a58cbd52
JS
3941 if (pring->ringno == LPFC_ELS_RING) {
3942 lpfc_debugfs_slow_ring_trc(phba,
3943 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3944 *(((uint32_t *) irsp) + 4),
3945 *(((uint32_t *) irsp) + 6),
3946 *(((uint32_t *) irsp) + 7));
3947 }
3948
7e56aa25
JS
3949 writel(pring->sli.sli3.rspidx,
3950 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3951
3772a991
JS
3952 spin_unlock_irqrestore(&phba->hbalock, iflag);
3953 /* Handle the response IOCB */
3954 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3955 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e
JB
3956
3957 /*
3958 * If the port response put pointer has not been updated, sync
3959 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3960 * response put pointer.
3961 */
7e56aa25 3962 if (pring->sli.sli3.rspidx == portRspPut) {
dea3101e
JB
3963 portRspPut = le32_to_cpu(pgp->rspPutInx);
3964 }
7e56aa25 3965 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea3101e 3966
92d7f7b0 3967 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea3101e
JB
3968 /* At least one response entry has been freed */
3969 pring->stats.iocb_rsp_full++;
3970 /* SET RxRE_RSP in Chip Att register */
3971 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3972 writel(status, phba->CAregaddr);
3973 readl(phba->CAregaddr); /* flush */
3974 }
3975 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3976 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3977 pring->stats.iocb_cmd_empty++;
3978
3979 /* Force update of the local copy of cmdGetInx */
7e56aa25 3980 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e
JB
3981 lpfc_sli_resume_iocb(phba, pring);
3982
3983 if ((pring->lpfc_sli_cmd_available))
3984 (pring->lpfc_sli_cmd_available) (phba, pring);
3985
3986 }
3987
2e0fef85 3988 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3989 return;
dea3101e
JB
3990}
3991
4f774513
JS
3992/**
3993 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3994 * @phba: Pointer to HBA context object.
3995 * @pring: Pointer to driver SLI ring object.
3996 * @mask: Host attention register mask for this ring.
3997 *
3998 * This function is called from the worker thread when there is a pending
3999 * ELS response iocb on the driver internal slow-path response iocb worker
4000 * queue. The caller does not hold any lock. The function will remove each
4001 * response iocb from the response worker queue and call the handle
4002 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4003 **/
4004static void
4005lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4006 struct lpfc_sli_ring *pring, uint32_t mask)
4007{
4008 struct lpfc_iocbq *irspiocbq;
4d9ab994
JS
4009 struct hbq_dmabuf *dmabuf;
4010 struct lpfc_cq_event *cq_event;
4f774513 4011 unsigned long iflag;
0ef01a2d 4012 int count = 0;
4f774513 4013
45ed1190
JS
4014 spin_lock_irqsave(&phba->hbalock, iflag);
4015 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4016 spin_unlock_irqrestore(&phba->hbalock, iflag);
4017 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4f774513
JS
4018 /* Get the response iocb from the head of work queue */
4019 spin_lock_irqsave(&phba->hbalock, iflag);
45ed1190 4020 list_remove_head(&phba->sli4_hba.sp_queue_event,
4d9ab994 4021 cq_event, struct lpfc_cq_event, list);
4f774513 4022 spin_unlock_irqrestore(&phba->hbalock, iflag);
4d9ab994
JS
4023
4024 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4025 case CQE_CODE_COMPL_WQE:
4026 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4027 cq_event);
45ed1190
JS
4028 /* Translate ELS WCQE to response IOCBQ */
4029 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
4030 irspiocbq);
4031 if (irspiocbq)
4032 lpfc_sli_sp_handle_rspiocb(phba, pring,
4033 irspiocbq);
0ef01a2d 4034 count++;
4d9ab994
JS
4035 break;
4036 case CQE_CODE_RECEIVE:
7851fe2c 4037 case CQE_CODE_RECEIVE_V1:
4d9ab994
JS
4038 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4039 cq_event);
4040 lpfc_sli4_handle_received_buffer(phba, dmabuf);
0ef01a2d 4041 count++;
4d9ab994
JS
4042 break;
4043 default:
4044 break;
4045 }
0ef01a2d
JS
4046
4047 /* Limit the number of events to 64 to avoid soft lockups */
4048 if (count == 64)
4049 break;
4f774513
JS
4050 }
4051}
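/*
 * Illustrative sketch, not part of the driver: capping the number of events
 * handled per invocation, as the count of 64 does above, bounds the time
 * spent in this context and avoids soft lockups. A generic, hypothetical
 * form of that budgeted-drain pattern (the example_* names are invented):
 */
#define EXAMPLE_EVENT_BUDGET	64

struct example_event {
	struct list_head list;
};

static void example_handle_event(struct example_event *evt)
{
	/* process a single event */
}

static void example_drain_events(struct list_head *queue, spinlock_t *lock)
{
	struct example_event *evt;
	unsigned long flags;
	int count = 0;

	while (count < EXAMPLE_EVENT_BUDGET) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(queue)) {
			spin_unlock_irqrestore(lock, flags);
			break;
		}
		evt = list_first_entry(queue, struct example_event, list);
		list_del_init(&evt->list);
		spin_unlock_irqrestore(lock, flags);

		example_handle_event(evt);
		count++;
	}
}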
4052
e59058c4 4053/**
3621a710 4054 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
e59058c4
JS
4055 * @phba: Pointer to HBA context object.
4056 * @pring: Pointer to driver SLI ring object.
4057 *
4058 * This function aborts all iocbs in the given ring and frees all the iocb
4059 * objects in txq. This function issues an abort iocb for all the iocb commands
4060 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4061 * the return of this function. The caller is not required to hold any locks.
4062 **/
2e0fef85 4063void
dea3101e
JB
4064lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4065{
2534ba75 4066 LIST_HEAD(completions);
dea3101e 4067 struct lpfc_iocbq *iocb, *next_iocb;
dea3101e 4068
92d7f7b0
JS
4069 if (pring->ringno == LPFC_ELS_RING) {
4070 lpfc_fabric_abort_hba(phba);
4071 }
4072
dea3101e
JB
4073 /* Error everything on txq and txcmplq
4074 * First do the txq.
4075 */
db55fba8
JS
4076 if (phba->sli_rev >= LPFC_SLI_REV4) {
4077 spin_lock_irq(&pring->ring_lock);
4078 list_splice_init(&pring->txq, &completions);
4079 pring->txq_cnt = 0;
4080 spin_unlock_irq(&pring->ring_lock);
dea3101e 4081
db55fba8
JS
4082 spin_lock_irq(&phba->hbalock);
4083 /* Next issue ABTS for everything on the txcmplq */
4084 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4085 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
4086 spin_unlock_irq(&phba->hbalock);
4087 } else {
4088 spin_lock_irq(&phba->hbalock);
4089 list_splice_init(&pring->txq, &completions);
4090 pring->txq_cnt = 0;
dea3101e 4091
db55fba8
JS
4092 /* Next issue ABTS for everything on the txcmplq */
4093 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4094 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
4095 spin_unlock_irq(&phba->hbalock);
4096 }
dea3101e 4097
a257bf90
JS
4098 /* Cancel all the IOCBs from the completions list */
4099 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4100 IOERR_SLI_ABORTED);
dea3101e
JB
4101}
4102
db55fba8
JS
4103/**
4104 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4105 * @phba: Pointer to HBA context object.
4107 *
4108 * This function aborts all iocbs in FCP rings and frees all the iocb
4109 * objects in txq. This function issues an abort iocb for all the iocb commands
4110 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4111 * the return of this function. The caller is not required to hold any locks.
4112 **/
4113void
4114lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4115{
4116 struct lpfc_sli *psli = &phba->sli;
4117 struct lpfc_sli_ring *pring;
4118 uint32_t i;
4119
4120 /* Look on all the FCP Rings for the iotag */
4121 if (phba->sli_rev >= LPFC_SLI_REV4) {
cdb42bec 4122 for (i = 0; i < phba->cfg_hdw_queue; i++) {
c00f62e6 4123 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
db55fba8
JS
4124 lpfc_sli_abort_iocb_ring(phba, pring);
4125 }
4126 } else {
895427bd 4127 pring = &psli->sli3_ring[LPFC_FCP_RING];
db55fba8
JS
4128 lpfc_sli_abort_iocb_ring(phba, pring);
4129 }
4130}
4131
a8e497d5 4132/**
c00f62e6 4133 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
a8e497d5
JS
4134 * @phba: Pointer to HBA context object.
4135 *
c00f62e6 4136 * This function flushes all iocbs in the IO ring and frees all the iocb
a8e497d5
JS
4137 * objects in txq and txcmplq. This function will not issue abort iocbs
4138 * for all the iocb commands in txcmplq; they will just be returned with
4139 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
4140 * slot has been permanently disabled.
4141 **/
4142void
c00f62e6 4143lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
a8e497d5
JS
4144{
4145 LIST_HEAD(txq);
4146 LIST_HEAD(txcmplq);
a8e497d5
JS
4147 struct lpfc_sli *psli = &phba->sli;
4148 struct lpfc_sli_ring *pring;
db55fba8 4149 uint32_t i;
c1dd9111 4150 struct lpfc_iocbq *piocb, *next_iocb;
a8e497d5
JS
4151
4152 spin_lock_irq(&phba->hbalock);
4cd70891
JS
4153 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4154 !phba->sli4_hba.hdwq) {
4155 spin_unlock_irq(&phba->hbalock);
4156 return;
4157 }
4f2e66c6 4158 /* Indicate the I/O queues are flushed */
c00f62e6 4159 phba->hba_flag |= HBA_IOQ_FLUSH;
a8e497d5
JS
4160 spin_unlock_irq(&phba->hbalock);
4161
db55fba8
JS
4162 /* Look on all the FCP Rings for the iotag */
4163 if (phba->sli_rev >= LPFC_SLI_REV4) {
cdb42bec 4164 for (i = 0; i < phba->cfg_hdw_queue; i++) {
c00f62e6 4165 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
db55fba8
JS
4166
4167 spin_lock_irq(&pring->ring_lock);
4168 /* Retrieve everything on txq */
4169 list_splice_init(&pring->txq, &txq);
c1dd9111
JS
4170 list_for_each_entry_safe(piocb, next_iocb,
4171 &pring->txcmplq, list)
4172 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
db55fba8
JS
4173 /* Retrieve everything on the txcmplq */
4174 list_splice_init(&pring->txcmplq, &txcmplq);
4175 pring->txq_cnt = 0;
4176 pring->txcmplq_cnt = 0;
4177 spin_unlock_irq(&pring->ring_lock);
4178
4179 /* Flush the txq */
4180 lpfc_sli_cancel_iocbs(phba, &txq,
4181 IOSTAT_LOCAL_REJECT,
4182 IOERR_SLI_DOWN);
4183 /* Flush the txcmpq */
4184 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4185 IOSTAT_LOCAL_REJECT,
4186 IOERR_SLI_DOWN);
4187 }
4188 } else {
895427bd 4189 pring = &psli->sli3_ring[LPFC_FCP_RING];
a8e497d5 4190
db55fba8
JS
4191 spin_lock_irq(&phba->hbalock);
4192 /* Retrieve everything on txq */
4193 list_splice_init(&pring->txq, &txq);
c1dd9111
JS
4194 list_for_each_entry_safe(piocb, next_iocb,
4195 &pring->txcmplq, list)
4196 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
db55fba8
JS
4197 /* Retrieve everything on the txcmplq */
4198 list_splice_init(&pring->txcmplq, &txcmplq);
4199 pring->txq_cnt = 0;
4200 pring->txcmplq_cnt = 0;
4201 spin_unlock_irq(&phba->hbalock);
4202
4203 /* Flush the txq */
4204 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4205 IOERR_SLI_DOWN);
4206 /* Flush the txcmpq */
4207 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4208 IOERR_SLI_DOWN);
4209 }
a8e497d5
JS
4210}
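/*
 * Illustrative sketch, not part of the driver: the flush above splices the
 * txq and txcmplq onto local list heads while the lock is held and only
 * fails the entries after the lock is dropped, so no completion callback
 * ever runs under the ring lock. A stripped-down, hypothetical version of
 * that idiom (the example_* names are invented):
 */
struct example_io {
	struct list_head list;
};

static void example_complete_io(struct example_io *io, int status)
{
	/* report the outcome to the submitter */
}

static void example_flush_queue(struct list_head *queue, spinlock_t *lock)
{
	LIST_HEAD(local);
	struct example_io *io, *next;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_splice_init(queue, &local);	/* queue is left empty */
	spin_unlock_irqrestore(lock, flags);

	list_for_each_entry_safe(io, next, &local, list) {
		list_del_init(&io->list);
		example_complete_io(io, -ENODEV);
	}
}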
4211
e59058c4 4212/**
3772a991 4213 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
e59058c4
JS
4214 * @phba: Pointer to HBA context object.
4215 * @mask: Bit mask to be checked.
4216 *
4217 * This function reads the host status register and compares
4218 * it with the provided bit mask to check if the HBA has completed
4219 * the restart. This function will wait in a loop for the
4220 * HBA to complete restart. If the HBA does not restart within
4221 * 15 iterations, the function will reset the HBA again. The
4222 * function returns 1 when the HBA fails to restart; otherwise it returns
4223 * zero.
4224 **/
3772a991
JS
4225static int
4226lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea3101e 4227{
41415862
JW
4228 uint32_t status;
4229 int i = 0;
4230 int retval = 0;
dea3101e 4231
41415862 4232 /* Read the HBA Host Status Register */
9940b97b
JS
4233 if (lpfc_readl(phba->HSregaddr, &status))
4234 return 1;
dea3101e 4235
41415862
JW
4236 /*
4237 * Check status register every 100ms for 5 retries, then every
4238 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4239 * every 2.5 sec for 4.
4240 * Break out of the loop if errors occurred during init.
4241 */
4242 while (((status & mask) != mask) &&
4243 !(status & HS_FFERM) &&
4244 i++ < 20) {
dea3101e 4245
41415862
JW
4246 if (i <= 5)
4247 msleep(10);
4248 else if (i <= 10)
4249 msleep(500);
4250 else
4251 msleep(2500);
dea3101e 4252
41415862 4253 if (i == 15) {
2e0fef85 4254 /* Do post */
92d7f7b0 4255 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862
JW
4256 lpfc_sli_brdrestart(phba);
4257 }
4258 /* Read the HBA Host Status Register */
9940b97b
JS
4259 if (lpfc_readl(phba->HSregaddr, &status)) {
4260 retval = 1;
4261 break;
4262 }
41415862 4263 }
dea3101e 4264
41415862
JW
4265 /* Check to see if any errors occurred during init */
4266 if ((status & HS_FFERM) || (i >= 20)) {
e40a02c1
JS
4267 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4268 "2751 Adapter failed to restart, "
4269 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4270 status,
4271 readl(phba->MBslimaddr + 0xa8),
4272 readl(phba->MBslimaddr + 0xac));
2e0fef85 4273 phba->link_state = LPFC_HBA_ERROR;
41415862 4274 retval = 1;
dea3101e 4275 }
dea3101e 4276
41415862
JW
4277 return retval;
4278}
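/*
 * Illustrative sketch, not part of the driver: the ready-wait above polls
 * with a staged backoff (short sleeps first, then progressively longer
 * ones) so a fast restart is noticed quickly while a slow one does not
 * spin the CPU. A condensed, hypothetical form of that schedule:
 */
static int example_wait_ready(bool (*ready)(void))
{
	int i;

	for (i = 0; i < 20 && !ready(); i++) {
		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);
	}
	return ready() ? 0 : -ETIMEDOUT;
}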
dea3101e 4279
da0436e9
JS
4280/**
4281 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4282 * @phba: Pointer to HBA context object.
4283 * @mask: Bit mask to be checked.
4284 *
4285 * This function checks the host status register to check if HBA is
4286 * ready. This function will wait in a loop for the HBA to be ready.
4287 * If the HBA is not ready, the function will reset the HBA PCI
4288 * function again. The function returns 1 when the HBA fails to be ready;
4289 * otherwise it returns zero.
4290 **/
4291static int
4292lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4293{
4294 uint32_t status;
4295 int retval = 0;
4296
4297 /* Read the HBA Host Status Register */
4298 status = lpfc_sli4_post_status_check(phba);
4299
4300 if (status) {
4301 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4302 lpfc_sli_brdrestart(phba);
4303 status = lpfc_sli4_post_status_check(phba);
4304 }
4305
4306 /* Check to see if any errors occurred during init */
4307 if (status) {
4308 phba->link_state = LPFC_HBA_ERROR;
4309 retval = 1;
4310 } else
4311 phba->sli4_hba.intr_enable = 0;
4312
4313 return retval;
4314}
4315
4316/**
4317 * lpfc_sli_brdready - Wrapper func for checking the hba readyness
4318 * @phba: Pointer to HBA context object.
4319 * @mask: Bit mask to be checked.
4320 *
4321 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine
4322 * from the API jump table function pointer from the lpfc_hba struct.
4323 **/
4324int
4325lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4326{
4327 return phba->lpfc_sli_brdready(phba, mask);
4328}
4329
9290831f
JS
4330#define BARRIER_TEST_PATTERN (0xdeadbeef)
4331
e59058c4 4332/**
3621a710 4333 * lpfc_reset_barrier - Make HBA ready for HBA reset
e59058c4
JS
4334 * @phba: Pointer to HBA context object.
4335 *
1b51197d
JS
4336 * This function is called before resetting an HBA. This function is called
4337 * with hbalock held and requests HBA to quiesce DMAs before a reset.
e59058c4 4338 **/
2e0fef85 4339void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 4340{
65a29c16
JS
4341 uint32_t __iomem *resp_buf;
4342 uint32_t __iomem *mbox_buf;
9290831f 4343 volatile uint32_t mbox;
9940b97b 4344 uint32_t hc_copy, ha_copy, resp_data;
9290831f
JS
4345 int i;
4346 uint8_t hdrtype;
4347
1c2ba475
JT
4348 lockdep_assert_held(&phba->hbalock);
4349
9290831f
JS
4350 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4351 if (hdrtype != 0x80 ||
4352 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4353 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4354 return;
4355
4356 /*
4357 * Tell the other part of the chip to suspend temporarily all
4358 * its DMA activity.
4359 */
65a29c16 4360 resp_buf = phba->MBslimaddr;
9290831f
JS
4361
4362 /* Disable the error attention */
9940b97b
JS
4363 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4364 return;
9290831f
JS
4365 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4366 readl(phba->HCregaddr); /* flush */
2e0fef85 4367 phba->link_flag |= LS_IGNORE_ERATT;
9290831f 4368
9940b97b
JS
4369 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4370 return;
4371 if (ha_copy & HA_ERATT) {
9290831f
JS
4372 /* Clear Chip error bit */
4373 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4374 phba->pport->stopped = 1;
9290831f
JS
4375 }
4376
4377 mbox = 0;
4378 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4379 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4380
4381 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 4382 mbox_buf = phba->MBslimaddr;
9290831f
JS
4383 writel(mbox, mbox_buf);
4384
9940b97b
JS
4385 for (i = 0; i < 50; i++) {
4386 if (lpfc_readl((resp_buf + 1), &resp_data))
4387 return;
4388 if (resp_data != ~(BARRIER_TEST_PATTERN))
4389 mdelay(1);
4390 else
4391 break;
4392 }
4393 resp_data = 0;
4394 if (lpfc_readl((resp_buf + 1), &resp_data))
4395 return;
4396 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
f4b4c68f 4397 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2e0fef85 4398 phba->pport->stopped)
9290831f
JS
4399 goto restore_hc;
4400 else
4401 goto clear_errat;
4402 }
4403
4404 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
9940b97b
JS
4405 resp_data = 0;
4406 for (i = 0; i < 500; i++) {
4407 if (lpfc_readl(resp_buf, &resp_data))
4408 return;
4409 if (resp_data != mbox)
4410 mdelay(1);
4411 else
4412 break;
4413 }
9290831f
JS
4414
4415clear_errat:
4416
9940b97b
JS
4417 while (++i < 500) {
4418 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4419 return;
4420 if (!(ha_copy & HA_ERATT))
4421 mdelay(1);
4422 else
4423 break;
4424 }
9290831f
JS
4425
4426 if (readl(phba->HAregaddr) & HA_ERATT) {
4427 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4428 phba->pport->stopped = 1;
9290831f
JS
4429 }
4430
4431restore_hc:
2e0fef85 4432 phba->link_flag &= ~LS_IGNORE_ERATT;
9290831f
JS
4433 writel(hc_copy, phba->HCregaddr);
4434 readl(phba->HCregaddr); /* flush */
4435}
4436
e59058c4 4437/**
3621a710 4438 * lpfc_sli_brdkill - Issue a kill_board mailbox command
e59058c4
JS
4439 * @phba: Pointer to HBA context object.
4440 *
4441 * This function issues a kill_board mailbox command and waits for
4442 * the error attention interrupt. This function is called for stopping
4443 * the firmware processing. The caller is not required to hold any
4444 * locks. This function calls lpfc_hba_down_post function to free
4445 * any pending commands after the kill. The function will return 1 when it
4446 * fails to kill the board; otherwise it will return 0.
4447 **/
41415862 4448int
2e0fef85 4449lpfc_sli_brdkill(struct lpfc_hba *phba)
41415862
JW
4450{
4451 struct lpfc_sli *psli;
4452 LPFC_MBOXQ_t *pmb;
4453 uint32_t status;
4454 uint32_t ha_copy;
4455 int retval;
4456 int i = 0;
dea3101e 4457
41415862 4458 psli = &phba->sli;
dea3101e 4459
41415862 4460 /* Kill HBA */
ed957684 4461 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
4462 "0329 Kill HBA Data: x%x x%x\n",
4463 phba->pport->port_state, psli->sli_flag);
41415862 4464
98c9ea5c
JS
4465 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4466 if (!pmb)
41415862 4467 return 1;
41415862
JW
4468
4469 /* Disable the error attention */
2e0fef85 4470 spin_lock_irq(&phba->hbalock);
9940b97b
JS
4471 if (lpfc_readl(phba->HCregaddr, &status)) {
4472 spin_unlock_irq(&phba->hbalock);
4473 mempool_free(pmb, phba->mbox_mem_pool);
4474 return 1;
4475 }
41415862
JW
4476 status &= ~HC_ERINT_ENA;
4477 writel(status, phba->HCregaddr);
4478 readl(phba->HCregaddr); /* flush */
2e0fef85
JS
4479 phba->link_flag |= LS_IGNORE_ERATT;
4480 spin_unlock_irq(&phba->hbalock);
41415862
JW
4481
4482 lpfc_kill_board(phba, pmb);
4483 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4484 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4485
4486 if (retval != MBX_SUCCESS) {
4487 if (retval != MBX_BUSY)
4488 mempool_free(pmb, phba->mbox_mem_pool);
e40a02c1
JS
4489 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4490 "2752 KILL_BOARD command failed retval %d\n",
4491 retval);
2e0fef85
JS
4492 spin_lock_irq(&phba->hbalock);
4493 phba->link_flag &= ~LS_IGNORE_ERATT;
4494 spin_unlock_irq(&phba->hbalock);
41415862
JW
4495 return 1;
4496 }
4497
f4b4c68f
JS
4498 spin_lock_irq(&phba->hbalock);
4499 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4500 spin_unlock_irq(&phba->hbalock);
9290831f 4501
41415862
JW
4502 mempool_free(pmb, phba->mbox_mem_pool);
4503
4504 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4505 * attention every 100ms for 3 seconds. If we don't get ERATT after
4506 * 3 seconds we still set HBA_ERROR state because the status of the
4507 * board is now undefined.
4508 */
9940b97b
JS
4509 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4510 return 1;
41415862
JW
4511 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4512 mdelay(100);
9940b97b
JS
4513 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4514 return 1;
41415862
JW
4515 }
4516
4517 del_timer_sync(&psli->mbox_tmo);
9290831f
JS
4518 if (ha_copy & HA_ERATT) {
4519 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4520 phba->pport->stopped = 1;
9290831f 4521 }
2e0fef85 4522 spin_lock_irq(&phba->hbalock);
41415862 4523 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
04c68496 4524 psli->mbox_active = NULL;
2e0fef85
JS
4525 phba->link_flag &= ~LS_IGNORE_ERATT;
4526 spin_unlock_irq(&phba->hbalock);
41415862 4527
41415862 4528 lpfc_hba_down_post(phba);
2e0fef85 4529 phba->link_state = LPFC_HBA_ERROR;
41415862 4530
2e0fef85 4531 return ha_copy & HA_ERATT ? 0 : 1;
dea3101e
JB
4532}
4533
e59058c4 4534/**
3772a991 4535 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
e59058c4
JS
4536 * @phba: Pointer to HBA context object.
4537 *
4538 * This function resets the HBA by writing HC_INITFF to the control
4539 * register. After the HBA resets, this function resets all the iocb ring
4540 * indices. This function disables PCI layer parity checking during
4541 * the reset.
4542 * This function returns 0 always.
4543 * The caller is not required to hold any locks.
4544 **/
41415862 4545int
2e0fef85 4546lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 4547{
41415862 4548 struct lpfc_sli *psli;
dea3101e 4549 struct lpfc_sli_ring *pring;
41415862 4550 uint16_t cfg_value;
dea3101e 4551 int i;
dea3101e 4552
41415862 4553 psli = &phba->sli;
dea3101e 4554
41415862
JW
4555 /* Reset HBA */
4556 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4557 "0325 Reset HBA Data: x%x x%x\n",
4492b739
JS
4558 (phba->pport) ? phba->pport->port_state : 0,
4559 psli->sli_flag);
dea3101e
JB
4560
4561 /* perform board reset */
4562 phba->fc_eventTag = 0;
4d9ab994 4563 phba->link_events = 0;
4492b739
JS
4564 if (phba->pport) {
4565 phba->pport->fc_myDID = 0;
4566 phba->pport->fc_prevDID = 0;
4567 }
dea3101e 4568
41415862 4569 /* Turn off parity checking and serr during the physical reset */
32a93100
JS
4570 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4571 return -EIO;
4572
41415862
JW
4573 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4574 (cfg_value &
4575 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4576
3772a991
JS
4577 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4578
41415862
JW
4579 /* Now toggle INITFF bit in the Host Control Register */
4580 writel(HC_INITFF, phba->HCregaddr);
4581 mdelay(1);
4582 readl(phba->HCregaddr); /* flush */
4583 writel(0, phba->HCregaddr);
4584 readl(phba->HCregaddr); /* flush */
4585
4586 /* Restore PCI cmd register */
4587 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea3101e
JB
4588
4589 /* Initialize relevant SLI info */
41415862 4590 for (i = 0; i < psli->num_rings; i++) {
895427bd 4591 pring = &psli->sli3_ring[i];
dea3101e 4592 pring->flag = 0;
7e56aa25
JS
4593 pring->sli.sli3.rspidx = 0;
4594 pring->sli.sli3.next_cmdidx = 0;
4595 pring->sli.sli3.local_getidx = 0;
4596 pring->sli.sli3.cmdidx = 0;
dea3101e
JB
4597 pring->missbufcnt = 0;
4598 }
dea3101e 4599
2e0fef85 4600 phba->link_state = LPFC_WARM_START;
41415862
JW
4601 return 0;
4602}
4603
e59058c4 4604/**
da0436e9
JS
4605 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4606 * @phba: Pointer to HBA context object.
4607 *
4608 * This function resets a SLI4 HBA. This function disables PCI layer parity
4609 * checking while it resets the device. The caller is not required to hold
4610 * any locks.
4611 *
8c24a4f6 4612 * This function returns 0 on success else returns negative error code.
da0436e9
JS
4613 **/
4614int
4615lpfc_sli4_brdreset(struct lpfc_hba *phba)
4616{
4617 struct lpfc_sli *psli = &phba->sli;
4618 uint16_t cfg_value;
0293635e 4619 int rc = 0;
da0436e9
JS
4620
4621 /* Reset HBA */
4622 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
0293635e
JS
4623 "0295 Reset HBA Data: x%x x%x x%x\n",
4624 phba->pport->port_state, psli->sli_flag,
4625 phba->hba_flag);
da0436e9
JS
4626
4627 /* perform board reset */
4628 phba->fc_eventTag = 0;
4d9ab994 4629 phba->link_events = 0;
da0436e9
JS
4630 phba->pport->fc_myDID = 0;
4631 phba->pport->fc_prevDID = 0;
4632
da0436e9
JS
4633 spin_lock_irq(&phba->hbalock);
4634 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4635 phba->fcf.fcf_flag = 0;
da0436e9
JS
4636 spin_unlock_irq(&phba->hbalock);
4637
0293635e
JS
4638 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4639 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4640 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4641 return rc;
4642 }
4643
da0436e9
JS
4644 /* Now physically reset the device */
4645 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4646 "0389 Performing PCI function reset!\n");
be858b65
JS
4647
4648 /* Turn off parity checking and serr during the physical reset */
32a93100
JS
4649 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4650 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4651 "3205 PCI read Config failed\n");
4652 return -EIO;
4653 }
4654
be858b65
JS
4655 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4656 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4657
88318816 4658 /* Perform FCoE PCI function reset before freeing queue memory */
27b01b82 4659 rc = lpfc_pci_function_reset(phba);
da0436e9 4660
be858b65
JS
4661 /* Restore PCI cmd register */
4662 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4663
27b01b82 4664 return rc;
da0436e9
JS
4665}
4666
4667/**
4668 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
e59058c4
JS
4669 * @phba: Pointer to HBA context object.
4670 *
4671 * This function is called in the SLI initialization code path to
4672 * restart the HBA. The caller is not required to hold any lock.
4673 * This function writes MBX_RESTART mailbox command to the SLIM and
4674 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4675 * function to free any pending commands. The function enables
4676 * POST only during the first initialization. The function returns zero.
4677 * The function does not guarantee completion of MBX_RESTART mailbox
4678 * command before the return of this function.
4679 **/
da0436e9
JS
4680static int
4681lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
41415862
JW
4682{
4683 MAILBOX_t *mb;
4684 struct lpfc_sli *psli;
41415862
JW
4685 volatile uint32_t word0;
4686 void __iomem *to_slim;
0d878419 4687 uint32_t hba_aer_enabled;
41415862 4688
2e0fef85 4689 spin_lock_irq(&phba->hbalock);
41415862 4690
0d878419
JS
4691 /* Take PCIe device Advanced Error Reporting (AER) state */
4692 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4693
41415862
JW
4694 psli = &phba->sli;
4695
4696 /* Restart HBA */
4697 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4698 "0337 Restart HBA Data: x%x x%x\n",
4492b739
JS
4699 (phba->pport) ? phba->pport->port_state : 0,
4700 psli->sli_flag);
41415862
JW
4701
4702 word0 = 0;
4703 mb = (MAILBOX_t *) &word0;
4704 mb->mbxCommand = MBX_RESTART;
4705 mb->mbxHc = 1;
4706
9290831f
JS
4707 lpfc_reset_barrier(phba);
4708
41415862
JW
4709 to_slim = phba->MBslimaddr;
4710 writel(*(uint32_t *) mb, to_slim);
4711 readl(to_slim); /* flush */
4712
4713 /* Only skip post after fc_ffinit is completed */
4492b739 4714 if (phba->pport && phba->pport->port_state)
41415862 4715 word0 = 1; /* This is really setting up word1 */
eaf15d5b 4716 else
41415862 4717 word0 = 0; /* This is really setting up word1 */
65a29c16 4718 to_slim = phba->MBslimaddr + sizeof (uint32_t);
41415862
JW
4719 writel(*(uint32_t *) mb, to_slim);
4720 readl(to_slim); /* flush */
dea3101e 4721
41415862 4722 lpfc_sli_brdreset(phba);
4492b739
JS
4723 if (phba->pport)
4724 phba->pport->stopped = 0;
2e0fef85 4725 phba->link_state = LPFC_INIT_START;
da0436e9 4726 phba->hba_flag = 0;
2e0fef85 4727 spin_unlock_irq(&phba->hbalock);
41415862 4728
64ba8818 4729 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
c4d6204d 4730 psli->stats_start = ktime_get_seconds();
64ba8818 4731
eaf15d5b
JS
4732 /* Give the INITFF and Post time to settle. */
4733 mdelay(100);
41415862 4734
0d878419
JS
4735 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4736 if (hba_aer_enabled)
4737 pci_disable_pcie_error_reporting(phba->pcidev);
4738
41415862 4739 lpfc_hba_down_post(phba);
dea3101e
JB
4740
4741 return 0;
4742}
4743
da0436e9
JS
4744/**
4745 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4746 * @phba: Pointer to HBA context object.
4747 *
4748 * This function is called in the SLI initialization code path to restart
4749 * a SLI4 HBA. The caller is not required to hold any lock.
4750 * At the end of the function, it calls lpfc_hba_down_post function to
4751 * free any pending commands.
4752 **/
4753static int
4754lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4755{
4756 struct lpfc_sli *psli = &phba->sli;
75baf696 4757 uint32_t hba_aer_enabled;
27b01b82 4758 int rc;
da0436e9
JS
4759
4760 /* Restart HBA */
4761 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4762 "0296 Restart HBA Data: x%x x%x\n",
4763 phba->pport->port_state, psli->sli_flag);
4764
75baf696
JS
4765 /* Take PCIe device Advanced Error Reporting (AER) state */
4766 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4767
27b01b82 4768 rc = lpfc_sli4_brdreset(phba);
4fb86a6b
JS
4769 if (rc) {
4770 phba->link_state = LPFC_HBA_ERROR;
4771 goto hba_down_queue;
4772 }
da0436e9
JS
4773
4774 spin_lock_irq(&phba->hbalock);
4775 phba->pport->stopped = 0;
4776 phba->link_state = LPFC_INIT_START;
4777 phba->hba_flag = 0;
4778 spin_unlock_irq(&phba->hbalock);
4779
4780 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
c4d6204d 4781 psli->stats_start = ktime_get_seconds();
da0436e9 4782
75baf696
JS
4783 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4784 if (hba_aer_enabled)
4785 pci_disable_pcie_error_reporting(phba->pcidev);
4786
4fb86a6b 4787hba_down_queue:
da0436e9 4788 lpfc_hba_down_post(phba);
569dbe84 4789 lpfc_sli4_queue_destroy(phba);
da0436e9 4790
27b01b82 4791 return rc;
da0436e9
JS
4792}
4793
4794/**
4795 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4796 * @phba: Pointer to HBA context object.
4797 *
4798 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4799 * API jump table function pointer from the lpfc_hba struct.
4800**/
4801int
4802lpfc_sli_brdrestart(struct lpfc_hba *phba)
4803{
4804 return phba->lpfc_sli_brdrestart(phba);
4805}
4806
e59058c4 4807/**
3621a710 4808 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
e59058c4
JS
4809 * @phba: Pointer to HBA context object.
4810 *
4811 * This function is called after a HBA restart to wait for successful
4812 * restart of the HBA. Successful restart of the HBA is indicated by
4813 * HS_FFRDY and HS_MBRDY bits. If the HBA is still not ready after 150
4814 * polling iterations (~60 seconds), the function restarts the HBA again.
4815 * The function returns zero if the HBA restarted successfully, else a negative error code.
4816 **/
4492b739 4817int
dea3101e
JB
4818lpfc_sli_chipset_init(struct lpfc_hba *phba)
4819{
4820 uint32_t status, i = 0;
4821
4822 /* Read the HBA Host Status Register */
9940b97b
JS
4823 if (lpfc_readl(phba->HSregaddr, &status))
4824 return -EIO;
dea3101e
JB
4825
4826 /* Check status register to see what current state is */
4827 i = 0;
4828 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4829
dcf2a4e0
JS
4830 /* Check every 10ms for 10 retries, then every 100ms for 90
4831 * retries, then every 1 sec for 50 retries, i.e. ~60 seconds in
4832 * total, before resetting the board and then checking every
4833 * 1 sec for another 50 retries. Waiting up to 60 seconds for
4834 * board ready is required for Falcon FIPS zeroization to
4835 * complete; any board reset in between restarts zeroization and
4836 * further delays board readiness.
dea3101e 4837 */
dcf2a4e0 4838 if (i++ >= 200) {
dea3101e
JB
4839 /* Adapter failed to init, timeout, status reg
4840 <status> */
ed957684 4841 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4842 "0436 Adapter failed to init, "
09372820
JS
4843 "timeout, status reg x%x, "
4844 "FW Data: A8 x%x AC x%x\n", status,
4845 readl(phba->MBslimaddr + 0xa8),
4846 readl(phba->MBslimaddr + 0xac));
2e0fef85 4847 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
4848 return -ETIMEDOUT;
4849 }
4850
4851 /* Check to see if any errors occurred during init */
4852 if (status & HS_FFERM) {
4853 /* ERROR: During chipset initialization */
4854 /* Adapter failed to init, chipset, status reg
4855 <status> */
ed957684 4856 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4857 "0437 Adapter failed to init, "
09372820
JS
4858 "chipset, status reg x%x, "
4859 "FW Data: A8 x%x AC x%x\n", status,
4860 readl(phba->MBslimaddr + 0xa8),
4861 readl(phba->MBslimaddr + 0xac));
2e0fef85 4862 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
4863 return -EIO;
4864 }
4865
dcf2a4e0 4866 if (i <= 10)
dea3101e 4867 msleep(10);
dcf2a4e0
JS
4868 else if (i <= 100)
4869 msleep(100);
4870 else
4871 msleep(1000);
dea3101e 4872
dcf2a4e0
JS
4873 if (i == 150) {
4874 /* Do post */
92d7f7b0 4875 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4876 lpfc_sli_brdrestart(phba);
dea3101e
JB
4877 }
4878 /* Read the HBA Host Status Register */
9940b97b
JS
4879 if (lpfc_readl(phba->HSregaddr, &status))
4880 return -EIO;
dea3101e
JB
4881 }
4882
4883 /* Check to see if any errors occurred during init */
4884 if (status & HS_FFERM) {
4885 /* ERROR: During chipset initialization */
4886 /* Adapter failed to init, chipset, status reg <status> */
ed957684 4887 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4888 "0438 Adapter failed to init, chipset, "
09372820
JS
4889 "status reg x%x, "
4890 "FW Data: A8 x%x AC x%x\n", status,
4891 readl(phba->MBslimaddr + 0xa8),
4892 readl(phba->MBslimaddr + 0xac));
2e0fef85 4893 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
4894 return -EIO;
4895 }
4896
4897 /* Clear all interrupt enable conditions */
4898 writel(0, phba->HCregaddr);
4899 readl(phba->HCregaddr); /* flush */
4900
4901 /* setup host attn register */
4902 writel(0xffffffff, phba->HAregaddr);
4903 readl(phba->HAregaddr); /* flush */
4904 return 0;
4905}
4906
e59058c4 4907/**
3621a710 4908 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
e59058c4
JS
4909 *
4910 * This function calculates and returns the number of HBQs required to be
4911 * configured.
4912 **/
78b2d852 4913int
ed957684
JS
4914lpfc_sli_hbq_count(void)
4915{
92d7f7b0 4916 return ARRAY_SIZE(lpfc_hbq_defs);
ed957684
JS
4917}
4918
e59058c4 4919/**
3621a710 4920 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
e59058c4
JS
4921 *
4922 * This function adds the number of hbq entries in every HBQ to get
4923 * the total number of hbq entries required for the HBA and returns
4924 * the total count.
4925 **/
ed957684
JS
4926static int
4927lpfc_sli_hbq_entry_count(void)
4928{
4929 int hbq_count = lpfc_sli_hbq_count();
4930 int count = 0;
4931 int i;
4932
4933 for (i = 0; i < hbq_count; ++i)
92d7f7b0 4934 count += lpfc_hbq_defs[i]->entry_count;
ed957684
JS
4935 return count;
4936}
4937
e59058c4 4938/**
3621a710 4939 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
e59058c4
JS
4940 *
4941 * This function calculates amount of memory required for all hbq entries
4942 * to be configured and returns the total memory required.
4943 **/
dea3101e 4944int
ed957684
JS
4945lpfc_sli_hbq_size(void)
4946{
4947 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4948}
4949
e59058c4 4950/**
3621a710 4951 * lpfc_sli_hbq_setup - configure and initialize HBQs
e59058c4
JS
4952 * @phba: Pointer to HBA context object.
4953 *
4954 * This function is called during the SLI initialization to configure
4955 * all the HBQs and post buffers to the HBQ. The caller is not
4956 * required to hold any locks. This function will return zero if successful
4957 * else it will return negative error code.
4958 **/
ed957684
JS
4959static int
4960lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4961{
4962 int hbq_count = lpfc_sli_hbq_count();
4963 LPFC_MBOXQ_t *pmb;
4964 MAILBOX_t *pmbox;
4965 uint32_t hbqno;
4966 uint32_t hbq_entry_index;
ed957684 4967
92d7f7b0
JS
4968 /* Get a Mailbox buffer to setup mailbox
4969 * commands for HBA initialization
4970 */
ed957684
JS
4971 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4972
4973 if (!pmb)
4974 return -ENOMEM;
4975
04c68496 4976 pmbox = &pmb->u.mb;
ed957684
JS
4977
4978 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4979 phba->link_state = LPFC_INIT_MBX_CMDS;
3163f725 4980 phba->hbq_in_use = 1;
ed957684
JS
4981
4982 hbq_entry_index = 0;
4983 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4984 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4985 phba->hbqs[hbqno].hbqPutIdx = 0;
4986 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4987 phba->hbqs[hbqno].entry_count =
92d7f7b0 4988 lpfc_hbq_defs[hbqno]->entry_count;
51ef4c26
JS
4989 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4990 hbq_entry_index, pmb);
ed957684
JS
4991 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4992
4993 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4994 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4995 mbxStatus <status>, ring <num> */
4996
4997 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 4998 LOG_SLI | LOG_VPORT,
e8b62011 4999 "1805 Adapter failed to init. "
ed957684 5000 "Data: x%x x%x x%x\n",
e8b62011 5001 pmbox->mbxCommand,
ed957684
JS
5002 pmbox->mbxStatus, hbqno);
5003
5004 phba->link_state = LPFC_HBA_ERROR;
5005 mempool_free(pmb, phba->mbox_mem_pool);
6e7288d9 5006 return -ENXIO;
ed957684
JS
5007 }
5008 }
5009 phba->hbq_count = hbq_count;
5010
ed957684
JS
5011 mempool_free(pmb, phba->mbox_mem_pool);
5012
92d7f7b0 5013 /* Initially populate or replenish the HBQs */
d7c255b2
JS
5014 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5015 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
ed957684
JS
5016 return 0;
5017}
5018
4f774513
JS
5019/**
5020 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5021 * @phba: Pointer to HBA context object.
5022 *
5023 * This function is called during SLI4 initialization to configure
5024 * the ELS HBQ and post receive buffers to it. The caller is not
5025 * required to hold any locks. This function will return zero if successful
5026 * else it will return a negative error code.
5027 **/
5028static int
5029lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5030{
5031 phba->hbq_in_use = 1;
999fbbce
JS
5032 /**
5033 * Special case: when MDS diagnostics is enabled and supported,
5034 * the receive buffer count is halved to throttle the incoming
5035 * traffic.
5036 **/
5037 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5038 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5039 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5040 else
5041 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5042 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4f774513 5043 phba->hbq_count = 1;
895427bd 5044 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4f774513 5045 /* Initially populate or replenish the HBQs */
4f774513
JS
5046 return 0;
5047}
5048
e59058c4 5049/**
3621a710 5050 * lpfc_sli_config_port - Issue config port mailbox command
e59058c4
JS
5051 * @phba: Pointer to HBA context object.
5052 * @sli_mode: sli mode - 2/3
5053 *
183b8021 5054 * This function is called by the sli initialization code path
e59058c4
JS
5055 * to issue the config_port mailbox command. This function restarts the
5056 * HBA firmware and issues a config_port mailbox command to configure
5057 * the SLI interface in the SLI mode specified by the sli_mode
5058 * parameter. The caller is not required to hold any locks.
5059 * The function returns 0 if successful, else returns negative error
5060 * code.
5061 **/
9399627f
JS
5062int
5063lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
dea3101e
JB
5064{
5065 LPFC_MBOXQ_t *pmb;
5066 uint32_t resetcount = 0, rc = 0, done = 0;
5067
5068 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5069 if (!pmb) {
2e0fef85 5070 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
5071 return -ENOMEM;
5072 }
5073
ed957684 5074 phba->sli_rev = sli_mode;
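 /* Attempt the restart + CONFIG_PORT sequence at most twice */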
dea3101e 5075 while (resetcount < 2 && !done) {
2e0fef85 5076 spin_lock_irq(&phba->hbalock);
1c067a42 5077 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2e0fef85 5078 spin_unlock_irq(&phba->hbalock);
92d7f7b0 5079 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 5080 lpfc_sli_brdrestart(phba);
dea3101e
JB
5081 rc = lpfc_sli_chipset_init(phba);
5082 if (rc)
5083 break;
5084
2e0fef85 5085 spin_lock_irq(&phba->hbalock);
1c067a42 5086 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 5087 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
5088 resetcount++;
5089
ed957684
JS
5090 /* Call pre CONFIG_PORT mailbox command initialization. A
5091 * value of 0 means the call was successful. Any other
5092 * nonzero value is a failure, but if ERESTART is returned,
5093 * the driver may reset the HBA and try again.
5094 */
dea3101e
JB
5095 rc = lpfc_config_port_prep(phba);
5096 if (rc == -ERESTART) {
ed957684 5097 phba->link_state = LPFC_LINK_UNKNOWN;
dea3101e 5098 continue;
34b02dcd 5099 } else if (rc)
dea3101e 5100 break;
6d368e53 5101
2e0fef85 5102 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e
JB
5103 lpfc_config_port(phba, pmb);
5104 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
34b02dcd
JS
5105 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5106 LPFC_SLI3_HBQ_ENABLED |
5107 LPFC_SLI3_CRP_ENABLED |
bc73905a 5108 LPFC_SLI3_DSS_ENABLED);
ed957684 5109 if (rc != MBX_SUCCESS) {
dea3101e 5110 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 5111 "0442 Adapter failed to init, mbxCmd x%x "
92d7f7b0 5112 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
04c68496 5113 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
2e0fef85 5114 spin_lock_irq(&phba->hbalock);
04c68496 5115 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
2e0fef85
JS
5116 spin_unlock_irq(&phba->hbalock);
5117 rc = -ENXIO;
04c68496
JS
5118 } else {
5119 /* Allow asynchronous mailbox command to go through */
5120 spin_lock_irq(&phba->hbalock);
5121 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5122 spin_unlock_irq(&phba->hbalock);
ed957684 5123 done = 1;
cb69f7de
JS
5124
5125 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5126 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5127 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5128 "3110 Port did not grant ASABT\n");
04c68496 5129 }
dea3101e 5130 }
ed957684
JS
5131 if (!done) {
5132 rc = -EINVAL;
5133 goto do_prep_failed;
5134 }
04c68496
JS
5135 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5136 if (!pmb->u.mb.un.varCfgPort.cMA) {
34b02dcd
JS
5137 rc = -ENXIO;
5138 goto do_prep_failed;
5139 }
04c68496 5140 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
34b02dcd 5141 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
04c68496
JS
5142 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5143 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5144 phba->max_vpi : phba->max_vports;
5145
34b02dcd
JS
5146 } else
5147 phba->max_vpi = 0;
04c68496 5148 if (pmb->u.mb.un.varCfgPort.gerbm)
34b02dcd 5149 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
04c68496 5150 if (pmb->u.mb.un.varCfgPort.gcrp)
34b02dcd 5151 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
6e7288d9
JS
5152
5153 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5154 phba->port_gp = phba->mbox->us.s3_pgp.port;
e2a0a9d6 5155
f44ac12f
JS
5156 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5157 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5158 phba->cfg_enable_bg = 0;
5159 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
e2a0a9d6
JS
5160 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5161 "0443 Adapter did not grant "
5162 "BlockGuard\n");
f44ac12f 5163 }
e2a0a9d6 5164 }
34b02dcd 5165 } else {
8f34f4ce 5166 phba->hbq_get = NULL;
34b02dcd 5167 phba->port_gp = phba->mbox->us.s2.port;
d7c255b2 5168 phba->max_vpi = 0;
ed957684 5169 }
92d7f7b0 5170do_prep_failed:
ed957684
JS
5171 mempool_free(pmb, phba->mbox_mem_pool);
5172 return rc;
5173}
5174
e59058c4
JS
5175
5176/**
183b8021 5177 * lpfc_sli_hba_setup - SLI initialization function
e59058c4
JS
5178 * @phba: Pointer to HBA context object.
5179 *
183b8021
MY
5180 * This function is the main SLI initialization function. This function
5181 * is called by the HBA initialization code, HBA reset code and HBA
e59058c4
JS
5182 * error attention handler code. Caller is not required to hold any
5183 * locks. This function issues config_port mailbox command to configure
5184 * the SLI, setup iocb rings and HBQ rings. In the end the function
5185 * calls the config_port_post function to issue init_link mailbox
5186 * command and to start the discovery. The function will return zero
5187 * if successful, else it will return negative error code.
5188 **/
ed957684
JS
5189int
5190lpfc_sli_hba_setup(struct lpfc_hba *phba)
5191{
5192 uint32_t rc;
6d368e53
JS
5193 int mode = 3, i;
5194 int longs;
ed957684 5195
12247e81 5196 switch (phba->cfg_sli_mode) {
ed957684 5197 case 2:
78b2d852 5198 if (phba->cfg_enable_npiv) {
92d7f7b0 5199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
12247e81 5200 "1824 NPIV enabled: Override sli_mode "
92d7f7b0 5201 "parameter (%d) to auto (0).\n",
12247e81 5202 phba->cfg_sli_mode);
92d7f7b0
JS
5203 break;
5204 }
ed957684
JS
5205 mode = 2;
5206 break;
5207 case 0:
5208 case 3:
5209 break;
5210 default:
92d7f7b0 5211 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
12247e81
JS
5212 "1819 Unrecognized sli_mode parameter: %d.\n",
5213 phba->cfg_sli_mode);
ed957684
JS
5214
5215 break;
5216 }
b5c53958 5217 phba->fcp_embed_io = 0; /* SLI4 FC support only */
ed957684 5218
9399627f
JS
5219 rc = lpfc_sli_config_port(phba, mode);
5220
12247e81 5221 if (rc && phba->cfg_sli_mode == 3)
92d7f7b0 5222 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
5223 "1820 Unable to select SLI-3. "
5224 "Not supported by adapter.\n");
ed957684 5225 if (rc && mode != 2)
9399627f 5226 rc = lpfc_sli_config_port(phba, 2);
4597663f
JS
5227 else if (rc && mode == 2)
5228 rc = lpfc_sli_config_port(phba, 3);
ed957684 5229 if (rc)
dea3101e
JB
5230 goto lpfc_sli_hba_setup_error;
5231
0d878419
JS
5232 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5233 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5234 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5235 if (!rc) {
5236 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5237 "2709 This device supports "
5238 "Advanced Error Reporting (AER)\n");
5239 spin_lock_irq(&phba->hbalock);
5240 phba->hba_flag |= HBA_AER_ENABLED;
5241 spin_unlock_irq(&phba->hbalock);
5242 } else {
5243 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5244 "2708 This device does not support "
b069d7eb
JS
5245 "Advanced Error Reporting (AER): %d\n",
5246 rc);
0d878419
JS
5247 phba->cfg_aer_support = 0;
5248 }
5249 }
5250
ed957684
JS
5251 if (phba->sli_rev == 3) {
5252 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5253 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
ed957684
JS
5254 } else {
5255 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5256 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
92d7f7b0 5257 phba->sli3_options = 0;
ed957684
JS
5258 }
5259
5260 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
5261 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5262 phba->sli_rev, phba->max_vpi);
ed957684 5263 rc = lpfc_sli_ring_map(phba);
dea3101e
JB
5264
5265 if (rc)
5266 goto lpfc_sli_hba_setup_error;
5267
6d368e53
JS
5268 /* Initialize VPIs. */
5269 if (phba->sli_rev == LPFC_SLI_REV3) {
5270 /*
5271 * The VPI bitmask and physical ID array are allocated
5272 * and initialized once only - at driver load. A port
5273 * reset doesn't need to reinitialize this memory.
5274 */
5275 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
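 /* Enough longs to hold one bit per VPI, 0 .. max_vpi inclusive */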
5276 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
6396bb22
KC
5277 phba->vpi_bmask = kcalloc(longs,
5278 sizeof(unsigned long),
6d368e53
JS
5279 GFP_KERNEL);
5280 if (!phba->vpi_bmask) {
5281 rc = -ENOMEM;
5282 goto lpfc_sli_hba_setup_error;
5283 }
5284
6396bb22
KC
5285 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5286 sizeof(uint16_t),
5287 GFP_KERNEL);
6d368e53
JS
5288 if (!phba->vpi_ids) {
5289 kfree(phba->vpi_bmask);
5290 rc = -ENOMEM;
5291 goto lpfc_sli_hba_setup_error;
5292 }
5293 for (i = 0; i < phba->max_vpi; i++)
5294 phba->vpi_ids[i] = i;
5295 }
5296 }
5297
9399627f 5298 /* Init HBQs */
ed957684
JS
5299 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5300 rc = lpfc_sli_hbq_setup(phba);
5301 if (rc)
5302 goto lpfc_sli_hba_setup_error;
5303 }
04c68496 5304 spin_lock_irq(&phba->hbalock);
dea3101e 5305 phba->sli.sli_flag |= LPFC_PROCESS_LA;
04c68496 5306 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
5307
5308 rc = lpfc_config_port_post(phba);
5309 if (rc)
5310 goto lpfc_sli_hba_setup_error;
5311
ed957684
JS
5312 return rc;
5313
92d7f7b0 5314lpfc_sli_hba_setup_error:
2e0fef85 5315 phba->link_state = LPFC_HBA_ERROR;
e40a02c1 5316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 5317 "0445 Firmware initialization failed\n");
dea3101e
JB
5318 return rc;
5319}
5320
e59058c4 5321/**
da0436e9
JS
5322 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5323 * @phba: Pointer to HBA context object.
5324 *
5325 * This function issues a dump mailbox command to read config region
5326 * 23, parses the records in the region, and populates the driver
5327 * data structures.
e59058c4 5328 **/
da0436e9 5329static int
ff78d8f9 5330lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
dea3101e 5331{
ff78d8f9 5332 LPFC_MBOXQ_t *mboxq;
da0436e9
JS
5333 struct lpfc_dmabuf *mp;
5334 struct lpfc_mqe *mqe;
5335 uint32_t data_length;
5336 int rc;
dea3101e 5337
da0436e9
JS
5338 /* Program the default value of vlan_id and fc_map */
5339 phba->valid_vlan = 0;
5340 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5341 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5342 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
2e0fef85 5343
ff78d8f9
JS
5344 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5345 if (!mboxq)
da0436e9
JS
5346 return -ENOMEM;
5347
ff78d8f9
JS
5348 mqe = &mboxq->u.mqe;
5349 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5350 rc = -ENOMEM;
5351 goto out_free_mboxq;
5352 }
5353
3e1f0718 5354 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
da0436e9
JS
5355 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5356
5357 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5358 "(%d):2571 Mailbox cmd x%x Status x%x "
5359 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5360 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5361 "CQ: x%x x%x x%x x%x\n",
5362 mboxq->vport ? mboxq->vport->vpi : 0,
5363 bf_get(lpfc_mqe_command, mqe),
5364 bf_get(lpfc_mqe_status, mqe),
5365 mqe->un.mb_words[0], mqe->un.mb_words[1],
5366 mqe->un.mb_words[2], mqe->un.mb_words[3],
5367 mqe->un.mb_words[4], mqe->un.mb_words[5],
5368 mqe->un.mb_words[6], mqe->un.mb_words[7],
5369 mqe->un.mb_words[8], mqe->un.mb_words[9],
5370 mqe->un.mb_words[10], mqe->un.mb_words[11],
5371 mqe->un.mb_words[12], mqe->un.mb_words[13],
5372 mqe->un.mb_words[14], mqe->un.mb_words[15],
5373 mqe->un.mb_words[16], mqe->un.mb_words[50],
5374 mboxq->mcqe.word0,
5375 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5376 mboxq->mcqe.trailer);
5377
5378 if (rc) {
5379 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5380 kfree(mp);
ff78d8f9
JS
5381 rc = -EIO;
5382 goto out_free_mboxq;
da0436e9
JS
5383 }
5384 data_length = mqe->un.mb_words[5];
a0c87cbd 5385 if (data_length > DMP_RGN23_SIZE) {
d11e31dd
JS
5386 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5387 kfree(mp);
ff78d8f9
JS
5388 rc = -EIO;
5389 goto out_free_mboxq;
d11e31dd 5390 }
dea3101e 5391
da0436e9
JS
5392 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5393 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5394 kfree(mp);
ff78d8f9
JS
5395 rc = 0;
5396
5397out_free_mboxq:
5398 mempool_free(mboxq, phba->mbox_mem_pool);
5399 return rc;
da0436e9 5400}
e59058c4
JS
5401
5402/**
da0436e9
JS
5403 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5404 * @phba: pointer to lpfc hba data structure.
5405 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5406 * @vpd: pointer to the memory to hold resulting port vpd data.
5407 * @vpd_size: On input, the number of bytes allocated to @vpd.
5408 * On output, the number of data bytes in @vpd.
e59058c4 5409 *
da0436e9
JS
5410 * This routine executes a READ_REV SLI4 mailbox command. In
5411 * addition, this routine gets the port vpd data.
5412 *
5413 * Return codes
af901ca1 5414 * 0 - successful
d439d286 5415 * -ENOMEM - could not allocate memory.
e59058c4 5416 **/
da0436e9
JS
5417static int
5418lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5419 uint8_t *vpd, uint32_t *vpd_size)
dea3101e 5420{
da0436e9
JS
5421 int rc = 0;
5422 uint32_t dma_size;
5423 struct lpfc_dmabuf *dmabuf;
5424 struct lpfc_mqe *mqe;
dea3101e 5425
da0436e9
JS
5426 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5427 if (!dmabuf)
5428 return -ENOMEM;
5429
5430 /*
5431 * Get a DMA buffer for the vpd data resulting from the READ_REV
5432 * mailbox command.
a257bf90 5433 */
da0436e9 5434 dma_size = *vpd_size;
750afb08
LC
5435 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5436 &dmabuf->phys, GFP_KERNEL);
da0436e9
JS
5437 if (!dmabuf->virt) {
5438 kfree(dmabuf);
5439 return -ENOMEM;
a257bf90
JS
5440 }
5441
da0436e9
JS
5442 /*
5443 * The SLI4 implementation of READ_REV conflicts at word1,
5444 * bits 31:16 and SLI4 adds vpd functionality not present
5445 * in SLI3. This code corrects the conflicts.
1dcb58e5 5446 */
da0436e9
JS
5447 lpfc_read_rev(phba, mboxq);
5448 mqe = &mboxq->u.mqe;
5449 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5450 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5451 mqe->un.read_rev.word1 &= 0x0000FFFF;
5452 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5453 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5454
5455 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5456 if (rc) {
5457 dma_free_coherent(&phba->pcidev->dev, dma_size,
5458 dmabuf->virt, dmabuf->phys);
def9c7a9 5459 kfree(dmabuf);
da0436e9
JS
5460 return -EIO;
5461 }
1dcb58e5 5462
da0436e9
JS
5463 /*
5464 * The available vpd length cannot be bigger than the
5465 * DMA buffer passed to the port. Catch the less than
5466 * case and update the caller's size.
5467 */
5468 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5469 *vpd_size = mqe->un.read_rev.avail_vpd_len;
3772a991 5470
d7c47992
JS
5471 memcpy(vpd, dmabuf->virt, *vpd_size);
5472
da0436e9
JS
5473 dma_free_coherent(&phba->pcidev->dev, dma_size,
5474 dmabuf->virt, dmabuf->phys);
5475 kfree(dmabuf);
5476 return 0;
dea3101e
JB
5477}
5478
cd1c8301 5479/**
b3b4f3e1 5480 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
cd1c8301
JS
5481 * @phba: pointer to lpfc hba data structure.
5482 *
5483 * This routine retrieves the SLI4 device controller attributes (link type,
5484 * link number and BIOS version) for the controller this PCI function is attached to.
5485 *
5486 * Return codes
4907cb7b 5487 * 0 - successful
b3b4f3e1 5488 * otherwise - failed to retrieve controller attributes
cd1c8301
JS
5489 **/
5490static int
b3b4f3e1 5491lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
cd1c8301
JS
5492{
5493 LPFC_MBOXQ_t *mboxq;
cd1c8301
JS
5494 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5495 struct lpfc_controller_attribute *cntl_attr;
cd1c8301
JS
5496 void *virtaddr = NULL;
5497 uint32_t alloclen, reqlen;
5498 uint32_t shdr_status, shdr_add_status;
5499 union lpfc_sli4_cfg_shdr *shdr;
cd1c8301
JS
5500 int rc;
5501
cd1c8301
JS
5502 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5503 if (!mboxq)
5504 return -ENOMEM;
cd1c8301 5505
b3b4f3e1 5506 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
cd1c8301
JS
5507 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5508 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5509 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5510 LPFC_SLI4_MBX_NEMBED);
b3b4f3e1 5511
cd1c8301
JS
5512 if (alloclen < reqlen) {
5513 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5514 "3084 Allocated DMA memory size (%d) is "
5515 "less than the requested DMA memory size "
5516 "(%d)\n", alloclen, reqlen);
5517 rc = -ENOMEM;
5518 goto out_free_mboxq;
5519 }
5520 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5521 virtaddr = mboxq->sge_array->addr[0];
5522 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5523 shdr = &mbx_cntl_attr->cfg_shdr;
5524 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5525 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5526 if (shdr_status || shdr_add_status || rc) {
5527 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5528 "3085 Mailbox x%x (x%x/x%x) failed, "
5529 "rc:x%x, status:x%x, add_status:x%x\n",
5530 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5531 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5532 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5533 rc, shdr_status, shdr_add_status);
5534 rc = -ENXIO;
5535 goto out_free_mboxq;
5536 }
b3b4f3e1 5537
cd1c8301
JS
5538 cntl_attr = &mbx_cntl_attr->cntl_attr;
5539 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5540 phba->sli4_hba.lnk_info.lnk_tp =
5541 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5542 phba->sli4_hba.lnk_info.lnk_no =
5543 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
b3b4f3e1
JS
5544
5545 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5546 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5547 sizeof(phba->BIOSVersion));
5548
cd1c8301 5549 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
b3b4f3e1 5550 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
cd1c8301 5551 phba->sli4_hba.lnk_info.lnk_tp,
b3b4f3e1
JS
5552 phba->sli4_hba.lnk_info.lnk_no,
5553 phba->BIOSVersion);
5554out_free_mboxq:
5555 if (rc != MBX_TIMEOUT) {
5556 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5557 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5558 else
5559 mempool_free(mboxq, phba->mbox_mem_pool);
5560 }
5561 return rc;
5562}
5563
5564/**
5565 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5566 * @phba: pointer to lpfc hba data structure.
5567 *
5568 * This routine retrieves SLI4 device physical port name this PCI function
5569 * is attached to.
5570 *
5571 * Return codes
5572 * 0 - successful
5573 * otherwise - failed to retrieve physical port name
5574 **/
5575static int
5576lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5577{
5578 LPFC_MBOXQ_t *mboxq;
5579 struct lpfc_mbx_get_port_name *get_port_name;
5580 uint32_t shdr_status, shdr_add_status;
5581 union lpfc_sli4_cfg_shdr *shdr;
5582 char cport_name = 0;
5583 int rc;
5584
5585 /* We assume nothing at this point */
5586 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5587 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5588
5589 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5590 if (!mboxq)
5591 return -ENOMEM;
5592 /* obtain link type and link number via READ_CONFIG */
5593 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5594 lpfc_sli4_read_config(phba);
5595 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5596 goto retrieve_ppname;
5597
5598 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5599 rc = lpfc_sli4_get_ctl_attr(phba);
5600 if (rc)
5601 goto out_free_mboxq;
cd1c8301
JS
5602
5603retrieve_ppname:
5604 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5605 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5606 sizeof(struct lpfc_mbx_get_port_name) -
5607 sizeof(struct lpfc_sli4_cfg_mhdr),
5608 LPFC_SLI4_MBX_EMBED);
5609 get_port_name = &mboxq->u.mqe.un.get_port_name;
5610 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5611 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5612 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5613 phba->sli4_hba.lnk_info.lnk_tp);
5614 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5615 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5616 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5617 if (shdr_status || shdr_add_status || rc) {
5618 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5619 "3087 Mailbox x%x (x%x/x%x) failed: "
5620 "rc:x%x, status:x%x, add_status:x%x\n",
5621 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5622 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5623 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5624 rc, shdr_status, shdr_add_status);
5625 rc = -ENXIO;
5626 goto out_free_mboxq;
5627 }
5628 switch (phba->sli4_hba.lnk_info.lnk_no) {
5629 case LPFC_LINK_NUMBER_0:
5630 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5631 &get_port_name->u.response);
5632 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5633 break;
5634 case LPFC_LINK_NUMBER_1:
5635 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5636 &get_port_name->u.response);
5637 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5638 break;
5639 case LPFC_LINK_NUMBER_2:
5640 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5641 &get_port_name->u.response);
5642 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5643 break;
5644 case LPFC_LINK_NUMBER_3:
5645 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5646 &get_port_name->u.response);
5647 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5648 break;
5649 default:
5650 break;
5651 }
5652
5653 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5654 phba->Port[0] = cport_name;
5655 phba->Port[1] = '\0';
5656 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5657 "3091 SLI get port name: %s\n", phba->Port);
5658 }
5659
5660out_free_mboxq:
5661 if (rc != MBX_TIMEOUT) {
5662 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5663 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5664 else
5665 mempool_free(mboxq, phba->mbox_mem_pool);
5666 }
5667 return rc;
5668}
5669
e59058c4 5670/**
da0436e9
JS
5671 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5672 * @phba: pointer to lpfc hba data structure.
e59058c4 5673 *
da0436e9
JS
5674 * This routine is called to explicitly arm the SLI4 device's completion and
5675 * event queues
5676 **/
5677static void
5678lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5679{
895427bd 5680 int qidx;
b71413dd 5681 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
cdb42bec 5682 struct lpfc_sli4_hdw_queue *qp;
657add4e 5683 struct lpfc_queue *eq;
da0436e9 5684
32517fc0
JS
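 /* Rearm the slow-path CQs first (mailbox, ELS, NVMe LS), then the
  * per-hardware-queue I/O CQs, the per-vector EQs and, when NVMET is
  * enabled, the NVMET CQ set.
  */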
5685 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5686 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
b71413dd 5687 if (sli4_hba->nvmels_cq)
32517fc0
JS
5688 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5689 LPFC_QUEUE_REARM);
1ba981fd 5690
cdb42bec 5691 if (sli4_hba->hdwq) {
657add4e 5692 /* Loop thru all Hardware Queues */
cdb42bec 5693 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
657add4e
JS
5694 qp = &sli4_hba->hdwq[qidx];
5695 /* ARM the corresponding CQ */
01f2ef6d 5696 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
c00f62e6 5697 LPFC_QUEUE_REARM);
cdb42bec 5698 }
1ba981fd 5699
657add4e
JS
5700 /* Loop thru all IRQ vectors */
5701 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5702 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5703 /* ARM the corresponding EQ */
5704 sli4_hba->sli4_write_eq_db(phba, eq,
5705 0, LPFC_QUEUE_REARM);
5706 }
cdb42bec 5707 }
1ba981fd 5708
2d7dbc4c
JS
5709 if (phba->nvmet_support) {
5710 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
32517fc0
JS
5711 sli4_hba->sli4_write_cq_db(phba,
5712 sli4_hba->nvmet_cqset[qidx], 0,
2d7dbc4c
JS
5713 LPFC_QUEUE_REARM);
5714 }
2e90f4b5 5715 }
da0436e9
JS
5716}
5717
6d368e53
JS
5718/**
5719 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5720 * @phba: Pointer to HBA context object.
5721 * @type: The resource extent type.
b76f2dc9
JS
5722 * @extnt_count: buffer to hold port available extent count.
5723 * @extnt_size: buffer to hold element count per extent.
6d368e53 5724 *
b76f2dc9
JS
5725 * This function calls the port and retrieves the number of available
5726 * extents and their size for a particular extent type.
5727 *
5728 * Returns: 0 if successful. Nonzero otherwise.
6d368e53 5729 **/
b76f2dc9 5730int
6d368e53
JS
5731lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5732 uint16_t *extnt_count, uint16_t *extnt_size)
5733{
5734 int rc = 0;
5735 uint32_t length;
5736 uint32_t mbox_tmo;
5737 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5738 LPFC_MBOXQ_t *mbox;
5739
5740 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5741 if (!mbox)
5742 return -ENOMEM;
5743
5744 /* Find out how many extents are available for this resource type */
5745 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5746 sizeof(struct lpfc_sli4_cfg_mhdr));
5747 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5748 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5749 length, LPFC_SLI4_MBX_EMBED);
5750
5751 /* Send an extents count of 0 - the GET doesn't use it. */
5752 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5753 LPFC_SLI4_MBX_EMBED);
5754 if (unlikely(rc)) {
5755 rc = -EIO;
5756 goto err_exit;
5757 }
5758
5759 if (!phba->sli4_hba.intr_enable)
5760 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5761 else {
a183a15f 5762 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
5763 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5764 }
5765 if (unlikely(rc)) {
5766 rc = -EIO;
5767 goto err_exit;
5768 }
5769
5770 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5771 if (bf_get(lpfc_mbox_hdr_status,
5772 &rsrc_info->header.cfg_shdr.response)) {
5773 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5774 "2930 Failed to get resource extents "
5775 "Status 0x%x Add'l Status 0x%x\n",
5776 bf_get(lpfc_mbox_hdr_status,
5777 &rsrc_info->header.cfg_shdr.response),
5778 bf_get(lpfc_mbox_hdr_add_status,
5779 &rsrc_info->header.cfg_shdr.response));
5780 rc = -EIO;
5781 goto err_exit;
5782 }
5783
5784 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5785 &rsrc_info->u.rsp);
5786 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5787 &rsrc_info->u.rsp);
8a9d2e80
JS
5788
5789 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5790 "3162 Retrieved extents type-%d from port: count:%d, "
5791 "size:%d\n", type, *extnt_count, *extnt_size);
5792
5793err_exit:
6d368e53
JS
5794 mempool_free(mbox, phba->mbox_mem_pool);
5795 return rc;
5796}
5797
5798/**
5799 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5800 * @phba: Pointer to HBA context object.
5801 * @type: The extent type to check.
5802 *
5803 * This function reads the current available extents from the port and checks
5804 * if the extent count or extent size has changed since the last access.
5806 * Callers use this routine after a port reset to determine whether there is
5807 * an extent reprovisioning requirement.
5807 *
5808 * Returns:
5809 * negative error code: failed to retrieve the available extents.
5810 * 1: Extent count or size has changed.
5811 * 0: No changes.
5812 **/
5813static int
5814lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5815{
5816 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5817 uint16_t size_diff, rsrc_ext_size;
5818 int rc = 0;
5819 struct lpfc_rsrc_blks *rsrc_entry;
5820 struct list_head *rsrc_blk_list = NULL;
5821
5822 size_diff = 0;
5823 curr_ext_cnt = 0;
5824 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5825 &rsrc_ext_cnt,
5826 &rsrc_ext_size);
5827 if (unlikely(rc))
5828 return -EIO;
5829
5830 switch (type) {
5831 case LPFC_RSC_TYPE_FCOE_RPI:
5832 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5833 break;
5834 case LPFC_RSC_TYPE_FCOE_VPI:
5835 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5836 break;
5837 case LPFC_RSC_TYPE_FCOE_XRI:
5838 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5839 break;
5840 case LPFC_RSC_TYPE_FCOE_VFI:
5841 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5842 break;
5843 default:
5844 break;
5845 }
5846
5847 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5848 curr_ext_cnt++;
5849 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5850 size_diff++;
5851 }
5852
5853 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5854 rc = 1;
5855
5856 return rc;
5857}
5858
5859/**
5860 * lpfc_sli4_cfg_post_extnts - Post a resource extents allocation request
5861 * @phba: Pointer to HBA context object.
5862 * @extnt_cnt: number of available extents.
5863 * @type: the extent type (rpi, xri, vfi, vpi).
5864 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5865 * @mbox: pointer to the caller's allocated mailbox structure.
5866 *
5867 * This function executes the extents allocation request. It also
5868 * determines the amount of memory needed for the request and selects
5869 * an embedded or non-embedded mailbox accordingly. It is the caller's
5870 * responsibility to evaluate the response.
5871 *
5872 * Returns:
5873 * -Error: Error value describes the condition found.
5874 * 0: if successful
5875 **/
5876static int
8a9d2e80 5877lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6d368e53
JS
5878 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5879{
5880 int rc = 0;
5881 uint32_t req_len;
5882 uint32_t emb_len;
5883 uint32_t alloc_len, mbox_tmo;
5884
5885 /* Calculate the total requested length of the dma memory */
8a9d2e80 5886 req_len = extnt_cnt * sizeof(uint16_t);
6d368e53
JS
5887
5888 /*
5889 * Calculate the size of an embedded mailbox. The uint32_t
5890 * accounts for extents-specific word.
5891 */
5892 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5893 sizeof(uint32_t);
5894
5895 /*
5896 * Presume the allocation and response will fit into an embedded
5897 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5898 */
5899 *emb = LPFC_SLI4_MBX_EMBED;
5900 if (req_len > emb_len) {
8a9d2e80 5901 req_len = extnt_cnt * sizeof(uint16_t) +
6d368e53
JS
5902 sizeof(union lpfc_sli4_cfg_shdr) +
5903 sizeof(uint32_t);
5904 *emb = LPFC_SLI4_MBX_NEMBED;
5905 }
5906
5907 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5908 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5909 req_len, *emb);
5910 if (alloc_len < req_len) {
5911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
b76f2dc9 5912 "2982 Allocated DMA memory size (x%x) is "
6d368e53
JS
5913 "less than the requested DMA memory "
5914 "size (x%x)\n", alloc_len, req_len);
5915 return -ENOMEM;
5916 }
8a9d2e80 5917 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6d368e53
JS
5918 if (unlikely(rc))
5919 return -EIO;
5920
5921 if (!phba->sli4_hba.intr_enable)
5922 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5923 else {
a183a15f 5924 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
5925 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5926 }
5927
5928 if (unlikely(rc))
5929 rc = -EIO;
5930 return rc;
5931}
5932
5933/**
5934 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5935 * @phba: Pointer to HBA context object.
5936 * @type: The resource extent type to allocate.
5937 *
5938 * This function allocates the number of elements for the specified
5939 * resource type.
5940 **/
5941static int
5942lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5943{
5944 bool emb = false;
5945 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5946 uint16_t rsrc_id, rsrc_start, j, k;
5947 uint16_t *ids;
5948 int i, rc;
5949 unsigned long longs;
5950 unsigned long *bmask;
5951 struct lpfc_rsrc_blks *rsrc_blks;
5952 LPFC_MBOXQ_t *mbox;
5953 uint32_t length;
5954 struct lpfc_id_range *id_array = NULL;
5955 void *virtaddr = NULL;
5956 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5957 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5958 struct list_head *ext_blk_list;
5959
5960 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5961 &rsrc_cnt,
5962 &rsrc_size);
5963 if (unlikely(rc))
5964 return -EIO;
5965
5966 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5967 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5968 "3009 No available Resource Extents "
5969 "for resource type 0x%x: Count: 0x%x, "
5970 "Size 0x%x\n", type, rsrc_cnt,
5971 rsrc_size);
5972 return -ENOMEM;
5973 }
5974
8a9d2e80
JS
5975 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5976 "2903 Post resource extents type-0x%x: "
5977 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6d368e53
JS
5978
5979 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5980 if (!mbox)
5981 return -ENOMEM;
5982
8a9d2e80 5983 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6d368e53
JS
5984 if (unlikely(rc)) {
5985 rc = -EIO;
5986 goto err_exit;
5987 }
5988
5989 /*
5990 * Figure out where the response is located. Then get local pointers
5991 * to the response data. The port does not guarantee to respond to
5992 * all extents counts request so update the local variable with the
5993 * allocated count from the port.
5994 */
5995 if (emb == LPFC_SLI4_MBX_EMBED) {
5996 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5997 id_array = &rsrc_ext->u.rsp.id[0];
5998 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5999 } else {
6000 virtaddr = mbox->sge_array->addr[0];
6001 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6002 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6003 id_array = &n_rsrc->id;
6004 }
6005
6006 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6007 rsrc_id_cnt = rsrc_cnt * rsrc_size;
6008
6009 /*
6010 * Based on the resource size and count, correct the base and max
6011 * resource values.
6012 */
6013 length = sizeof(struct lpfc_rsrc_blks);
6014 switch (type) {
6015 case LPFC_RSC_TYPE_FCOE_RPI:
6396bb22 6016 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6d368e53
JS
6017 sizeof(unsigned long),
6018 GFP_KERNEL);
6019 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6020 rc = -ENOMEM;
6021 goto err_exit;
6022 }
6396bb22 6023 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6d368e53
JS
6024 sizeof(uint16_t),
6025 GFP_KERNEL);
6026 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6027 kfree(phba->sli4_hba.rpi_bmask);
6028 rc = -ENOMEM;
6029 goto err_exit;
6030 }
6031
6032 /*
6033 * The next_rpi was initialized with the maximum available
6034 * count but the port may allocate a smaller number. Catch
6035 * that case and update the next_rpi.
6036 */
6037 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6038
6039 /* Initialize local ptrs for common extent processing later. */
6040 bmask = phba->sli4_hba.rpi_bmask;
6041 ids = phba->sli4_hba.rpi_ids;
6042 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6043 break;
6044 case LPFC_RSC_TYPE_FCOE_VPI:
6396bb22 6045 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6d368e53
JS
6046 GFP_KERNEL);
6047 if (unlikely(!phba->vpi_bmask)) {
6048 rc = -ENOMEM;
6049 goto err_exit;
6050 }
6396bb22 6051 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6d368e53
JS
6052 GFP_KERNEL);
6053 if (unlikely(!phba->vpi_ids)) {
6054 kfree(phba->vpi_bmask);
6055 rc = -ENOMEM;
6056 goto err_exit;
6057 }
6058
6059 /* Initialize local ptrs for common extent processing later. */
6060 bmask = phba->vpi_bmask;
6061 ids = phba->vpi_ids;
6062 ext_blk_list = &phba->lpfc_vpi_blk_list;
6063 break;
6064 case LPFC_RSC_TYPE_FCOE_XRI:
6396bb22 6065 phba->sli4_hba.xri_bmask = kcalloc(longs,
6d368e53
JS
6066 sizeof(unsigned long),
6067 GFP_KERNEL);
6068 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6069 rc = -ENOMEM;
6070 goto err_exit;
6071 }
8a9d2e80 6072 phba->sli4_hba.max_cfg_param.xri_used = 0;
6396bb22 6073 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6d368e53
JS
6074 sizeof(uint16_t),
6075 GFP_KERNEL);
6076 if (unlikely(!phba->sli4_hba.xri_ids)) {
6077 kfree(phba->sli4_hba.xri_bmask);
6078 rc = -ENOMEM;
6079 goto err_exit;
6080 }
6081
6082 /* Initialize local ptrs for common extent processing later. */
6083 bmask = phba->sli4_hba.xri_bmask;
6084 ids = phba->sli4_hba.xri_ids;
6085 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6086 break;
6087 case LPFC_RSC_TYPE_FCOE_VFI:
6396bb22 6088 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6d368e53
JS
6089 sizeof(unsigned long),
6090 GFP_KERNEL);
6091 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6092 rc = -ENOMEM;
6093 goto err_exit;
6094 }
6396bb22 6095 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6d368e53
JS
6096 sizeof(uint16_t),
6097 GFP_KERNEL);
6098 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6099 kfree(phba->sli4_hba.vfi_bmask);
6100 rc = -ENOMEM;
6101 goto err_exit;
6102 }
6103
6104 /* Initialize local ptrs for common extent processing later. */
6105 bmask = phba->sli4_hba.vfi_bmask;
6106 ids = phba->sli4_hba.vfi_ids;
6107 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6108 break;
6109 default:
6110 /* Unsupported Opcode. Fail call. */
6111 id_array = NULL;
6112 bmask = NULL;
6113 ids = NULL;
6114 ext_blk_list = NULL;
6115 goto err_exit;
6116 }
6117
6118 /*
6119 * Complete initializing the extent configuration with the
6120 * allocated ids assigned to this function. The bitmask serves
6121 * as an index into the array and manages the available ids. The
6122 * array just stores the ids communicated to the port via the wqes.
6123 */
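 /* Each id_array element carries two resource IDs: one is taken on even
  * loop iterations, the other on odd iterations, and the array index k
  * advances after every odd iteration.
  */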
6124 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6125 if ((i % 2) == 0)
6126 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6127 &id_array[k]);
6128 else
6129 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6130 &id_array[k]);
6131
6132 rsrc_blks = kzalloc(length, GFP_KERNEL);
6133 if (unlikely(!rsrc_blks)) {
6134 rc = -ENOMEM;
6135 kfree(bmask);
6136 kfree(ids);
6137 goto err_exit;
6138 }
6139 rsrc_blks->rsrc_start = rsrc_id;
6140 rsrc_blks->rsrc_size = rsrc_size;
6141 list_add_tail(&rsrc_blks->list, ext_blk_list);
6142 rsrc_start = rsrc_id;
895427bd 6143 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
5e5b511d 6144 phba->sli4_hba.io_xri_start = rsrc_start +
895427bd 6145 lpfc_sli4_get_iocb_cnt(phba);
895427bd 6146 }
6d368e53
JS
6147
6148 while (rsrc_id < (rsrc_start + rsrc_size)) {
6149 ids[j] = rsrc_id;
6150 rsrc_id++;
6151 j++;
6152 }
6153 /* Entire word processed. Get next word.*/
6154 if ((i % 2) == 1)
6155 k++;
6156 }
6157 err_exit:
6158 lpfc_sli4_mbox_cmd_free(phba, mbox);
6159 return rc;
6160}
6161
895427bd
JS
6162
6163
6d368e53
JS
6164/**
6165 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6166 * @phba: Pointer to HBA context object.
6167 * @type: the extent's type.
6168 *
6169 * This function deallocates all extents of a particular resource type.
6170 * SLI4 does not allow for deallocating a particular extent range. It
6171 * is the caller's responsibility to release all kernel memory resources.
6172 **/
6173static int
6174lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6175{
6176 int rc;
6177 uint32_t length, mbox_tmo = 0;
6178 LPFC_MBOXQ_t *mbox;
6179 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6180 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6181
6182 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6183 if (!mbox)
6184 return -ENOMEM;
6185
6186 /*
6187 * This function sends an embedded mailbox because it only sends the
6188 * resource type. All extents of this type are released by the
6189 * port.
6190 */
6191 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6192 sizeof(struct lpfc_sli4_cfg_mhdr));
6193 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6194 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6195 length, LPFC_SLI4_MBX_EMBED);
6196
6197 /* Send an extents count of 0 - the dealloc doesn't use it. */
6198 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6199 LPFC_SLI4_MBX_EMBED);
6200 if (unlikely(rc)) {
6201 rc = -EIO;
6202 goto out_free_mbox;
6203 }
6204 if (!phba->sli4_hba.intr_enable)
6205 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6206 else {
a183a15f 6207 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
6208 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6209 }
6210 if (unlikely(rc)) {
6211 rc = -EIO;
6212 goto out_free_mbox;
6213 }
6214
6215 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6216 if (bf_get(lpfc_mbox_hdr_status,
6217 &dealloc_rsrc->header.cfg_shdr.response)) {
6218 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6219 "2919 Failed to release resource extents "
6220 "for type %d - Status 0x%x Add'l Status 0x%x. "
6221 "Resource memory not released.\n",
6222 type,
6223 bf_get(lpfc_mbox_hdr_status,
6224 &dealloc_rsrc->header.cfg_shdr.response),
6225 bf_get(lpfc_mbox_hdr_add_status,
6226 &dealloc_rsrc->header.cfg_shdr.response));
6227 rc = -EIO;
6228 goto out_free_mbox;
6229 }
6230
6231 /* Release kernel memory resources for the specific type. */
6232 switch (type) {
6233 case LPFC_RSC_TYPE_FCOE_VPI:
6234 kfree(phba->vpi_bmask);
6235 kfree(phba->vpi_ids);
6236 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6237 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6238 &phba->lpfc_vpi_blk_list, list) {
6239 list_del_init(&rsrc_blk->list);
6240 kfree(rsrc_blk);
6241 }
16a3a208 6242 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6d368e53
JS
6243 break;
6244 case LPFC_RSC_TYPE_FCOE_XRI:
6245 kfree(phba->sli4_hba.xri_bmask);
6246 kfree(phba->sli4_hba.xri_ids);
6d368e53
JS
6247 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6248 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6249 list_del_init(&rsrc_blk->list);
6250 kfree(rsrc_blk);
6251 }
6252 break;
6253 case LPFC_RSC_TYPE_FCOE_VFI:
6254 kfree(phba->sli4_hba.vfi_bmask);
6255 kfree(phba->sli4_hba.vfi_ids);
6256 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6257 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6258 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6259 list_del_init(&rsrc_blk->list);
6260 kfree(rsrc_blk);
6261 }
6262 break;
6263 case LPFC_RSC_TYPE_FCOE_RPI:
6264 /* RPI bitmask and physical id array are cleaned up earlier. */
6265 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6266 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6267 list_del_init(&rsrc_blk->list);
6268 kfree(rsrc_blk);
6269 }
6270 break;
6271 default:
6272 break;
6273 }
6274
6275 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6276
6277 out_free_mbox:
6278 mempool_free(mbox, phba->mbox_mem_pool);
6279 return rc;
6280}
6281
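/**
 * lpfc_set_features - Prepare a SET_FEATURES mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to the caller's allocated mailbox structure.
 * @feature: The feature to request (UE recovery, MDS diags or dual dump).
 *
 * This routine only builds the embedded SET_FEATURES command in @mbox;
 * the caller is responsible for issuing the mailbox and checking its
 * completion status.
 */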
bd4b3e5c 6282static void
7bdedb34
JS
6283lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6284 uint32_t feature)
65791f1f 6285{
65791f1f 6286 uint32_t len;
65791f1f 6287
65791f1f
JS
6288 len = sizeof(struct lpfc_mbx_set_feature) -
6289 sizeof(struct lpfc_sli4_cfg_mhdr);
6290 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6291 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6292 LPFC_SLI4_MBX_EMBED);
7bdedb34
JS
6293
6294 switch (feature) {
6295 case LPFC_SET_UE_RECOVERY:
6296 bf_set(lpfc_mbx_set_feature_UER,
6297 &mbox->u.mqe.un.set_feature, 1);
6298 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6299 mbox->u.mqe.un.set_feature.param_len = 8;
6300 break;
6301 case LPFC_SET_MDS_DIAGS:
6302 bf_set(lpfc_mbx_set_feature_mds,
6303 &mbox->u.mqe.un.set_feature, 1);
6304 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
ae9e28f3 6305 &mbox->u.mqe.un.set_feature, 1);
7bdedb34
JS
6306 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6307 mbox->u.mqe.un.set_feature.param_len = 8;
6308 break;
171f6c41
JS
6309 case LPFC_SET_DUAL_DUMP:
6310 bf_set(lpfc_mbx_set_feature_dd,
6311 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6312 bf_set(lpfc_mbx_set_feature_ddquery,
6313 &mbox->u.mqe.un.set_feature, 0);
6314 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6315 mbox->u.mqe.un.set_feature.param_len = 4;
6316 break;
65791f1f 6317 }
7bdedb34
JS
6318
6319 return;
65791f1f
JS
6320}
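/*
 * Minimal usage sketch (hypothetical helper, not part of the driver):
 * lpfc_set_features() only builds the SET_FEATURES mailbox command; the
 * caller still issues it and checks the result, mirroring how MDS
 * diagnostics are enabled in lpfc_sli4_hba_setup() below. The helper
 * name and the -EIO mapping are illustrative assumptions.
 */
#if 0
static int lpfc_enable_mds_diags_sketch(struct lpfc_hba *phba,
					LPFC_MBOXQ_t *mboxq)
{
	int rc;

	/* Build the SET_FEATURES mailbox for MDS diagnostics */
	lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);

	/* Issue it synchronously and map the mailbox status to an errno */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif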
6321
1165a5c2
JS
6322/**
6323 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6324 * @phba: Pointer to HBA context object.
6325 *
6326 * Disable FW logging into host memory on the adapter. This must
6327 * be done before reading the logs from host memory.
6328 **/
6329void
6330lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6331{
6332 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6333
95bfc6d8
JS
6334 spin_lock_irq(&phba->hbalock);
6335 ras_fwlog->state = INACTIVE;
6336 spin_unlock_irq(&phba->hbalock);
1165a5c2
JS
6337
6338 /* Disable FW logging to host memory */
6339 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6340 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
95bfc6d8
JS
6341
6342 /* Wait 10ms for firmware to stop using DMA buffer */
6343 usleep_range(10 * 1000, 20 * 1000);
1165a5c2
JS
6344}
6345
d2cc9bcd
JS
6346/**
6347 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6348 * @phba: Pointer to HBA context object.
6349 *
6350 * This function is called to free memory allocated for RAS FW logging
6351 * support in the driver.
6352 **/
6353void
6354lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6355{
6356 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6357 struct lpfc_dmabuf *dmabuf, *next;
6358
6359 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6360 list_for_each_entry_safe(dmabuf, next,
6361 &ras_fwlog->fwlog_buff_list,
6362 list) {
6363 list_del(&dmabuf->list);
6364 dma_free_coherent(&phba->pcidev->dev,
6365 LPFC_RAS_MAX_ENTRY_SIZE,
6366 dmabuf->virt, dmabuf->phys);
6367 kfree(dmabuf);
6368 }
6369 }
6370
6371 if (ras_fwlog->lwpd.virt) {
6372 dma_free_coherent(&phba->pcidev->dev,
6373 sizeof(uint32_t) * 2,
6374 ras_fwlog->lwpd.virt,
6375 ras_fwlog->lwpd.phys);
6376 ras_fwlog->lwpd.virt = NULL;
6377 }
6378
95bfc6d8
JS
6379 spin_lock_irq(&phba->hbalock);
6380 ras_fwlog->state = INACTIVE;
6381 spin_unlock_irq(&phba->hbalock);
d2cc9bcd
JS
6382}
6383
6384/**
6385 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
6386 * @phba: Pointer to HBA context object.
6387 * @fwlog_buff_count: Count of buffers to be created.
6388 *
6389 * This routine allocates DMA memory for the Log Write Position Data [LWPD]
6390 * and for the buffers posted to the adapter to receive FW log updates.
6391 * The buffer count is derived from the module parameter ras_fwlog_buffsize;
6392 * the size of each buffer posted to the FW is 64K.
6393 **/
6394
6395static int
6396lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6397 uint32_t fwlog_buff_count)
6398{
6399 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6400 struct lpfc_dmabuf *dmabuf;
6401 int rc = 0, i = 0;
6402
6403 /* Initialize List */
6404 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6405
6406 /* Allocate memory for the LWPD */
6407 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6408 sizeof(uint32_t) * 2,
6409 &ras_fwlog->lwpd.phys,
6410 GFP_KERNEL);
6411 if (!ras_fwlog->lwpd.virt) {
cb34990b 6412 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d2cc9bcd
JS
6413 "6185 LWPD Memory Alloc Failed\n");
6414
6415 return -ENOMEM;
6416 }
6417
6418 ras_fwlog->fw_buffcount = fwlog_buff_count;
6419 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6420 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6421 GFP_KERNEL);
6422 if (!dmabuf) {
6423 rc = -ENOMEM;
6424 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6425 "6186 Memory Alloc failed FW logging");
6426 goto free_mem;
6427 }
6428
750afb08 6429 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
d2cc9bcd 6430 LPFC_RAS_MAX_ENTRY_SIZE,
750afb08 6431 &dmabuf->phys, GFP_KERNEL);
d2cc9bcd
JS
6432 if (!dmabuf->virt) {
6433 kfree(dmabuf);
6434 rc = -ENOMEM;
6435 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6436 "6187 DMA Alloc Failed FW logging");
6437 goto free_mem;
6438 }
d2cc9bcd
JS
6439 dmabuf->buffer_tag = i;
6440 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6441 }
6442
6443free_mem:
6444 if (rc)
6445 lpfc_sli4_ras_dma_free(phba);
6446
6447 return rc;
6448}
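/*
 * Worked example of the buffer sizing used by lpfc_sli4_ras_fwlog_init()
 * when it calls the routine above (kept symbolic, since the constant
 * values are not shown here):
 *
 *	fwlog_buffsize   = LPFC_RAS_MIN_BUFF_POST_SIZE *
 *			   phba->cfg_ras_fwlog_buffsize
 *	fwlog_buff_count = fwlog_buffsize / LPFC_RAS_MAX_ENTRY_SIZE
 *
 * so fwlog_buff_count DMA buffers of 64K (LPFC_RAS_MAX_ENTRY_SIZE) each are
 * allocated, plus one two-word LWPD buffer.
 */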
6449
6450/**
6451 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6452 * @phba: pointer to lpfc hba data structure.
6453 * @pmb: pointer to the driver internal queue element for mailbox command.
6454 *
6455 * Completion handler for driver's RAS MBX command to the device.
6456 **/
6457static void
6458lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6459{
6460 MAILBOX_t *mb;
6461 union lpfc_sli4_cfg_shdr *shdr;
6462 uint32_t shdr_status, shdr_add_status;
6463 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6464
6465 mb = &pmb->u.mb;
6466
6467 shdr = (union lpfc_sli4_cfg_shdr *)
6468 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6469 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6470 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6471
6472 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
cb34990b 6473 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
d2cc9bcd
JS
6474 "6188 FW LOG mailbox "
6475 "completed with status x%x add_status x%x,"
6476 " mbx status x%x\n",
6477 shdr_status, shdr_add_status, mb->mbxStatus);
cb34990b
JS
6478
6479 ras_fwlog->ras_hwsupport = false;
d2cc9bcd
JS
6480 goto disable_ras;
6481 }
6482
95bfc6d8
JS
6483 spin_lock_irq(&phba->hbalock);
6484 ras_fwlog->state = ACTIVE;
6485 spin_unlock_irq(&phba->hbalock);
d2cc9bcd
JS
6486 mempool_free(pmb, phba->mbox_mem_pool);
6487
6488 return;
6489
6490disable_ras:
6491 /* Free RAS DMA memory */
6492 lpfc_sli4_ras_dma_free(phba);
6493 mempool_free(pmb, phba->mbox_mem_pool);
6494}
6495
6496/**
6497 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6498 * @phba: pointer to lpfc hba data structure.
6499 * @fwlog_level: Logging verbosity level.
6500 * @fwlog_enable: Enable/Disable logging.
6501 *
6502 * Initialize memory and post mailbox command to enable FW logging in host
6503 * memory.
6504 **/
6505int
6506lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6507 uint32_t fwlog_level,
6508 uint32_t fwlog_enable)
6509{
6510 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6511 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6512 struct lpfc_dmabuf *dmabuf;
6513 LPFC_MBOXQ_t *mbox;
6514 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6515 int rc = 0;
6516
95bfc6d8
JS
6517 spin_lock_irq(&phba->hbalock);
6518 ras_fwlog->state = INACTIVE;
6519 spin_unlock_irq(&phba->hbalock);
6520
d2cc9bcd
JS
6521 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6522 phba->cfg_ras_fwlog_buffsize);
6523 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6524
6525 /*
6526 * If re-enabling FW logging support, use the earlier allocated
6527 * DMA buffers while posting the MBX command.
6528 */
6529 if (!ras_fwlog->lwpd.virt) {
6530 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6531 if (rc) {
6532 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
cb34990b 6533 "6189 FW Log Memory Allocation Failed");
d2cc9bcd
JS
6534 return rc;
6535 }
6536 }
6537
6538 /* Setup Mailbox command */
6539 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6540 if (!mbox) {
cb34990b 6541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d2cc9bcd
JS
6542 "6190 RAS MBX Alloc Failed");
6543 rc = -ENOMEM;
6544 goto mem_free;
6545 }
6546
6547 ras_fwlog->fw_loglevel = fwlog_level;
6548 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6549 sizeof(struct lpfc_sli4_cfg_mhdr));
6550
6551 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6552 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6553 len, LPFC_SLI4_MBX_EMBED);
6554
6555 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6556 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6557 fwlog_enable);
6558 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6559 ras_fwlog->fw_loglevel);
6560 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6561 ras_fwlog->fw_buffcount);
6562 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6563 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6564
6565 /* Update DMA buffer address */
6566 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6567 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6568
6569 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6570 putPaddrLow(dmabuf->phys);
6571
6572 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6573 putPaddrHigh(dmabuf->phys);
6574 }
6575
6576 /* Update LWPD address */
6577 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6578 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6579
95bfc6d8
JS
6580 spin_lock_irq(&phba->hbalock);
6581 ras_fwlog->state = REG_INPROGRESS;
6582 spin_unlock_irq(&phba->hbalock);
d2cc9bcd
JS
6583 mbox->vport = phba->pport;
6584 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6585
6586 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6587
6588 if (rc == MBX_NOT_FINISHED) {
cb34990b
JS
6589 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6590 "6191 FW-Log Mailbox failed. "
d2cc9bcd
JS
6591 "status %d mbxStatus : x%x", rc,
6592 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6593 mempool_free(mbox, phba->mbox_mem_pool);
6594 rc = -EIO;
6595 goto mem_free;
6596 } else
6597 rc = 0;
6598mem_free:
6599 if (rc)
6600 lpfc_sli4_ras_dma_free(phba);
6601
6602 return rc;
6603}
6604
6605/**
6606 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6607 * @phba: Pointer to HBA context object.
6608 *
6609 * Check if RAS is supported on the adapter and initialize it.
6610 **/
6611void
6612lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6613{
6614 /* Check RAS FW Log needs to be enabled or not */
6615 if (lpfc_check_fwlog_support(phba))
6616 return;
6617
6618 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6619 LPFC_RAS_ENABLE_LOGGING);
6620}
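/*
 * Minimal sketch (hypothetical helper, not part of the driver) showing how
 * the RAS helpers above compose when changing the firmware log verbosity:
 * stop logging, then call lpfc_sli4_ras_fwlog_init() again. Because
 * ras_fwlog->lwpd.virt is still set, the previously allocated DMA buffers
 * are reused. The helper name and the -EOPNOTSUPP mapping are assumptions.
 */
#if 0
static int lpfc_ras_change_fwlog_level_sketch(struct lpfc_hba *phba,
					      uint32_t new_level)
{
	/* Bail out if the adapter does not support RAS FW logging */
	if (lpfc_check_fwlog_support(phba))
		return -EOPNOTSUPP;

	/* Quiesce the current logging session, then re-enable it */
	lpfc_ras_stop_fwlog(phba);
	return lpfc_sli4_ras_fwlog_init(phba, new_level,
					LPFC_RAS_ENABLE_LOGGING);
}
#endif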
6621
6d368e53
JS
6622/**
6623 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6624 * @phba: Pointer to HBA context object.
6625 *
6626 * This function allocates all SLI4 resource identifiers.
6627 **/
6628int
6629lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6630{
6631 int i, rc, error = 0;
6632 uint16_t count, base;
6633 unsigned long longs;
6634
ff78d8f9
JS
6635 if (!phba->sli4_hba.rpi_hdrs_in_use)
6636 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6d368e53
JS
6637 if (phba->sli4_hba.extents_in_use) {
6638 /*
6639 * The port supports resource extents. The XRI, VPI, VFI, RPI
6640 * resource extent count must be read and allocated before
6641 * provisioning the resource id arrays.
6642 */
6643 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6644 LPFC_IDX_RSRC_RDY) {
6645 /*
6646 * Extent-based resources are set - the driver could
6647 * be in a port reset. Figure out if any corrective
6648 * actions need to be taken.
6649 */
6650 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6651 LPFC_RSC_TYPE_FCOE_VFI);
6652 if (rc != 0)
6653 error++;
6654 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6655 LPFC_RSC_TYPE_FCOE_VPI);
6656 if (rc != 0)
6657 error++;
6658 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6659 LPFC_RSC_TYPE_FCOE_XRI);
6660 if (rc != 0)
6661 error++;
6662 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6663 LPFC_RSC_TYPE_FCOE_RPI);
6664 if (rc != 0)
6665 error++;
6666
6667 /*
6668 * It's possible that the number of resources
6669 * provided to this port instance changed between
6670 * resets. Detect this condition and reallocate
6671 * resources. Otherwise, there is no action.
6672 */
6673 if (error) {
6674 lpfc_printf_log(phba, KERN_INFO,
6675 LOG_MBOX | LOG_INIT,
6676 "2931 Detected extent resource "
6677 "change. Reallocating all "
6678 "extents.\n");
6679 rc = lpfc_sli4_dealloc_extent(phba,
6680 LPFC_RSC_TYPE_FCOE_VFI);
6681 rc = lpfc_sli4_dealloc_extent(phba,
6682 LPFC_RSC_TYPE_FCOE_VPI);
6683 rc = lpfc_sli4_dealloc_extent(phba,
6684 LPFC_RSC_TYPE_FCOE_XRI);
6685 rc = lpfc_sli4_dealloc_extent(phba,
6686 LPFC_RSC_TYPE_FCOE_RPI);
6687 } else
6688 return 0;
6689 }
6690
6691 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6692 if (unlikely(rc))
6693 goto err_exit;
6694
6695 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6696 if (unlikely(rc))
6697 goto err_exit;
6698
6699 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6700 if (unlikely(rc))
6701 goto err_exit;
6702
6703 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6704 if (unlikely(rc))
6705 goto err_exit;
6706 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6707 LPFC_IDX_RSRC_RDY);
6708 return rc;
6709 } else {
6710 /*
6711 * The port does not support resource extents. The XRI, VPI,
6712 * VFI, RPI resource ids were determined from READ_CONFIG.
6713 * Just allocate the bitmasks and provision the resource id
6714 * arrays. If a port reset is active, the resources don't
6715 * need any action - just exit.
6716 */
6717 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
ff78d8f9
JS
6718 LPFC_IDX_RSRC_RDY) {
6719 lpfc_sli4_dealloc_resource_identifiers(phba);
6720 lpfc_sli4_remove_rpis(phba);
6721 }
6d368e53
JS
6722 /* RPIs. */
6723 count = phba->sli4_hba.max_cfg_param.max_rpi;
0a630c27
JS
6724 if (count <= 0) {
6725 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6726 "3279 Invalid provisioning of "
6727 "rpi:%d\n", count);
6728 rc = -EINVAL;
6729 goto err_exit;
6730 }
6d368e53
JS
6731 base = phba->sli4_hba.max_cfg_param.rpi_base;
6732 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6733 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6d368e53
JS
6734 sizeof(unsigned long),
6735 GFP_KERNEL);
6736 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6737 rc = -ENOMEM;
6738 goto err_exit;
6739 }
6396bb22 6740 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6d368e53
JS
6741 GFP_KERNEL);
6742 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6743 rc = -ENOMEM;
6744 goto free_rpi_bmask;
6745 }
6746
6747 for (i = 0; i < count; i++)
6748 phba->sli4_hba.rpi_ids[i] = base + i;
6749
6750 /* VPIs. */
6751 count = phba->sli4_hba.max_cfg_param.max_vpi;
0a630c27
JS
6752 if (count <= 0) {
6753 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6754 "3280 Invalid provisioning of "
6755 "vpi:%d\n", count);
6756 rc = -EINVAL;
6757 goto free_rpi_ids;
6758 }
6d368e53
JS
6759 base = phba->sli4_hba.max_cfg_param.vpi_base;
6760 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6761 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6d368e53
JS
6762 GFP_KERNEL);
6763 if (unlikely(!phba->vpi_bmask)) {
6764 rc = -ENOMEM;
6765 goto free_rpi_ids;
6766 }
6396bb22 6767 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6d368e53
JS
6768 GFP_KERNEL);
6769 if (unlikely(!phba->vpi_ids)) {
6770 rc = -ENOMEM;
6771 goto free_vpi_bmask;
6772 }
6773
6774 for (i = 0; i < count; i++)
6775 phba->vpi_ids[i] = base + i;
6776
6777 /* XRIs. */
6778 count = phba->sli4_hba.max_cfg_param.max_xri;
0a630c27
JS
6779 if (count <= 0) {
6780 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6781 "3281 Invalid provisioning of "
6782 "xri:%d\n", count);
6783 rc = -EINVAL;
6784 goto free_vpi_ids;
6785 }
6d368e53
JS
6786 base = phba->sli4_hba.max_cfg_param.xri_base;
6787 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6788 phba->sli4_hba.xri_bmask = kcalloc(longs,
6d368e53
JS
6789 sizeof(unsigned long),
6790 GFP_KERNEL);
6791 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6792 rc = -ENOMEM;
6793 goto free_vpi_ids;
6794 }
41899be7 6795 phba->sli4_hba.max_cfg_param.xri_used = 0;
6396bb22 6796 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6d368e53
JS
6797 GFP_KERNEL);
6798 if (unlikely(!phba->sli4_hba.xri_ids)) {
6799 rc = -ENOMEM;
6800 goto free_xri_bmask;
6801 }
6802
6803 for (i = 0; i < count; i++)
6804 phba->sli4_hba.xri_ids[i] = base + i;
6805
6806 /* VFIs. */
6807 count = phba->sli4_hba.max_cfg_param.max_vfi;
0a630c27
JS
6808 if (count <= 0) {
6809 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6810 "3282 Invalid provisioning of "
6811 "vfi:%d\n", count);
6812 rc = -EINVAL;
6813 goto free_xri_ids;
6814 }
6d368e53
JS
6815 base = phba->sli4_hba.max_cfg_param.vfi_base;
6816 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6817 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6d368e53
JS
6818 sizeof(unsigned long),
6819 GFP_KERNEL);
6820 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6821 rc = -ENOMEM;
6822 goto free_xri_ids;
6823 }
6396bb22 6824 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6d368e53
JS
6825 GFP_KERNEL);
6826 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6827 rc = -ENOMEM;
6828 goto free_vfi_bmask;
6829 }
6830
6831 for (i = 0; i < count; i++)
6832 phba->sli4_hba.vfi_ids[i] = base + i;
6833
6834 /*
6835 * Mark all resources ready. An HBA reset doesn't need
6836 * to reset the initialization.
6837 */
6838 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6839 LPFC_IDX_RSRC_RDY);
6840 return 0;
6841 }
6842
6843 free_vfi_bmask:
6844 kfree(phba->sli4_hba.vfi_bmask);
cd60be49 6845 phba->sli4_hba.vfi_bmask = NULL;
6d368e53
JS
6846 free_xri_ids:
6847 kfree(phba->sli4_hba.xri_ids);
cd60be49 6848 phba->sli4_hba.xri_ids = NULL;
6d368e53
JS
6849 free_xri_bmask:
6850 kfree(phba->sli4_hba.xri_bmask);
cd60be49 6851 phba->sli4_hba.xri_bmask = NULL;
6d368e53
JS
6852 free_vpi_ids:
6853 kfree(phba->vpi_ids);
cd60be49 6854 phba->vpi_ids = NULL;
6d368e53
JS
6855 free_vpi_bmask:
6856 kfree(phba->vpi_bmask);
cd60be49 6857 phba->vpi_bmask = NULL;
6d368e53
JS
6858 free_rpi_ids:
6859 kfree(phba->sli4_hba.rpi_ids);
cd60be49 6860 phba->sli4_hba.rpi_ids = NULL;
6d368e53
JS
6861 free_rpi_bmask:
6862 kfree(phba->sli4_hba.rpi_bmask);
cd60be49 6863 phba->sli4_hba.rpi_bmask = NULL;
6d368e53
JS
6864 err_exit:
6865 return rc;
6866}
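/*
 * Minimal sketch (hypothetical helper, not part of the driver) of the
 * non-extent provisioning pattern repeated above for RPIs, VPIs, XRIs and
 * VFIs: a bitmask sized in longs plus an id array mapping index i to
 * base + i.
 */
#if 0
static int lpfc_provision_id_range_sketch(unsigned long **bmask,
					  uint16_t **ids,
					  uint16_t base, uint16_t count)
{
	unsigned long longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
	uint16_t i;

	/* One bit per resource id, rounded up to whole longs */
	*bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL);
	if (unlikely(!*bmask))
		return -ENOMEM;

	/* Physical id lookup table, indexed by logical id */
	*ids = kcalloc(count, sizeof(uint16_t), GFP_KERNEL);
	if (unlikely(!*ids)) {
		kfree(*bmask);
		*bmask = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < count; i++)
		(*ids)[i] = base + i;
	return 0;
}
#endif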
6867
6868/**
6869 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6870 * @phba: Pointer to HBA context object.
6871 *
6872 * This function releases all SLI4 resource identifiers allocated by the
6873 * driver, either by freeing the extents or the bitmasks and id arrays.
6874 **/
6875int
6876lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6877{
6878 if (phba->sli4_hba.extents_in_use) {
6879 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6880 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6881 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6882 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6883 } else {
6884 kfree(phba->vpi_bmask);
16a3a208 6885 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6d368e53
JS
6886 kfree(phba->vpi_ids);
6887 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6888 kfree(phba->sli4_hba.xri_bmask);
6889 kfree(phba->sli4_hba.xri_ids);
6d368e53
JS
6890 kfree(phba->sli4_hba.vfi_bmask);
6891 kfree(phba->sli4_hba.vfi_ids);
6892 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6893 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6894 }
6895
6896 return 0;
6897}
6898
b76f2dc9
JS
6899/**
6900 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6901 * @phba: Pointer to HBA context object.
6902 * @type: The resource extent type.
6903 * @extnt_cnt: buffer to hold port extent count response
6904 * @extnt_size: buffer to hold port extent size response.
6905 *
6906 * This function calls the port to read the host allocated extents
6907 * for a particular type.
6908 **/
6909int
6910lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6911 uint16_t *extnt_cnt, uint16_t *extnt_size)
6912{
6913 bool emb;
6914 int rc = 0;
6915 uint16_t curr_blks = 0;
6916 uint32_t req_len, emb_len;
6917 uint32_t alloc_len, mbox_tmo;
6918 struct list_head *blk_list_head;
6919 struct lpfc_rsrc_blks *rsrc_blk;
6920 LPFC_MBOXQ_t *mbox;
6921 void *virtaddr = NULL;
6922 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6923 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6924 union lpfc_sli4_cfg_shdr *shdr;
6925
6926 switch (type) {
6927 case LPFC_RSC_TYPE_FCOE_VPI:
6928 blk_list_head = &phba->lpfc_vpi_blk_list;
6929 break;
6930 case LPFC_RSC_TYPE_FCOE_XRI:
6931 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6932 break;
6933 case LPFC_RSC_TYPE_FCOE_VFI:
6934 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6935 break;
6936 case LPFC_RSC_TYPE_FCOE_RPI:
6937 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6938 break;
6939 default:
6940 return -EIO;
6941 }
6942
6943 /* Count the number of extents currently allocated for this type. */
6944 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6945 if (curr_blks == 0) {
6946 /*
6947 * The GET_ALLOCATED mailbox does not return the size,
6948 * just the count. The size should be just the size
6949 * stored in the current allocated block and all sizes
6950 * for an extent type are the same so set the return
6951 * value now.
6952 */
6953 *extnt_size = rsrc_blk->rsrc_size;
6954 }
6955 curr_blks++;
6956 }
6957
b76f2dc9
JS
6958 /*
6959 * Calculate the size of an embedded mailbox. The uint32_t
6960 * accounts for the extent-specific word.
6961 */
6962 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6963 sizeof(uint32_t);
6964
6965 /*
6966 * Presume the allocation and response will fit into an embedded
6967 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6968 */
6969 emb = LPFC_SLI4_MBX_EMBED;
6970 req_len = emb_len;
6971 if (req_len > emb_len) {
6972 req_len = curr_blks * sizeof(uint16_t) +
6973 sizeof(union lpfc_sli4_cfg_shdr) +
6974 sizeof(uint32_t);
6975 emb = LPFC_SLI4_MBX_NEMBED;
6976 }
6977
6978 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6979 if (!mbox)
6980 return -ENOMEM;
6981 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6982
6983 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6984 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6985 req_len, emb);
6986 if (alloc_len < req_len) {
6987 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6988 "2983 Allocated DMA memory size (x%x) is "
6989 "less than the requested DMA memory "
6990 "size (x%x)\n", alloc_len, req_len);
6991 rc = -ENOMEM;
6992 goto err_exit;
6993 }
6994 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6995 if (unlikely(rc)) {
6996 rc = -EIO;
6997 goto err_exit;
6998 }
6999
7000 if (!phba->sli4_hba.intr_enable)
7001 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7002 else {
a183a15f 7003 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
b76f2dc9
JS
7004 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7005 }
7006
7007 if (unlikely(rc)) {
7008 rc = -EIO;
7009 goto err_exit;
7010 }
7011
7012 /*
7013 * Figure out where the response is located. Then get local pointers
7014 * to the response data. The port does not guarantee to respond to
7015 * all extent count requests, so update the local variable with the
7016 * allocated count returned by the port.
7017 */
7018 if (emb == LPFC_SLI4_MBX_EMBED) {
7019 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7020 shdr = &rsrc_ext->header.cfg_shdr;
7021 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7022 } else {
7023 virtaddr = mbox->sge_array->addr[0];
7024 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7025 shdr = &n_rsrc->cfg_shdr;
7026 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7027 }
7028
7029 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7030 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
7031 "2984 Failed to read allocated resources "
7032 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7033 type,
7034 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7035 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7036 rc = -EIO;
7037 goto err_exit;
7038 }
7039 err_exit:
7040 lpfc_sli4_mbox_cmd_free(phba, mbox);
7041 return rc;
7042}
7043
8a9d2e80 7044/**
0ef69968 7045 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
8a9d2e80 7046 * @phba: pointer to lpfc hba data structure.
895427bd
JS
7048 * @sgl_list: linked link of sgl buffers to post
7049 * @cnt: number of linked list buffers
8a9d2e80 7050 *
895427bd 7051 * This routine walks the list of buffers that have been allocated and
8a9d2e80
JS
7052 * reposts them to the port by using SGL block post. This is needed after a
7053 * pci_function_reset/warm_start or start. It attempts to construct blocks
895427bd
JS
7054 * of buffer sgls which contain contiguous xris and uses the non-embedded
7055 * SGL block post mailbox commands to post them to the port. For single
8a9d2e80
JS
7056 * buffer sgls with non-contiguous xris, if any, it uses the embedded SGL post
7057 * mailbox command for posting.
7058 *
7059 * Returns: 0 = success, non-zero failure.
7060 **/
7061static int
895427bd
JS
7062lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7063 struct list_head *sgl_list, int cnt)
8a9d2e80
JS
7064{
7065 struct lpfc_sglq *sglq_entry = NULL;
7066 struct lpfc_sglq *sglq_entry_next = NULL;
7067 struct lpfc_sglq *sglq_entry_first = NULL;
895427bd
JS
7068 int status, total_cnt;
7069 int post_cnt = 0, num_posted = 0, block_cnt = 0;
8a9d2e80
JS
7070 int last_xritag = NO_XRI;
7071 LIST_HEAD(prep_sgl_list);
7072 LIST_HEAD(blck_sgl_list);
7073 LIST_HEAD(allc_sgl_list);
7074 LIST_HEAD(post_sgl_list);
7075 LIST_HEAD(free_sgl_list);
7076
38c20673 7077 spin_lock_irq(&phba->hbalock);
895427bd
JS
7078 spin_lock(&phba->sli4_hba.sgl_list_lock);
7079 list_splice_init(sgl_list, &allc_sgl_list);
7080 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 7081 spin_unlock_irq(&phba->hbalock);
8a9d2e80 7082
895427bd 7083 total_cnt = cnt;
8a9d2e80
JS
7084 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7085 &allc_sgl_list, list) {
7086 list_del_init(&sglq_entry->list);
7087 block_cnt++;
7088 if ((last_xritag != NO_XRI) &&
7089 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7090 /* a hole in xri block, form a sgl posting block */
7091 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7092 post_cnt = block_cnt - 1;
7093 /* prepare list for next posting block */
7094 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7095 block_cnt = 1;
7096 } else {
7097 /* prepare list for next posting block */
7098 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7099 /* enough sgls for non-embed sgl mbox command */
7100 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7101 list_splice_init(&prep_sgl_list,
7102 &blck_sgl_list);
7103 post_cnt = block_cnt;
7104 block_cnt = 0;
7105 }
7106 }
7107 num_posted++;
7108
7109 /* keep track of last sgl's xritag */
7110 last_xritag = sglq_entry->sli4_xritag;
7111
895427bd
JS
7112 /* end of repost sgl list condition for buffers */
7113 if (num_posted == total_cnt) {
8a9d2e80
JS
7114 if (post_cnt == 0) {
7115 list_splice_init(&prep_sgl_list,
7116 &blck_sgl_list);
7117 post_cnt = block_cnt;
7118 } else if (block_cnt == 1) {
7119 status = lpfc_sli4_post_sgl(phba,
7120 sglq_entry->phys, 0,
7121 sglq_entry->sli4_xritag);
7122 if (!status) {
7123 /* successful, put sgl to posted list */
7124 list_add_tail(&sglq_entry->list,
7125 &post_sgl_list);
7126 } else {
7127 /* Failure, put sgl to free list */
7128 lpfc_printf_log(phba, KERN_WARNING,
7129 LOG_SLI,
895427bd 7130 "3159 Failed to post "
8a9d2e80
JS
7131 "sgl, xritag:x%x\n",
7132 sglq_entry->sli4_xritag);
7133 list_add_tail(&sglq_entry->list,
7134 &free_sgl_list);
711ea882 7135 total_cnt--;
8a9d2e80
JS
7136 }
7137 }
7138 }
7139
7140 /* continue until a nembed page worth of sgls */
7141 if (post_cnt == 0)
7142 continue;
7143
895427bd
JS
7144 /* post the buffer list sgls as a block */
7145 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7146 post_cnt);
8a9d2e80
JS
7147
7148 if (!status) {
7149 /* success, put sgl list to posted sgl list */
7150 list_splice_init(&blck_sgl_list, &post_sgl_list);
7151 } else {
7152 /* Failure, put sgl list to free sgl list */
7153 sglq_entry_first = list_first_entry(&blck_sgl_list,
7154 struct lpfc_sglq,
7155 list);
7156 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
895427bd 7157 "3160 Failed to post sgl-list, "
8a9d2e80
JS
7158 "xritag:x%x-x%x\n",
7159 sglq_entry_first->sli4_xritag,
7160 (sglq_entry_first->sli4_xritag +
7161 post_cnt - 1));
7162 list_splice_init(&blck_sgl_list, &free_sgl_list);
711ea882 7163 total_cnt -= post_cnt;
8a9d2e80
JS
7164 }
7165
7166 /* don't reset xritag due to hole in xri block */
7167 if (block_cnt == 0)
7168 last_xritag = NO_XRI;
7169
895427bd 7170 /* reset sgl post count for next round of posting */
8a9d2e80
JS
7171 post_cnt = 0;
7172 }
7173
895427bd 7174 /* free the sgls that failed to post */
8a9d2e80
JS
7175 lpfc_free_sgl_list(phba, &free_sgl_list);
7176
895427bd 7177 /* push sgls posted to the available list */
8a9d2e80 7178 if (!list_empty(&post_sgl_list)) {
38c20673 7179 spin_lock_irq(&phba->hbalock);
895427bd
JS
7180 spin_lock(&phba->sli4_hba.sgl_list_lock);
7181 list_splice_init(&post_sgl_list, sgl_list);
7182 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 7183 spin_unlock_irq(&phba->hbalock);
8a9d2e80
JS
7184 } else {
7185 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd 7186 "3161 Failure to post sgl to port.\n");
8a9d2e80
JS
7187 return -EIO;
7188 }
895427bd
JS
7189
7190 /* return the number of XRIs actually posted */
7191 return total_cnt;
8a9d2e80
JS
7192}
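/*
 * Worked example of the block building above (xritag values illustrative
 * only): for sgls with xritags 10, 11, 12, 20, 21 the hole between 12 and
 * 20 closes the first block, so two non-embedded SGL block posts are
 * issued, one for {10, 11, 12} and one for {20, 21}. If the final block
 * holds only a single sgl, it is posted through the embedded
 * lpfc_sli4_post_sgl() path instead.
 */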
7193
0794d601 7194/**
5e5b511d 7195 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
0794d601
JS
7196 * @phba: pointer to lpfc hba data structure.
7197 *
7198 * This routine walks the list of nvme buffers that have been allocated and
7199 * reposts them to the port by using SGL block post. This is needed after a
7200 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7201 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
5e5b511d 7202 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
0794d601
JS
7203 *
7204 * Returns: 0 = success, non-zero failure.
7205 **/
3999df75 7206static int
5e5b511d 7207lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
0794d601
JS
7208{
7209 LIST_HEAD(post_nblist);
7210 int num_posted, rc = 0;
7211
7212 /* get all NVME buffers that need reposting to a local list */
5e5b511d 7213 lpfc_io_buf_flush(phba, &post_nblist);
0794d601
JS
7214
7215 /* post the list of nvme buffer sgls to port if available */
7216 if (!list_empty(&post_nblist)) {
5e5b511d
JS
7217 num_posted = lpfc_sli4_post_io_sgl_list(
7218 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
0794d601
JS
7219 /* failed to post any nvme buffer, return error */
7220 if (num_posted == 0)
7221 rc = -EIO;
7222 }
7223 return rc;
7224}
7225
3999df75 7226static void
61bda8f7
JS
7227lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7228{
7229 uint32_t len;
7230
7231 len = sizeof(struct lpfc_mbx_set_host_data) -
7232 sizeof(struct lpfc_sli4_cfg_mhdr);
7233 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7234 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7235 LPFC_SLI4_MBX_EMBED);
7236
7237 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
b2fd103b
JS
7238 mbox->u.mqe.un.set_host_data.param_len =
7239 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
61bda8f7
JS
7240 snprintf(mbox->u.mqe.un.set_host_data.data,
7241 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7242 "Linux %s v"LPFC_DRIVER_VERSION,
7243 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7244}
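/*
 * Example of the host data string written above (the exact version text is
 * whatever LPFC_DRIVER_VERSION expands to): an FC adapter reports
 * "Linux FC v<LPFC_DRIVER_VERSION>", an FCoE adapter "Linux FCoE v...",
 * truncated by snprintf() to LPFC_HOST_OS_DRIVER_VERSION_SIZE bytes.
 */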
7245
a8cf5dfe 7246int
6c621a22 7247lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
a8cf5dfe 7248 struct lpfc_queue *drq, int count, int idx)
6c621a22
JS
7249{
7250 int rc, i;
7251 struct lpfc_rqe hrqe;
7252 struct lpfc_rqe drqe;
7253 struct lpfc_rqb *rqbp;
411de511 7254 unsigned long flags;
6c621a22
JS
7255 struct rqb_dmabuf *rqb_buffer;
7256 LIST_HEAD(rqb_buf_list);
7257
411de511 7258 spin_lock_irqsave(&phba->hbalock, flags);
6c621a22
JS
7259 rqbp = hrq->rqbp;
7260 for (i = 0; i < count; i++) {
7261 /* If RQ is already full, don't bother */
7262 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7263 break;
7264 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7265 if (!rqb_buffer)
7266 break;
7267 rqb_buffer->hrq = hrq;
7268 rqb_buffer->drq = drq;
a8cf5dfe 7269 rqb_buffer->idx = idx;
6c621a22
JS
7270 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7271 }
7272 while (!list_empty(&rqb_buf_list)) {
7273 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7274 hbuf.list);
7275
7276 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7277 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7278 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7279 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7280 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7281 if (rc < 0) {
411de511
JS
7282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7283 "6421 Cannot post to HRQ %d: %x %x %x "
7284 "DRQ %x %x\n",
7285 hrq->queue_id,
7286 hrq->host_index,
7287 hrq->hba_index,
7288 hrq->entry_count,
7289 drq->host_index,
7290 drq->hba_index);
6c621a22
JS
7291 rqbp->rqb_free_buffer(phba, rqb_buffer);
7292 } else {
7293 list_add_tail(&rqb_buffer->hbuf.list,
7294 &rqbp->rqb_buffer_list);
7295 rqbp->buffer_count++;
7296 }
7297 }
411de511 7298 spin_unlock_irqrestore(&phba->hbalock, flags);
6c621a22
JS
7299 return 1;
7300}
7301
da0436e9 7302/**
183b8021 7303 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
da0436e9
JS
7304 * @phba: Pointer to HBA context object.
7305 *
183b8021
MY
7306 * This function is the main SLI4 device initialization PCI function. This
7307 * function is called by the HBA initialization code, HBA reset code and
da0436e9
JS
7308 * HBA error attention handler code. Caller is not required to hold any
7309 * locks.
7310 **/
7311int
7312lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7313{
171f6c41 7314 int rc, i, cnt, len, dd;
da0436e9
JS
7315 LPFC_MBOXQ_t *mboxq;
7316 struct lpfc_mqe *mqe;
7317 uint8_t *vpd;
7318 uint32_t vpd_size;
7319 uint32_t ftr_rsp = 0;
7320 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7321 struct lpfc_vport *vport = phba->pport;
7322 struct lpfc_dmabuf *mp;
2d7dbc4c 7323 struct lpfc_rqb *rqbp;
da0436e9
JS
7324
7325 /* Perform a PCI function reset to start from clean */
7326 rc = lpfc_pci_function_reset(phba);
7327 if (unlikely(rc))
7328 return -ENODEV;
7329
7330 /* Check the HBA Host Status Register for readiness */
7331 rc = lpfc_sli4_post_status_check(phba);
7332 if (unlikely(rc))
7333 return -ENODEV;
7334 else {
7335 spin_lock_irq(&phba->hbalock);
7336 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7337 spin_unlock_irq(&phba->hbalock);
7338 }
7339
7340 /*
7341 * Allocate a single mailbox container for initializing the
7342 * port.
7343 */
7344 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7345 if (!mboxq)
7346 return -ENOMEM;
7347
da0436e9 7348 /* Issue READ_REV to collect vpd and FW information. */
49198b37 7349 vpd_size = SLI4_PAGE_SIZE;
da0436e9
JS
7350 vpd = kzalloc(vpd_size, GFP_KERNEL);
7351 if (!vpd) {
7352 rc = -ENOMEM;
7353 goto out_free_mbox;
7354 }
7355
7356 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
76a95d75
JS
7357 if (unlikely(rc)) {
7358 kfree(vpd);
7359 goto out_free_mbox;
7360 }
572709e2 7361
da0436e9 7362 mqe = &mboxq->u.mqe;
f1126688 7363 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
b5c53958 7364 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
76a95d75 7365 phba->hba_flag |= HBA_FCOE_MODE;
b5c53958
JS
7366 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7367 } else {
76a95d75 7368 phba->hba_flag &= ~HBA_FCOE_MODE;
b5c53958 7369 }
45ed1190
JS
7370
7371 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7372 LPFC_DCBX_CEE_MODE)
7373 phba->hba_flag |= HBA_FIP_SUPPORT;
7374 else
7375 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7376
c00f62e6 7377 phba->hba_flag &= ~HBA_IOQ_FLUSH;
4f2e66c6 7378
c31098ce 7379 if (phba->sli_rev != LPFC_SLI_REV4) {
da0436e9
JS
7380 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7381 "0376 READ_REV Error. SLI Level %d "
7382 "FCoE enabled %d\n",
76a95d75 7383 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
da0436e9 7384 rc = -EIO;
76a95d75
JS
7385 kfree(vpd);
7386 goto out_free_mbox;
da0436e9 7387 }
cd1c8301 7388
ff78d8f9
JS
7389 /*
7390 * Continue initialization with default values even if driver failed
7391 * to read FCoE param config regions; only read the parameters if the
7392 * board is FCoE
7393 */
7394 if (phba->hba_flag & HBA_FCOE_MODE &&
7395 lpfc_sli4_read_fcoe_params(phba))
7396 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7397 "2570 Failed to read FCoE parameters\n");
7398
cd1c8301
JS
7399 /*
7400 * Retrieve the sli4 device physical port name; failure to do so
7401 * is considered non-fatal.
7402 */
7403 rc = lpfc_sli4_retrieve_pport_name(phba);
7404 if (!rc)
7405 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7406 "3080 Successful retrieving SLI4 device "
7407 "physical port name: %s.\n", phba->Port);
7408
b3b4f3e1
JS
7409 rc = lpfc_sli4_get_ctl_attr(phba);
7410 if (!rc)
7411 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7412 "8351 Successful retrieving SLI4 device "
7413 "CTL ATTR\n");
7414
da0436e9
JS
7415 /*
7416 * Evaluate the read rev and vpd data. Populate the driver
7417 * state with the results. If this routine fails, the failure
7418 * is not fatal as the driver will use generic values.
7419 */
7420 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7421 if (unlikely(!rc)) {
7422 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7423 "0377 Error %d parsing vpd. "
7424 "Using defaults.\n", rc);
7425 rc = 0;
7426 }
76a95d75 7427 kfree(vpd);
da0436e9 7428
f1126688
JS
7429 /* Save information as VPD data */
7430 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7431 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
4e565cf0
JS
7432
7433 /*
7434 * This is because the first G7 ASIC doesn't support the standard
7435 * 0x5a NVME cmd descriptor type/subtype
7436 */
7437 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7438 LPFC_SLI_INTF_IF_TYPE_6) &&
7439 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7440 (phba->vpd.rev.smRev == 0) &&
7441 (phba->cfg_nvme_embed_cmd == 1))
7442 phba->cfg_nvme_embed_cmd = 0;
7443
f1126688
JS
7444 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7445 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7446 &mqe->un.read_rev);
7447 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7448 &mqe->un.read_rev);
7449 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7450 &mqe->un.read_rev);
7451 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7452 &mqe->un.read_rev);
7453 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7454 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7455 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7456 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7457 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7458 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7459 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7460 "(%d):0380 READ_REV Status x%x "
7461 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7462 mboxq->vport ? mboxq->vport->vpi : 0,
7463 bf_get(lpfc_mqe_status, mqe),
7464 phba->vpd.rev.opFwName,
7465 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7466 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
da0436e9 7467
65791f1f 7468 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7bdedb34
JS
7469 LPFC_SLI_INTF_IF_TYPE_0) {
7470 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7471 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7472 if (rc == MBX_SUCCESS) {
7473 phba->hba_flag |= HBA_RECOVERABLE_UE;
7474 /* Set 1Sec interval to detect UE */
7475 phba->eratt_poll_interval = 1;
7476 phba->sli4_hba.ue_to_sr = bf_get(
7477 lpfc_mbx_set_feature_UESR,
7478 &mboxq->u.mqe.un.set_feature);
7479 phba->sli4_hba.ue_to_rp = bf_get(
7480 lpfc_mbx_set_feature_UERP,
7481 &mboxq->u.mqe.un.set_feature);
7482 }
7483 }
7484
7485 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7486 /* Enable MDS Diagnostics only if the SLI Port supports it */
7487 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7488 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7489 if (rc != MBX_SUCCESS)
7490 phba->mds_diags_support = 0;
7491 }
572709e2 7492
da0436e9
JS
7493 /*
7494 * Discover the port's supported feature set and match it against the
7495 * host's requests.
7496 */
7497 lpfc_request_features(phba, mboxq);
7498 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7499 if (unlikely(rc)) {
7500 rc = -EIO;
76a95d75 7501 goto out_free_mbox;
da0436e9
JS
7502 }
7503
7504 /*
7505 * The port must support FCP initiator mode as this is the
7506 * only mode running in the host.
7507 */
7508 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7509 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7510 "0378 No support for fcpi mode.\n");
7511 ftr_rsp++;
7512 }
0bc2b7c5
JS
7513
7514 /* Performance Hints are ONLY for FCoE */
7515 if (phba->hba_flag & HBA_FCOE_MODE) {
7516 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7517 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7518 else
7519 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7520 }
7521
da0436e9
JS
7522 /*
7523 * If the port cannot support the host's requested features
7524 * then turn off the global config parameters to disable the
7525 * feature in the driver. This is not a fatal error.
7526 */
f44ac12f
JS
7527 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7528 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7529 phba->cfg_enable_bg = 0;
7530 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
bf08611b 7531 ftr_rsp++;
f44ac12f 7532 }
bf08611b 7533 }
da0436e9
JS
7534
7535 if (phba->max_vpi && phba->cfg_enable_npiv &&
7536 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7537 ftr_rsp++;
7538
7539 if (ftr_rsp) {
7540 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7541 "0379 Feature Mismatch Data: x%08x %08x "
7542 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7543 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7544 phba->cfg_enable_npiv, phba->max_vpi);
7545 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7546 phba->cfg_enable_bg = 0;
7547 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7548 phba->cfg_enable_npiv = 0;
7549 }
7550
7551 /* These SLI3 features are assumed in SLI4 */
7552 spin_lock_irq(&phba->hbalock);
7553 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7554 spin_unlock_irq(&phba->hbalock);
7555
171f6c41
JS
7556 /* Always try to enable dual dump feature if we can */
7557 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7558 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7559 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7560 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
7561 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT,
7562 "6448 Dual Dump is enabled\n");
7563 else
7564 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7565 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7566 "rc:x%x dd:x%x\n",
7567 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7568 lpfc_sli_config_mbox_subsys_get(
7569 phba, mboxq),
7570 lpfc_sli_config_mbox_opcode_get(
7571 phba, mboxq),
7572 rc, dd);
6d368e53
JS
7573 /*
7574 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7575 * calls depend on these resources to complete port setup.
7576 */
7577 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7578 if (rc) {
7579 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7580 "2920 Failed to alloc Resource IDs "
7581 "rc = x%x\n", rc);
7582 goto out_free_mbox;
7583 }
7584
61bda8f7
JS
7585 lpfc_set_host_data(phba, mboxq);
7586
7587 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7588 if (rc) {
7589 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7590 "2134 Failed to set host os driver version %x",
7591 rc);
7592 }
7593
da0436e9 7594 /* Read the port's service parameters. */
9f1177a3
JS
7595 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7596 if (rc) {
7597 phba->link_state = LPFC_HBA_ERROR;
7598 rc = -ENOMEM;
76a95d75 7599 goto out_free_mbox;
9f1177a3
JS
7600 }
7601
da0436e9
JS
7602 mboxq->vport = vport;
7603 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3e1f0718 7604 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
da0436e9
JS
7605 if (rc == MBX_SUCCESS) {
7606 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7607 rc = 0;
7608 }
7609
7610 /*
7611 * This memory was allocated by the lpfc_read_sparam routine. Release
7612 * it to the mbuf pool.
7613 */
7614 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7615 kfree(mp);
3e1f0718 7616 mboxq->ctx_buf = NULL;
da0436e9
JS
7617 if (unlikely(rc)) {
7618 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7619 "0382 READ_SPARAM command failed "
7620 "status %d, mbxStatus x%x\n",
7621 rc, bf_get(lpfc_mqe_status, mqe));
7622 phba->link_state = LPFC_HBA_ERROR;
7623 rc = -EIO;
76a95d75 7624 goto out_free_mbox;
da0436e9
JS
7625 }
7626
0558056c 7627 lpfc_update_vport_wwn(vport);
da0436e9
JS
7628
7629 /* Update the fc_host data structures with new wwn. */
7630 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7631 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7632
895427bd
JS
7633 /* Create all the SLI4 queues */
7634 rc = lpfc_sli4_queue_create(phba);
7635 if (rc) {
7636 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7637 "3089 Failed to allocate queues\n");
7638 rc = -ENODEV;
7639 goto out_free_mbox;
7640 }
7641 /* Set up all the queues to the device */
7642 rc = lpfc_sli4_queue_setup(phba);
7643 if (unlikely(rc)) {
7644 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7645 "0381 Error %d during queue setup.\n ", rc);
7646 goto out_stop_timers;
7647 }
7648 /* Initialize the driver internal SLI layer lists. */
7649 lpfc_sli4_setup(phba);
7650 lpfc_sli4_queue_init(phba);
7651
7652 /* update host els xri-sgl sizes and mappings */
7653 rc = lpfc_sli4_els_sgl_update(phba);
8a9d2e80
JS
7654 if (unlikely(rc)) {
7655 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7656 "1400 Failed to update xri-sgl size and "
7657 "mapping: %d\n", rc);
895427bd 7658 goto out_destroy_queue;
da0436e9
JS
7659 }
7660
8a9d2e80 7661 /* register the els sgl pool to the port */
895427bd
JS
7662 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7663 phba->sli4_hba.els_xri_cnt);
7664 if (unlikely(rc < 0)) {
8a9d2e80
JS
7665 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7666 "0582 Error %d during els sgl post "
7667 "operation\n", rc);
7668 rc = -ENODEV;
895427bd 7669 goto out_destroy_queue;
8a9d2e80 7670 }
895427bd 7671 phba->sli4_hba.els_xri_cnt = rc;
8a9d2e80 7672
f358dd0c
JS
7673 if (phba->nvmet_support) {
7674 /* update host nvmet xri-sgl sizes and mappings */
7675 rc = lpfc_sli4_nvmet_sgl_update(phba);
7676 if (unlikely(rc)) {
7677 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7678 "6308 Failed to update nvmet-sgl size "
7679 "and mapping: %d\n", rc);
7680 goto out_destroy_queue;
7681 }
7682
7683 /* register the nvmet sgl pool to the port */
7684 rc = lpfc_sli4_repost_sgl_list(
7685 phba,
7686 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7687 phba->sli4_hba.nvmet_xri_cnt);
7688 if (unlikely(rc < 0)) {
7689 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7690 "3117 Error %d during nvmet "
7691 "sgl post\n", rc);
7692 rc = -ENODEV;
7693 goto out_destroy_queue;
7694 }
7695 phba->sli4_hba.nvmet_xri_cnt = rc;
6c621a22 7696
a5f7337f
JS
7697 /* We allocate an iocbq for every receive context SGL.
7698 * The additional allocation is for abort and ls handling.
7699 */
7700 cnt = phba->sli4_hba.nvmet_xri_cnt +
7701 phba->sli4_hba.max_cfg_param.max_xri;
f358dd0c 7702 } else {
0794d601 7703 /* update host common xri-sgl sizes and mappings */
5e5b511d 7704 rc = lpfc_sli4_io_sgl_update(phba);
895427bd
JS
7705 if (unlikely(rc)) {
7706 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
0794d601 7707 "6082 Failed to update nvme-sgl size "
895427bd
JS
7708 "and mapping: %d\n", rc);
7709 goto out_destroy_queue;
7710 }
7711
0794d601 7712 /* register the allocated common sgl pool to the port */
5e5b511d 7713 rc = lpfc_sli4_repost_io_sgl_list(phba);
895427bd
JS
7714 if (unlikely(rc)) {
7715 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
0794d601
JS
7716 "6116 Error %d during nvme sgl post "
7717 "operation\n", rc);
7718 /* Some NVME buffers were moved to abort nvme list */
7719 /* A pci function reset will repost them */
7720 rc = -ENODEV;
895427bd
JS
7721 goto out_destroy_queue;
7722 }
a5f7337f
JS
7723 /* Each lpfc_io_buf job structure has an iocbq element.
7724 * This cnt provides for abort, els, ct and ls requests.
7725 */
7726 cnt = phba->sli4_hba.max_cfg_param.max_xri;
11e644e2
JS
7727 }
7728
7729 if (!phba->sli.iocbq_lookup) {
6c621a22
JS
7730 /* Initialize and populate the iocb list per host */
7731 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
a5f7337f
JS
7732 "2821 initialize iocb list with %d entries\n",
7733 cnt);
6c621a22
JS
7734 rc = lpfc_init_iocb_list(phba, cnt);
7735 if (rc) {
7736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11e644e2 7737 "1413 Failed to init iocb list.\n");
6c621a22
JS
7738 goto out_destroy_queue;
7739 }
895427bd
JS
7740 }
7741
11e644e2
JS
7742 if (phba->nvmet_support)
7743 lpfc_nvmet_create_targetport(phba);
7744
2d7dbc4c 7745 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
2d7dbc4c
JS
7746 /* Post initial buffers to all RQs created */
7747 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7748 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7749 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7750 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7751 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
61f3d4bf 7752 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
2d7dbc4c
JS
7753 rqbp->buffer_count = 0;
7754
2d7dbc4c
JS
7755 lpfc_post_rq_buffer(
7756 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7757 phba->sli4_hba.nvmet_mrq_data[i],
2448e484 7758 phba->cfg_nvmet_mrq_post, i);
2d7dbc4c
JS
7759 }
7760 }
7761
da0436e9
JS
7762 /* Post the rpi header region to the device. */
7763 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7764 if (unlikely(rc)) {
7765 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7766 "0393 Error %d during rpi post operation\n",
7767 rc);
7768 rc = -ENODEV;
895427bd 7769 goto out_destroy_queue;
da0436e9 7770 }
97f2ecf1 7771 lpfc_sli4_node_prep(phba);
da0436e9 7772
895427bd 7773 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2d7dbc4c 7774 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
895427bd
JS
7775 /*
7776 * The FC Port needs to register FCFI (index 0)
7777 */
7778 lpfc_reg_fcfi(phba, mboxq);
7779 mboxq->vport = phba->pport;
7780 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7781 if (rc != MBX_SUCCESS)
7782 goto out_unset_queue;
7783 rc = 0;
7784 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7785 &mboxq->u.mqe.un.reg_fcfi);
2d7dbc4c
JS
7786 } else {
7787 /* We are a NVME Target mode with MRQ > 1 */
7788
7789 /* First register the FCFI */
7790 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7791 mboxq->vport = phba->pport;
7792 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7793 if (rc != MBX_SUCCESS)
7794 goto out_unset_queue;
7795 rc = 0;
7796 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7797 &mboxq->u.mqe.un.reg_fcfi_mrq);
7798
7799 /* Next register the MRQs */
7800 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7801 mboxq->vport = phba->pport;
7802 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7803 if (rc != MBX_SUCCESS)
7804 goto out_unset_queue;
7805 rc = 0;
895427bd
JS
7806 }
7807 /* Check if the port is configured to be disabled */
7808 lpfc_sli_read_link_ste(phba);
da0436e9
JS
7809 }
7810
c490850a
JS
7811 /* Don't post more new bufs if repost already recovered
7812 * the nvme sgls.
7813 */
7814 if (phba->nvmet_support == 0) {
7815 if (phba->sli4_hba.io_xri_cnt == 0) {
7816 len = lpfc_new_io_buf(
7817 phba, phba->sli4_hba.io_xri_max);
7818 if (len == 0) {
7819 rc = -ENOMEM;
7820 goto out_unset_queue;
7821 }
7822
7823 if (phba->cfg_xri_rebalancing)
7824 lpfc_create_multixri_pools(phba);
7825 }
7826 } else {
7827 phba->cfg_xri_rebalancing = 0;
7828 }
7829
da0436e9
JS
7830 /* Allow asynchronous mailbox command to go through */
7831 spin_lock_irq(&phba->hbalock);
7832 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7833 spin_unlock_irq(&phba->hbalock);
7834
7835 /* Post receive buffers to the device */
7836 lpfc_sli4_rb_setup(phba);
7837
fc2b989b
JS
7838 /* Reset HBA FCF states after HBA reset */
7839 phba->fcf.fcf_flag = 0;
7840 phba->fcf.current_rec.flag = 0;
7841
da0436e9 7842 /* Start the ELS watchdog timer */
8fa38513 7843 mod_timer(&vport->els_tmofunc,
256ec0d0 7844 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
da0436e9
JS
7845
7846 /* Start heart beat timer */
7847 mod_timer(&phba->hb_tmofunc,
256ec0d0 7848 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
da0436e9
JS
7849 phba->hb_outstanding = 0;
7850 phba->last_completion_time = jiffies;
7851
32517fc0
JS
7852 /* start eq_delay heartbeat */
7853 if (phba->cfg_auto_imax)
7854 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7855 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7856
da0436e9 7857 /* Start error attention (ERATT) polling timer */
256ec0d0 7858 mod_timer(&phba->eratt_poll,
65791f1f 7859 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
da0436e9 7860
75baf696
JS
7861 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7862 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7863 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7864 if (!rc) {
7865 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7866 "2829 This device supports "
7867 "Advanced Error Reporting (AER)\n");
7868 spin_lock_irq(&phba->hbalock);
7869 phba->hba_flag |= HBA_AER_ENABLED;
7870 spin_unlock_irq(&phba->hbalock);
7871 } else {
7872 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7873 "2830 This device does not support "
7874 "Advanced Error Reporting (AER)\n");
7875 phba->cfg_aer_support = 0;
7876 }
0a96e975 7877 rc = 0;
75baf696
JS
7878 }
7879
da0436e9
JS
7880 /*
7881 * The port is ready, set the host's link state to LINK_DOWN
7882 * in preparation for link interrupts.
7883 */
da0436e9
JS
7884 spin_lock_irq(&phba->hbalock);
7885 phba->link_state = LPFC_LINK_DOWN;
1dc5ec24
JS
7886
7887 /* Check if physical ports are trunked */
7888 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7889 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7890 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7891 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7892 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7893 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7894 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7895 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
da0436e9 7896 spin_unlock_irq(&phba->hbalock);
1dc5ec24 7897
e8869f5b
JS
7898 /* Arm the CQs and then EQs on device */
7899 lpfc_sli4_arm_cqeq_intr(phba);
7900
7901 /* Indicate device interrupt mode */
7902 phba->sli4_hba.intr_enable = 1;
7903
026abb87
JS
7904 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7905 (phba->hba_flag & LINK_DISABLED)) {
7906 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7907 "3103 Adapter Link is disabled.\n");
7908 lpfc_down_link(phba, mboxq);
7909 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7910 if (rc != MBX_SUCCESS) {
7911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7912 "3104 Adapter failed to issue "
7913 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
c490850a 7914 goto out_io_buff_free;
026abb87
JS
7915 }
7916 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
1b51197d
JS
7917 /* don't perform init_link on SLI4 FC port loopback test */
7918 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7919 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7920 if (rc)
c490850a 7921 goto out_io_buff_free;
1b51197d 7922 }
5350d872
JS
7923 }
7924 mempool_free(mboxq, phba->mbox_mem_pool);
7925 return rc;
c490850a
JS
7926out_io_buff_free:
7927 /* Free allocated IO Buffers */
7928 lpfc_io_free(phba);
76a95d75 7929out_unset_queue:
da0436e9 7930 /* Unset all the queues set up in this routine when error out */
5350d872
JS
7931 lpfc_sli4_queue_unset(phba);
7932out_destroy_queue:
6c621a22 7933 lpfc_free_iocb_list(phba);
5350d872 7934 lpfc_sli4_queue_destroy(phba);
da0436e9 7935out_stop_timers:
5350d872 7936 lpfc_stop_hba_timers(phba);
da0436e9
JS
7937out_free_mbox:
7938 mempool_free(mboxq, phba->mbox_mem_pool);
7939 return rc;
7940}
7941
7942/**
7943 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7944 * @t: timer context - used to derive the pointer to the hba structure.
7945 *
7946 * This is the callback function for mailbox timer. The mailbox
7947 * timer is armed when a new mailbox command is issued and the timer
7948 * is deleted when the mailbox completes. The function is called by
7949 * the kernel timer code when a mailbox does not complete within
7950 * expected time. This function wakes up the worker thread to
7951 * process the mailbox timeout and returns. All the processing is
7952 * done by the worker thread function lpfc_mbox_timeout_handler.
7953 **/
7954void
f22eb4d3 7955lpfc_mbox_timeout(struct timer_list *t)
da0436e9 7956{
f22eb4d3 7957 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
da0436e9
JS
7958 unsigned long iflag;
7959 uint32_t tmo_posted;
7960
7961 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7962 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7963 if (!tmo_posted)
7964 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7965 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7966
7967 if (!tmo_posted)
7968 lpfc_worker_wake_up(phba);
7969 return;
7970}
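
/*
 * Editor's note: an illustrative sketch, not part of the driver, showing how
 * a timer callback such as lpfc_mbox_timeout() above is typically bound and
 * armed.  The driver performs the equivalent steps at adapter setup and in
 * its mailbox issue paths; this helper only makes the arm/expire flow
 * explicit, and its name is hypothetical.
 */
static void lpfc_example_arm_mbox_timer(struct lpfc_hba *phba,
					LPFC_MBOXQ_t *pmbox)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	/* Bind the callback once (normally done during adapter setup). */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);

	/* Arm the timer for the command-specific timeout when a mailbox is
	 * issued.  If the command completes first, the completion path
	 * deletes the timer and lpfc_mbox_timeout() never runs.
	 */
	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
	mod_timer(&psli->mbox_tmo, jiffies + timeout);
}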
7971
e8d3c3b1
JS
7972/**
7973 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7974 * are pending
7975 * @phba: Pointer to HBA context object.
7976 *
7977 * This function checks if any mailbox completions are present on the mailbox
7978 * completion queue.
7979 **/
3bb11fc5 7980static bool
e8d3c3b1
JS
7981lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7982{
7983
7984 uint32_t idx;
7985 struct lpfc_queue *mcq;
7986 struct lpfc_mcqe *mcqe;
7987 bool pending_completions = false;
7365f6fd 7988 uint8_t qe_valid;
e8d3c3b1
JS
7989
7990 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7991 return false;
7992
7993 /* Check for completions on mailbox completion queue */
7994
7995 mcq = phba->sli4_hba.mbx_cq;
7996 idx = mcq->hba_index;
7365f6fd 7997 qe_valid = mcq->qe_valid;
9afbee3d
JS
7998 while (bf_get_le32(lpfc_cqe_valid,
7999 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8000 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
e8d3c3b1
JS
8001 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8002 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8003 pending_completions = true;
8004 break;
8005 }
8006 idx = (idx + 1) % mcq->entry_count;
8007 if (mcq->hba_index == idx)
8008 break;
7365f6fd
JS
8009
8010 /* if the index wrapped around, toggle the valid bit */
8011 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8012 qe_valid = (qe_valid) ? 0 : 1;
e8d3c3b1
JS
8013 }
8014 return pending_completions;
8015
8016}
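
/*
 * Editor's note: a toy, self-contained illustration, not driver code, of the
 * valid/phase-bit convention used by lpfc_sli4_mbox_completions_pending()
 * above.  The consumer accepts an entry only while its valid bit matches the
 * expected phase and flips the expected phase each time the ring index wraps,
 * so entries left over from the previous pass are never re-read.  All names
 * here are hypothetical.
 */
static int lpfc_example_phase_bit_scan(const unsigned int *valid_bits,
				       unsigned int entry_count,
				       unsigned int start_idx,
				       unsigned int start_phase)
{
	unsigned int idx = start_idx;
	unsigned int phase = start_phase;
	int consumed = 0;

	while (valid_bits[idx] == phase) {
		consumed++;
		idx = (idx + 1) % entry_count;
		if (idx == start_idx)
			break;
		/* Wrapped back to entry 0: toggle the expected phase. */
		if (!idx)
			phase = phase ? 0 : 1;
	}
	return consumed;
}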
8017
8018/**
8019 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8020 * that were missed.
8021 * @phba: Pointer to HBA context object.
8022 *
8023 * For sli4, it is possible to miss an interrupt. As such, mbox completions
8024 * may be missed, causing erroneous mailbox timeouts to occur. This function
8025 * checks to see if mbox completions are on the mailbox completion queue
8026 * and will process all the completions associated with the eq for the
8027 * mailbox completion queue.
8028 **/
d7b761b0 8029static bool
e8d3c3b1
JS
8030lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8031{
b71413dd 8032 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
e8d3c3b1
JS
8033 uint32_t eqidx;
8034 struct lpfc_queue *fpeq = NULL;
657add4e 8035 struct lpfc_queue *eq;
e8d3c3b1
JS
8036 bool mbox_pending;
8037
8038 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8039 return false;
8040
657add4e
JS
8041 /* Find the EQ associated with the mbox CQ */
8042 if (sli4_hba->hdwq) {
8043 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8044 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
535fb49e 8045 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
657add4e 8046 fpeq = eq;
e8d3c3b1
JS
8047 break;
8048 }
657add4e
JS
8049 }
8050 }
e8d3c3b1
JS
8051 if (!fpeq)
8052 return false;
8053
8054 /* Turn off interrupts from this EQ */
8055
b71413dd 8056 sli4_hba->sli4_eq_clr_intr(fpeq);
e8d3c3b1
JS
8057
8058 /* Check to see if a mbox completion is pending */
8059
8060 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8061
8062 /*
8063 * If a mbox completion is pending, process all the events on EQ
8064 * associated with the mbox completion queue (this could include
8065 * mailbox commands, async events, els commands, receive queue data
8066 * and fcp commands)
8067 */
8068
8069 if (mbox_pending)
32517fc0 8070 /* process and rearm the EQ */
93a4d6f4 8071 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
32517fc0
JS
8072 else
8073 /* Always clear and re-arm the EQ */
8074 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
e8d3c3b1
JS
8075
8076 return mbox_pending;
8077
8078}
da0436e9
JS
8079
8080/**
8081 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
8082 * @phba: Pointer to HBA context object.
8083 *
8084 * This function is called from worker thread when a mailbox command times out.
8085 * The caller is not required to hold any locks. This function will reset the
8086 * HBA and recover all the pending commands.
8087 **/
8088void
8089lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8090{
8091 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
eb016566
JS
8092 MAILBOX_t *mb = NULL;
8093
da0436e9 8094 struct lpfc_sli *psli = &phba->sli;
da0436e9 8095
e8d3c3b1
JS
8096 /* If the mailbox completed, process the completion and return */
8097 if (lpfc_sli4_process_missed_mbox_completions(phba))
8098 return;
8099
eb016566
JS
8100 if (pmbox != NULL)
8101 mb = &pmbox->u.mb;
da0436e9
JS
8102 /* Check the pmbox pointer first. There is a race condition
8103 * between the mbox timeout handler getting executed in the
8104 * worklist and the mailbox actually completing. When this
8105 * race condition occurs, the mbox_active will be NULL.
8106 */
8107 spin_lock_irq(&phba->hbalock);
8108 if (pmbox == NULL) {
8109 lpfc_printf_log(phba, KERN_WARNING,
8110 LOG_MBOX | LOG_SLI,
8111 "0353 Active Mailbox cleared - mailbox timeout "
8112 "exiting\n");
8113 spin_unlock_irq(&phba->hbalock);
8114 return;
8115 }
8116
8117 /* Mbox cmd <mbxCommand> timeout */
8118 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
32350664 8119 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
da0436e9
JS
8120 mb->mbxCommand,
8121 phba->pport->port_state,
8122 phba->sli.sli_flag,
8123 phba->sli.mbox_active);
8124 spin_unlock_irq(&phba->hbalock);
8125
8126 /* Setting state unknown so lpfc_sli_abort_iocb_ring
8127 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
25985edc 8128 * it to fail all outstanding SCSI IO.
da0436e9
JS
8129 */
8130 spin_lock_irq(&phba->pport->work_port_lock);
8131 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8132 spin_unlock_irq(&phba->pport->work_port_lock);
8133 spin_lock_irq(&phba->hbalock);
8134 phba->link_state = LPFC_LINK_UNKNOWN;
f4b4c68f 8135 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
da0436e9
JS
8136 spin_unlock_irq(&phba->hbalock);
8137
db55fba8 8138 lpfc_sli_abort_fcp_rings(phba);
da0436e9
JS
8139
8140 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8141 "0345 Resetting board due to mailbox timeout\n");
8142
8143 /* Reset the HBA device */
8144 lpfc_reset_hba(phba);
8145}
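
/*
 * Editor's note: a simplified sketch, not driver code, of how the worker
 * thread consumes the WORKER_MBOX_TMO event that lpfc_mbox_timeout() posts.
 * The real dispatch lives in the driver's worker routine in lpfc_hbadisc.c;
 * this only shows the post/consume handshake with the handler above, and the
 * helper name is hypothetical.
 */
static void lpfc_example_worker_dispatch(struct lpfc_hba *phba)
{
	uint32_t work_port_events;

	/* Snapshot and clear the posted port events under the same lock the
	 * timer callback uses to post them.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	work_port_events = phba->pport->work_port_events;
	phba->pport->work_port_events &= ~work_port_events;
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (work_port_events & WORKER_MBOX_TMO)
		lpfc_mbox_timeout_handler(phba);
}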
8146
8147/**
8148 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8149 * @phba: Pointer to HBA context object.
8150 * @pmbox: Pointer to mailbox object.
8151 * @flag: Flag indicating how the mailbox need to be processed.
8152 *
8153 * This function is called by discovery code and HBA management code
8154 * to submit a mailbox command to firmware with SLI-3 interface spec. This
8155 * function gets the hbalock to protect the data structures.
8156 * The mailbox command can be submitted in polling mode, in which case
8157 * this function will wait in a polling loop for the completion of the
8158 * mailbox.
8159 * If the mailbox is submitted in no_wait mode (not polling) the
8160 * function will submit the command and returns immediately without waiting
8161 * for the mailbox completion. The no_wait is supported only when HBA
8162 * is in SLI2/SLI3 mode - interrupts are enabled.
8163 * The SLI interface allows only one mailbox pending at a time. If the
8164 * mailbox is issued in polling mode and there is already a mailbox
8165 * pending, then the function will return an error. If the mailbox is issued
8166 * in NO_WAIT mode and there is a mailbox pending already, the function
8167 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
8168 * The sli layer owns the mailbox object until the completion of the mailbox
8169 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
8170 * return codes the caller owns the mailbox command after the return of
8171 * the function.
e59058c4 8172 **/
3772a991
JS
8173static int
8174lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8175 uint32_t flag)
dea3101e 8176{
bf07bdea 8177 MAILBOX_t *mbx;
2e0fef85 8178 struct lpfc_sli *psli = &phba->sli;
dea3101e 8179 uint32_t status, evtctr;
9940b97b 8180 uint32_t ha_copy, hc_copy;
dea3101e 8181 int i;
09372820 8182 unsigned long timeout;
dea3101e 8183 unsigned long drvr_flag = 0;
34b02dcd 8184 uint32_t word0, ldata;
dea3101e 8185 void __iomem *to_slim;
58da1ffb
JS
8186 int processing_queue = 0;
8187
8188 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8189 if (!pmbox) {
8568a4d2 8190 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
58da1ffb 8191 /* processing mbox queue from intr_handler */
3772a991
JS
8192 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8193 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8194 return MBX_SUCCESS;
8195 }
58da1ffb 8196 processing_queue = 1;
58da1ffb
JS
8197 pmbox = lpfc_mbox_get(phba);
8198 if (!pmbox) {
8199 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8200 return MBX_SUCCESS;
8201 }
8202 }
dea3101e 8203
ed957684 8204 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
92d7f7b0 8205 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
ed957684 8206 if(!pmbox->vport) {
58da1ffb 8207 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
ed957684 8208 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 8209 LOG_MBOX | LOG_VPORT,
e8b62011 8210 "1806 Mbox x%x failed. No vport\n",
3772a991 8211 pmbox->u.mb.mbxCommand);
ed957684 8212 dump_stack();
58da1ffb 8213 goto out_not_finished;
ed957684
JS
8214 }
8215 }
8216
8d63f375 8217 /* If the PCI channel is in offline state, do not post mbox. */
58da1ffb
JS
8218 if (unlikely(pci_channel_offline(phba->pcidev))) {
8219 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8220 goto out_not_finished;
8221 }
8d63f375 8222
a257bf90
JS
8223 /* If HBA has a deferred error attention, fail the iocb. */
8224 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8225 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8226 goto out_not_finished;
8227 }
8228
dea3101e 8229 psli = &phba->sli;
92d7f7b0 8230
bf07bdea 8231 mbx = &pmbox->u.mb;
dea3101e
JB
8232 status = MBX_SUCCESS;
8233
2e0fef85
JS
8234 if (phba->link_state == LPFC_HBA_ERROR) {
8235 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
41415862
JW
8236
8237 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8238 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8239 "(%d):0311 Mailbox command x%x cannot "
8240 "issue Data: x%x x%x\n",
8241 pmbox->vport ? pmbox->vport->vpi : 0,
8242 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
58da1ffb 8243 goto out_not_finished;
41415862
JW
8244 }
8245
bf07bdea 8246 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9940b97b
JS
8247 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8248 !(hc_copy & HC_MBINT_ENA)) {
8249 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8250 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3772a991
JS
8251 "(%d):2528 Mailbox command x%x cannot "
8252 "issue Data: x%x x%x\n",
8253 pmbox->vport ? pmbox->vport->vpi : 0,
8254 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9940b97b
JS
8255 goto out_not_finished;
8256 }
9290831f
JS
8257 }
8258
dea3101e
JB
8259 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8260 /* Polling for a mbox command when another one is already active
8261 * is not allowed in SLI. Also, the driver must have established
8262 * SLI2 mode to queue and process multiple mbox commands.
8263 */
8264
8265 if (flag & MBX_POLL) {
2e0fef85 8266 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e
JB
8267
8268 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8269 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8270 "(%d):2529 Mailbox command x%x "
8271 "cannot issue Data: x%x x%x\n",
8272 pmbox->vport ? pmbox->vport->vpi : 0,
8273 pmbox->u.mb.mbxCommand,
8274 psli->sli_flag, flag);
58da1ffb 8275 goto out_not_finished;
dea3101e
JB
8276 }
8277
3772a991 8278 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
2e0fef85 8279 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8280 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8281 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8282 "(%d):2530 Mailbox command x%x "
8283 "cannot issue Data: x%x x%x\n",
8284 pmbox->vport ? pmbox->vport->vpi : 0,
8285 pmbox->u.mb.mbxCommand,
8286 psli->sli_flag, flag);
58da1ffb 8287 goto out_not_finished;
dea3101e
JB
8288 }
8289
dea3101e
JB
8290 /* Another mailbox command is still being processed, queue this
8291 * command to be processed later.
8292 */
8293 lpfc_mbox_put(phba, pmbox);
8294
8295 /* Mbox cmd issue - BUSY */
ed957684 8296 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 8297 "(%d):0308 Mbox cmd issue - BUSY Data: "
92d7f7b0 8298 "x%x x%x x%x x%x\n",
92d7f7b0 8299 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
e92974f6
JS
8300 mbx->mbxCommand,
8301 phba->pport ? phba->pport->port_state : 0xff,
92d7f7b0 8302 psli->sli_flag, flag);
dea3101e
JB
8303
8304 psli->slistat.mbox_busy++;
2e0fef85 8305 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8306
858c9f6c
JS
8307 if (pmbox->vport) {
8308 lpfc_debugfs_disc_trc(pmbox->vport,
8309 LPFC_DISC_TRC_MBOX_VPORT,
8310 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8311 (uint32_t)mbx->mbxCommand,
8312 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8313 }
8314 else {
8315 lpfc_debugfs_disc_trc(phba->pport,
8316 LPFC_DISC_TRC_MBOX,
8317 "MBOX Bsy: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8318 (uint32_t)mbx->mbxCommand,
8319 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8320 }
8321
2e0fef85 8322 return MBX_BUSY;
dea3101e
JB
8323 }
8324
dea3101e
JB
8325 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8326
8327 /* If we are not polling, we MUST be in SLI2 mode */
8328 if (flag != MBX_POLL) {
3772a991 8329 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
bf07bdea 8330 (mbx->mbxCommand != MBX_KILL_BOARD)) {
dea3101e 8331 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 8332 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8333 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8334 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8335 "(%d):2531 Mailbox command x%x "
8336 "cannot issue Data: x%x x%x\n",
8337 pmbox->vport ? pmbox->vport->vpi : 0,
8338 pmbox->u.mb.mbxCommand,
8339 psli->sli_flag, flag);
58da1ffb 8340 goto out_not_finished;
dea3101e
JB
8341 }
8342 /* timeout active mbox command */
256ec0d0
JS
8343 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8344 1000);
8345 mod_timer(&psli->mbox_tmo, jiffies + timeout);
dea3101e
JB
8346 }
8347
8348 /* Mailbox cmd <cmd> issue */
ed957684 8349 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 8350 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
92d7f7b0 8351 "x%x\n",
e8b62011 8352 pmbox->vport ? pmbox->vport->vpi : 0,
e92974f6
JS
8353 mbx->mbxCommand,
8354 phba->pport ? phba->pport->port_state : 0xff,
92d7f7b0 8355 psli->sli_flag, flag);
dea3101e 8356
bf07bdea 8357 if (mbx->mbxCommand != MBX_HEARTBEAT) {
858c9f6c
JS
8358 if (pmbox->vport) {
8359 lpfc_debugfs_disc_trc(pmbox->vport,
8360 LPFC_DISC_TRC_MBOX_VPORT,
8361 "MBOX Send vport: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8362 (uint32_t)mbx->mbxCommand,
8363 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8364 }
8365 else {
8366 lpfc_debugfs_disc_trc(phba->pport,
8367 LPFC_DISC_TRC_MBOX,
8368 "MBOX Send: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8369 (uint32_t)mbx->mbxCommand,
8370 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8371 }
8372 }
8373
dea3101e
JB
8374 psli->slistat.mbox_cmd++;
8375 evtctr = psli->slistat.mbox_event;
8376
8377 /* next set own bit for the adapter and copy over command word */
bf07bdea 8378 mbx->mbxOwner = OWN_CHIP;
dea3101e 8379
3772a991 8380 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7a470277
JS
8381 /* Populate mbox extension offset word. */
8382 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
bf07bdea 8383 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7a470277
JS
8384 = (uint8_t *)phba->mbox_ext
8385 - (uint8_t *)phba->mbox;
8386 }
8387
8388 /* Copy the mailbox extension data */
3e1f0718
JS
8389 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8390 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8391 (uint8_t *)phba->mbox_ext,
8392 pmbox->in_ext_byte_len);
7a470277
JS
8393 }
8394 /* Copy command data to host SLIM area */
bf07bdea 8395 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e 8396 } else {
7a470277
JS
8397 /* Populate mbox extension offset word. */
8398 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
bf07bdea 8399 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7a470277
JS
8400 = MAILBOX_HBA_EXT_OFFSET;
8401
8402 /* Copy the mailbox extension data */
3e1f0718 8403 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
7a470277
JS
8404 lpfc_memcpy_to_slim(phba->MBslimaddr +
8405 MAILBOX_HBA_EXT_OFFSET,
3e1f0718 8406 pmbox->ctx_buf, pmbox->in_ext_byte_len);
7a470277 8407
895427bd 8408 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea3101e 8409 /* copy command data into host mbox for cmpl */
895427bd
JS
8410 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8411 MAILBOX_CMD_SIZE);
dea3101e
JB
8412
8413 /* First copy mbox command data to HBA SLIM, skip past first
8414 word */
8415 to_slim = phba->MBslimaddr + sizeof (uint32_t);
bf07bdea 8416 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
dea3101e
JB
8417 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8418
8419 /* Next copy over first word, with mbxOwner set */
bf07bdea 8420 ldata = *((uint32_t *)mbx);
dea3101e
JB
8421 to_slim = phba->MBslimaddr;
8422 writel(ldata, to_slim);
8423 readl(to_slim); /* flush */
8424
895427bd 8425 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea3101e 8426 /* switch over to host mailbox */
3772a991 8427 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea3101e
JB
8428 }
8429
8430 wmb();
dea3101e
JB
8431
8432 switch (flag) {
8433 case MBX_NOWAIT:
09372820 8434 /* Set up reference to mailbox command */
dea3101e 8435 psli->mbox_active = pmbox;
09372820
JS
8436 /* Interrupt board to do it */
8437 writel(CA_MBATT, phba->CAregaddr);
8438 readl(phba->CAregaddr); /* flush */
8439 /* Don't wait for it to finish, just return */
dea3101e
JB
8440 break;
8441
8442 case MBX_POLL:
09372820 8443 /* Set up null reference to mailbox command */
dea3101e 8444 psli->mbox_active = NULL;
09372820
JS
8445 /* Interrupt board to do it */
8446 writel(CA_MBATT, phba->CAregaddr);
8447 readl(phba->CAregaddr); /* flush */
8448
3772a991 8449 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8450 /* First read mbox status word */
34b02dcd 8451 word0 = *((uint32_t *)phba->mbox);
dea3101e
JB
8452 word0 = le32_to_cpu(word0);
8453 } else {
8454 /* First read mbox status word */
9940b97b
JS
8455 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8456 spin_unlock_irqrestore(&phba->hbalock,
8457 drvr_flag);
8458 goto out_not_finished;
8459 }
dea3101e
JB
8460 }
8461
8462 /* Read the HBA Host Attention Register */
9940b97b
JS
8463 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8464 spin_unlock_irqrestore(&phba->hbalock,
8465 drvr_flag);
8466 goto out_not_finished;
8467 }
a183a15f
JS
8468 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8469 1000) + jiffies;
09372820 8470 i = 0;
dea3101e 8471 /* Wait for command to complete */
41415862
JW
8472 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8473 (!(ha_copy & HA_MBATT) &&
2e0fef85 8474 (phba->link_state > LPFC_WARM_START))) {
09372820 8475 if (time_after(jiffies, timeout)) {
dea3101e 8476 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 8477 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 8478 drvr_flag);
58da1ffb 8479 goto out_not_finished;
dea3101e
JB
8480 }
8481
8482 /* Check if we took a mbox interrupt while we were
8483 polling */
8484 if (((word0 & OWN_CHIP) != OWN_CHIP)
8485 && (evtctr != psli->slistat.mbox_event))
8486 break;
8487
09372820
JS
8488 if (i++ > 10) {
8489 spin_unlock_irqrestore(&phba->hbalock,
8490 drvr_flag);
8491 msleep(1);
8492 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8493 }
dea3101e 8494
3772a991 8495 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8496 /* First copy command data */
34b02dcd 8497 word0 = *((uint32_t *)phba->mbox);
dea3101e 8498 word0 = le32_to_cpu(word0);
bf07bdea 8499 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 8500 MAILBOX_t *slimmb;
34b02dcd 8501 uint32_t slimword0;
dea3101e
JB
8502 /* Check real SLIM for any errors */
8503 slimword0 = readl(phba->MBslimaddr);
8504 slimmb = (MAILBOX_t *) & slimword0;
8505 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8506 && slimmb->mbxStatus) {
8507 psli->sli_flag &=
3772a991 8508 ~LPFC_SLI_ACTIVE;
dea3101e
JB
8509 word0 = slimword0;
8510 }
8511 }
8512 } else {
8513 /* First copy command data */
8514 word0 = readl(phba->MBslimaddr);
8515 }
8516 /* Read the HBA Host Attention Register */
9940b97b
JS
8517 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8518 spin_unlock_irqrestore(&phba->hbalock,
8519 drvr_flag);
8520 goto out_not_finished;
8521 }
dea3101e
JB
8522 }
8523
3772a991 8524 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8525 /* copy results back to user */
2ea259ee
JS
8526 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8527 MAILBOX_CMD_SIZE);
7a470277 8528 /* Copy the mailbox extension data */
3e1f0718 8529 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
7a470277 8530 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
3e1f0718 8531 pmbox->ctx_buf,
7a470277
JS
8532 pmbox->out_ext_byte_len);
8533 }
dea3101e
JB
8534 } else {
8535 /* First copy command data */
bf07bdea 8536 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
2ea259ee 8537 MAILBOX_CMD_SIZE);
7a470277 8538 /* Copy the mailbox extension data */
3e1f0718
JS
8539 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8540 lpfc_memcpy_from_slim(
8541 pmbox->ctx_buf,
7a470277
JS
8542 phba->MBslimaddr +
8543 MAILBOX_HBA_EXT_OFFSET,
8544 pmbox->out_ext_byte_len);
dea3101e
JB
8545 }
8546 }
8547
8548 writel(HA_MBATT, phba->HAregaddr);
8549 readl(phba->HAregaddr); /* flush */
8550
8551 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
bf07bdea 8552 status = mbx->mbxStatus;
dea3101e
JB
8553 }
8554
2e0fef85
JS
8555 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8556 return status;
58da1ffb
JS
8557
8558out_not_finished:
8559 if (processing_queue) {
da0436e9 8560 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
58da1ffb
JS
8561 lpfc_mbox_cmpl_put(phba, pmbox);
8562 }
8563 return MBX_NOT_FINISHED;
dea3101e
JB
8564}
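
/*
 * Editor's note: an illustrative caller-side sketch, not driver code, of the
 * ownership rule documented above.  Callers normally go through the generic
 * lpfc_sli_issue_mbox() wrapper; only MBX_BUSY or MBX_SUCCESS leaves the
 * mailbox owned by the SLI layer.  The helper name is hypothetical and the
 * mailbox preparation step is elided because the specific command does not
 * matter for the ownership rule being shown.
 */
static int lpfc_example_issue_nowait_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox;
	int rc;

	pmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmbox)
		return -ENOMEM;
	memset(pmbox, 0, sizeof(*pmbox));

	/* ... fill in the mailbox command and its mbox_cmpl handler here ... */

	rc = lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
	if (rc != MBX_BUSY && rc != MBX_SUCCESS) {
		/* The SLI layer did not take ownership; the caller frees. */
		mempool_free(pmbox, phba->mbox_mem_pool);
		return -EIO;
	}

	/* MBX_BUSY/MBX_SUCCESS: the SLI layer owns the mailbox until its
	 * completion handler runs, so it must not be freed here.
	 */
	return 0;
}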
8565
f1126688
JS
8566/**
8567 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8568 * @phba: Pointer to HBA context object.
8569 *
8570 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8571 * the driver internal pending mailbox queue. It will then try to wait out the
8572 * possible outstanding mailbox command before returning.
8573 *
8574 * Returns:
8575 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8576 * the outstanding mailbox command timed out.
8577 **/
8578static int
8579lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8580{
8581 struct lpfc_sli *psli = &phba->sli;
f1126688 8582 int rc = 0;
a183a15f 8583 unsigned long timeout = 0;
f1126688
JS
8584
8585 /* Mark the asynchronous mailbox command posting as blocked */
8586 spin_lock_irq(&phba->hbalock);
8587 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
f1126688
JS
8588 /* Determine how long we might wait for the active mailbox
8589 * command to be gracefully completed by firmware.
8590 */
a183a15f
JS
8591 if (phba->sli.mbox_active)
8592 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8593 phba->sli.mbox_active) *
8594 1000) + jiffies;
8595 spin_unlock_irq(&phba->hbalock);
8596
e8d3c3b1
JS
8597 /* Make sure the mailbox is really active */
8598 if (timeout)
8599 lpfc_sli4_process_missed_mbox_completions(phba);
8600
f1126688
JS
8601 /* Wait for the outstanding mailbox command to complete */
8602 while (phba->sli.mbox_active) {
8603 /* Check active mailbox complete status every 2ms */
8604 msleep(2);
8605 if (time_after(jiffies, timeout)) {
8606 /* Timeout, mark the outstanding cmd as not complete */
8607 rc = 1;
8608 break;
8609 }
8610 }
8611
8612 /* Cannot cleanly block async mailbox command, fail it */
8613 if (rc) {
8614 spin_lock_irq(&phba->hbalock);
8615 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8616 spin_unlock_irq(&phba->hbalock);
8617 }
8618 return rc;
8619}
8620
8621/**
8622 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8623 * @phba: Pointer to HBA context object.
8624 *
8625 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8626 * commands from the driver internal pending mailbox queue. It makes sure
8627 * that there is no outstanding mailbox command before resuming posting
8628 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8629 * mailbox command, it will try to wait it out before resuming asynchronous
8630 * mailbox command posting.
8631 **/
8632static void
8633lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8634{
8635 struct lpfc_sli *psli = &phba->sli;
8636
8637 spin_lock_irq(&phba->hbalock);
8638 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8639 /* Asynchronous mailbox posting is not blocked, do nothing */
8640 spin_unlock_irq(&phba->hbalock);
8641 return;
8642 }
8643
8644 /* The outstanding synchronous mailbox command is guaranteed to be done,
8645 * whether successful or timed out; after a timeout the outstanding mailbox
8646 * command is always removed, so just unblock posting of async
8647 * mailbox commands and resume
8648 */
8649 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8650 spin_unlock_irq(&phba->hbalock);
8651
291c2548 8652 /* wake up worker thread to post asynchronous mailbox command */
f1126688
JS
8653 lpfc_worker_wake_up(phba);
8654}
8655
2d843edc
JS
8656/**
8657 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8658 * @phba: Pointer to HBA context object.
8659 * @mboxq: Pointer to mailbox object.
8660 *
8661 * The function waits for the bootstrap mailbox register ready bit from
8662 * the port for twice the regular mailbox command timeout value.
8663 *
8664 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8665 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8666 **/
8667static int
8668lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8669{
8670 uint32_t db_ready;
8671 unsigned long timeout;
8672 struct lpfc_register bmbx_reg;
8673
8674 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8675 * 1000) + jiffies;
8676
8677 do {
8678 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8679 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8680 if (!db_ready)
e2ffe4d5 8681 mdelay(2);
2d843edc
JS
8682
8683 if (time_after(jiffies, timeout))
8684 return MBXERR_ERROR;
8685 } while (!db_ready);
8686
8687 return 0;
8688}
8689
da0436e9
JS
8690/**
8691 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8692 * @phba: Pointer to HBA context object.
8693 * @mboxq: Pointer to mailbox object.
8694 *
8695 * The function posts a mailbox to the port. The mailbox is expected
8696 * to be completely filled in and ready for the port to operate on it.
8697 * This routine executes a synchronous completion operation on the
8698 * mailbox by polling for its completion.
8699 *
8700 * The caller must not be holding any locks when calling this routine.
8701 *
8702 * Returns:
8703 * MBX_SUCCESS - mailbox posted successfully
8704 * Any of the MBX error values.
8705 **/
8706static int
8707lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8708{
8709 int rc = MBX_SUCCESS;
8710 unsigned long iflag;
da0436e9
JS
8711 uint32_t mcqe_status;
8712 uint32_t mbx_cmnd;
da0436e9
JS
8713 struct lpfc_sli *psli = &phba->sli;
8714 struct lpfc_mqe *mb = &mboxq->u.mqe;
8715 struct lpfc_bmbx_create *mbox_rgn;
8716 struct dma_address *dma_address;
da0436e9
JS
8717
8718 /*
8719 * Only one mailbox can be active to the bootstrap mailbox region
8720 * at a time and there is no queueing provided.
8721 */
8722 spin_lock_irqsave(&phba->hbalock, iflag);
8723 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8724 spin_unlock_irqrestore(&phba->hbalock, iflag);
8725 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8726 "(%d):2532 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8727 "cannot issue Data: x%x x%x\n",
8728 mboxq->vport ? mboxq->vport->vpi : 0,
8729 mboxq->u.mb.mbxCommand,
a183a15f
JS
8730 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8731 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8732 psli->sli_flag, MBX_POLL);
8733 return MBXERR_ERROR;
8734 }
8735 /* The server grabs the token and owns it until release */
8736 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8737 phba->sli.mbox_active = mboxq;
8738 spin_unlock_irqrestore(&phba->hbalock, iflag);
8739
2d843edc
JS
8740 /* wait for bootstrap mbox register readiness */
8741 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8742 if (rc)
8743 goto exit;
da0436e9
JS
8744 /*
8745 * Initialize the bootstrap memory region to avoid stale data areas
8746 * in the mailbox post. Then copy the caller's mailbox contents to
8747 * the bmbx mailbox region.
8748 */
8749 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8750 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
48f8fdb4
JS
8751 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8752 sizeof(struct lpfc_mqe));
da0436e9
JS
8753
8754 /* Post the high mailbox dma address to the port and wait for ready. */
8755 dma_address = &phba->sli4_hba.bmbx.dma_address;
8756 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8757
2d843edc
JS
8758 /* wait for bootstrap mbox register for hi-address write done */
8759 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8760 if (rc)
8761 goto exit;
da0436e9
JS
8762
8763 /* Post the low mailbox dma address to the port. */
8764 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
da0436e9 8765
2d843edc
JS
8766 /* wait for bootstrap mbox register for low address write done */
8767 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8768 if (rc)
8769 goto exit;
da0436e9
JS
8770
8771 /*
8772 * Read the CQ to ensure the mailbox has completed.
8773 * If so, update the mailbox status so that the upper layers
8774 * can complete the request normally.
8775 */
48f8fdb4
JS
8776 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8777 sizeof(struct lpfc_mqe));
da0436e9 8778 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
48f8fdb4
JS
8779 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8780 sizeof(struct lpfc_mcqe));
da0436e9 8781 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
0558056c
JS
8782 /*
8783 * When the CQE status indicates a failure and the mailbox status
8784 * indicates success then copy the CQE status into the mailbox status
8785 * (and prefix it with x4000).
8786 */
da0436e9 8787 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
0558056c
JS
8788 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8789 bf_set(lpfc_mqe_status, mb,
8790 (LPFC_MBX_ERROR_RANGE | mcqe_status));
da0436e9 8791 rc = MBXERR_ERROR;
d7c47992
JS
8792 } else
8793 lpfc_sli4_swap_str(phba, mboxq);
da0436e9
JS
8794
8795 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 8796 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
da0436e9
JS
8797 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8798 " x%x x%x CQ: x%x x%x x%x x%x\n",
a183a15f
JS
8799 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8800 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8801 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8802 bf_get(lpfc_mqe_status, mb),
8803 mb->un.mb_words[0], mb->un.mb_words[1],
8804 mb->un.mb_words[2], mb->un.mb_words[3],
8805 mb->un.mb_words[4], mb->un.mb_words[5],
8806 mb->un.mb_words[6], mb->un.mb_words[7],
8807 mb->un.mb_words[8], mb->un.mb_words[9],
8808 mb->un.mb_words[10], mb->un.mb_words[11],
8809 mb->un.mb_words[12], mboxq->mcqe.word0,
8810 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8811 mboxq->mcqe.trailer);
8812exit:
8813 /* We are holding the token, no lock needed when releasing it */
8814 spin_lock_irqsave(&phba->hbalock, iflag);
8815 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8816 phba->sli.mbox_active = NULL;
8817 spin_unlock_irqrestore(&phba->hbalock, iflag);
8818 return rc;
8819}
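
/*
 * Editor's note: a condensed sketch, not driver code, of how the block/
 * unblock helpers and the bootstrap post above combine, mirroring what
 * lpfc_sli_issue_mbox_s4() below does when a polled, synchronous mailbox
 * must be issued while asynchronous posting is enabled.  The helper name is
 * hypothetical.
 */
static int lpfc_example_sync_mbox_bracket(struct lpfc_hba *phba,
					  LPFC_MBOXQ_t *mboxq)
{
	int rc;

	/* Stop new async mailbox posts and wait out any active command. */
	rc = lpfc_sli4_async_mbox_block(phba);
	if (rc)
		return MBXERR_ERROR;

	/* The bootstrap mailbox region is now free for a polled post. */
	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);

	/* Re-open async posting and wake the worker thread. */
	lpfc_sli4_async_mbox_unblock(phba);
	return rc;
}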
8820
8821/**
8822 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8823 * @phba: Pointer to HBA context object.
8824 * @pmbox: Pointer to mailbox object.
8825 * @flag: Flag indicating how the mailbox need to be processed.
8826 *
8827 * This function is called by discovery code and HBA management code to submit
8828 * a mailbox command to firmware with SLI-4 interface spec.
8829 *
8830 * Return codes the caller owns the mailbox command after the return of the
8831 * function.
8832 **/
8833static int
8834lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8835 uint32_t flag)
8836{
8837 struct lpfc_sli *psli = &phba->sli;
8838 unsigned long iflags;
8839 int rc;
8840
b76f2dc9
JS
8841 /* dump from issue mailbox command if setup */
8842 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8843
8fa38513
JS
8844 rc = lpfc_mbox_dev_check(phba);
8845 if (unlikely(rc)) {
8846 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8847 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8fa38513
JS
8848 "cannot issue Data: x%x x%x\n",
8849 mboxq->vport ? mboxq->vport->vpi : 0,
8850 mboxq->u.mb.mbxCommand,
a183a15f
JS
8851 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8852 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8fa38513
JS
8853 psli->sli_flag, flag);
8854 goto out_not_finished;
8855 }
8856
da0436e9
JS
8857 /* Detect polling mode and jump to a handler */
8858 if (!phba->sli4_hba.intr_enable) {
8859 if (flag == MBX_POLL)
8860 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8861 else
8862 rc = -EIO;
8863 if (rc != MBX_SUCCESS)
0558056c 8864 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
da0436e9 8865 "(%d):2541 Mailbox command x%x "
cc459f19
JS
8866 "(x%x/x%x) failure: "
8867 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8868 "Data: x%x x%x\n,",
da0436e9
JS
8869 mboxq->vport ? mboxq->vport->vpi : 0,
8870 mboxq->u.mb.mbxCommand,
a183a15f
JS
8871 lpfc_sli_config_mbox_subsys_get(phba,
8872 mboxq),
8873 lpfc_sli_config_mbox_opcode_get(phba,
8874 mboxq),
cc459f19
JS
8875 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8876 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8877 bf_get(lpfc_mcqe_ext_status,
8878 &mboxq->mcqe),
da0436e9
JS
8879 psli->sli_flag, flag);
8880 return rc;
8881 } else if (flag == MBX_POLL) {
f1126688
JS
8882 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8883 "(%d):2542 Try to issue mailbox command "
7365f6fd 8884 "x%x (x%x/x%x) synchronously ahead of async "
f1126688 8885 "mailbox command queue: x%x x%x\n",
da0436e9
JS
8886 mboxq->vport ? mboxq->vport->vpi : 0,
8887 mboxq->u.mb.mbxCommand,
a183a15f
JS
8888 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8889 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9 8890 psli->sli_flag, flag);
f1126688
JS
8891 /* Try to block the asynchronous mailbox posting */
8892 rc = lpfc_sli4_async_mbox_block(phba);
8893 if (!rc) {
8894 /* Successfully blocked, now issue sync mbox cmd */
8895 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8896 if (rc != MBX_SUCCESS)
cc459f19 8897 lpfc_printf_log(phba, KERN_WARNING,
a183a15f 8898 LOG_MBOX | LOG_SLI,
cc459f19
JS
8899 "(%d):2597 Sync Mailbox command "
8900 "x%x (x%x/x%x) failure: "
8901 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8902 "Data: x%x x%x\n,",
8903 mboxq->vport ? mboxq->vport->vpi : 0,
a183a15f
JS
8904 mboxq->u.mb.mbxCommand,
8905 lpfc_sli_config_mbox_subsys_get(phba,
8906 mboxq),
8907 lpfc_sli_config_mbox_opcode_get(phba,
8908 mboxq),
cc459f19
JS
8909 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8910 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8911 bf_get(lpfc_mcqe_ext_status,
8912 &mboxq->mcqe),
a183a15f 8913 psli->sli_flag, flag);
f1126688
JS
8914 /* Unblock the async mailbox posting afterward */
8915 lpfc_sli4_async_mbox_unblock(phba);
8916 }
8917 return rc;
da0436e9
JS
8918 }
8919
291c2548 8920 /* Now, interrupt mode asynchronous mailbox command */
da0436e9
JS
8921 rc = lpfc_mbox_cmd_check(phba, mboxq);
8922 if (rc) {
8923 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8924 "(%d):2543 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8925 "cannot issue Data: x%x x%x\n",
8926 mboxq->vport ? mboxq->vport->vpi : 0,
8927 mboxq->u.mb.mbxCommand,
a183a15f
JS
8928 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8929 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8930 psli->sli_flag, flag);
8931 goto out_not_finished;
8932 }
da0436e9
JS
8933
8934 /* Put the mailbox command to the driver internal FIFO */
8935 psli->slistat.mbox_busy++;
8936 spin_lock_irqsave(&phba->hbalock, iflags);
8937 lpfc_mbox_put(phba, mboxq);
8938 spin_unlock_irqrestore(&phba->hbalock, iflags);
8939 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8940 "(%d):0354 Mbox cmd issue - Enqueue Data: "
a183a15f 8941 "x%x (x%x/x%x) x%x x%x x%x\n",
da0436e9
JS
8942 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8943 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
a183a15f
JS
8944 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8945 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8946 phba->pport->port_state,
8947 psli->sli_flag, MBX_NOWAIT);
8948 /* Wake up worker thread to transport mailbox command from head */
8949 lpfc_worker_wake_up(phba);
8950
8951 return MBX_BUSY;
8952
8953out_not_finished:
8954 return MBX_NOT_FINISHED;
8955}
8956
8957/**
8958 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8959 * @phba: Pointer to HBA context object.
8960 *
8961 * This function is called by worker thread to send a mailbox command to
8962 * SLI4 HBA firmware.
8963 *
8964 **/
8965int
8966lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8967{
8968 struct lpfc_sli *psli = &phba->sli;
8969 LPFC_MBOXQ_t *mboxq;
8970 int rc = MBX_SUCCESS;
8971 unsigned long iflags;
8972 struct lpfc_mqe *mqe;
8973 uint32_t mbx_cmnd;
8974
8975 /* Check interrupt mode before posting async mailbox command */
8976 if (unlikely(!phba->sli4_hba.intr_enable))
8977 return MBX_NOT_FINISHED;
8978
8979 /* Check for mailbox command service token */
8980 spin_lock_irqsave(&phba->hbalock, iflags);
8981 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8982 spin_unlock_irqrestore(&phba->hbalock, iflags);
8983 return MBX_NOT_FINISHED;
8984 }
8985 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8986 spin_unlock_irqrestore(&phba->hbalock, iflags);
8987 return MBX_NOT_FINISHED;
8988 }
8989 if (unlikely(phba->sli.mbox_active)) {
8990 spin_unlock_irqrestore(&phba->hbalock, iflags);
8991 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8992 "0384 There is pending active mailbox cmd\n");
8993 return MBX_NOT_FINISHED;
8994 }
8995 /* Take the mailbox command service token */
8996 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8997
8998 /* Get the next mailbox command from head of queue */
8999 mboxq = lpfc_mbox_get(phba);
9000
9001 /* If no more mailbox commands are waiting to post, we're done */
9002 if (!mboxq) {
9003 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9004 spin_unlock_irqrestore(&phba->hbalock, iflags);
9005 return MBX_SUCCESS;
9006 }
9007 phba->sli.mbox_active = mboxq;
9008 spin_unlock_irqrestore(&phba->hbalock, iflags);
9009
9010 /* Check device readiness for posting mailbox command */
9011 rc = lpfc_mbox_dev_check(phba);
9012 if (unlikely(rc))
9013 /* Driver clean routine will clean up pending mailbox */
9014 goto out_not_finished;
9015
9016 /* Prepare the mbox command to be posted */
9017 mqe = &mboxq->u.mqe;
9018 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9019
9020 /* Start timer for the mbox_tmo and log some mailbox post messages */
9021 mod_timer(&psli->mbox_tmo, (jiffies +
256ec0d0 9022 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
da0436e9
JS
9023
9024 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 9025 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
da0436e9
JS
9026 "x%x x%x\n",
9027 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
a183a15f
JS
9028 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9029 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
9030 phba->pport->port_state, psli->sli_flag);
9031
9032 if (mbx_cmnd != MBX_HEARTBEAT) {
9033 if (mboxq->vport) {
9034 lpfc_debugfs_disc_trc(mboxq->vport,
9035 LPFC_DISC_TRC_MBOX_VPORT,
9036 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9037 mbx_cmnd, mqe->un.mb_words[0],
9038 mqe->un.mb_words[1]);
9039 } else {
9040 lpfc_debugfs_disc_trc(phba->pport,
9041 LPFC_DISC_TRC_MBOX,
9042 "MBOX Send: cmd:x%x mb:x%x x%x",
9043 mbx_cmnd, mqe->un.mb_words[0],
9044 mqe->un.mb_words[1]);
9045 }
9046 }
9047 psli->slistat.mbox_cmd++;
9048
9049 /* Post the mailbox command to the port */
9050 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9051 if (rc != MBX_SUCCESS) {
9052 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 9053 "(%d):2533 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
9054 "cannot issue Data: x%x x%x\n",
9055 mboxq->vport ? mboxq->vport->vpi : 0,
9056 mboxq->u.mb.mbxCommand,
a183a15f
JS
9057 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9058 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
9059 psli->sli_flag, MBX_NOWAIT);
9060 goto out_not_finished;
9061 }
9062
9063 return rc;
9064
9065out_not_finished:
9066 spin_lock_irqsave(&phba->hbalock, iflags);
d7069f09
JS
9067 if (phba->sli.mbox_active) {
9068 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9069 __lpfc_mbox_cmpl_put(phba, mboxq);
9070 /* Release the token */
9071 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9072 phba->sli.mbox_active = NULL;
9073 }
da0436e9
JS
9074 spin_unlock_irqrestore(&phba->hbalock, iflags);
9075
9076 return MBX_NOT_FINISHED;
9077}
9078
9079/**
9080 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
9081 * @phba: Pointer to HBA context object.
9082 * @pmbox: Pointer to mailbox object.
9083 * @flag: Flag indicating how the mailbox need to be processed.
9084 *
9085 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
9086 * the API jump table function pointer from the lpfc_hba struct.
9087 *
9088 * Return codes: the caller owns the mailbox command after the return of
9089 * the function.
9090 **/
9091int
9092lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9093{
9094 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
9095}
9096
9097/**
25985edc 9098 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
da0436e9
JS
9099 * @phba: The hba struct for which this call is being executed.
9100 * @dev_grp: The HBA PCI-Device group number.
9101 *
9102 * This routine sets up the mbox interface API function jump table in @phba
9103 * struct.
9104 * Returns: 0 - success, -ENODEV - failure.
9105 **/
9106int
9107lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9108{
9109
9110 switch (dev_grp) {
9111 case LPFC_PCI_DEV_LP:
9112 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
9113 phba->lpfc_sli_handle_slow_ring_event =
9114 lpfc_sli_handle_slow_ring_event_s3;
9115 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
9116 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
9117 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
9118 break;
9119 case LPFC_PCI_DEV_OC:
9120 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9121 phba->lpfc_sli_handle_slow_ring_event =
9122 lpfc_sli_handle_slow_ring_event_s4;
9123 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9124 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9125 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9126 break;
9127 default:
9128 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9129 "1420 Invalid HBA PCI-device group: 0x%x\n",
9130 dev_grp);
9131 return -ENODEV;
9132 break;
9133 }
9134 return 0;
9135}
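
/*
 * Editor's note: an illustrative sketch, not driver code, of how the jump
 * table set up above is consumed.  The PCI-device group selects the SLI-3 or
 * SLI-4 entry points once, and later callers go through the generic
 * lpfc_sli_issue_mbox() wrapper without caring which interface is
 * underneath.  The helper name is hypothetical.
 */
static int lpfc_example_mbox_api_setup(struct lpfc_hba *phba,
				       LPFC_MBOXQ_t *pmbox)
{
	int rc;

	/* Pick the SLI-4 entry points (LPFC_PCI_DEV_OC adapters). */
	rc = lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (rc)
		return rc;

	/* The wrapper now routes this to lpfc_sli_issue_mbox_s4(). */
	return lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
}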
9136
e59058c4 9137/**
3621a710 9138 * __lpfc_sli_ringtx_put - Add an iocb to the txq
e59058c4
JS
9139 * @phba: Pointer to HBA context object.
9140 * @pring: Pointer to driver SLI ring object.
9141 * @piocb: Pointer to address of newly added command iocb.
9142 *
27f3efd6
JS
9143 * This function is called with hbalock held for SLI3 ports or
9144 * the ring lock held for SLI4 ports to add a command
e59058c4
JS
9145 * iocb to the txq when SLI layer cannot submit the command iocb
9146 * to the ring.
9147 **/
2a9bf3d0 9148void
92d7f7b0 9149__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 9150 struct lpfc_iocbq *piocb)
dea3101e 9151{
27f3efd6
JS
9152 if (phba->sli_rev == LPFC_SLI_REV4)
9153 lockdep_assert_held(&pring->ring_lock);
9154 else
9155 lockdep_assert_held(&phba->hbalock);
dea3101e
JB
9156 /* Insert the caller's iocb in the txq tail for later processing. */
9157 list_add_tail(&piocb->list, &pring->txq);
dea3101e
JB
9158}
9159
e59058c4 9160/**
3621a710 9161 * lpfc_sli_next_iocb - Get the next iocb in the txq
e59058c4
JS
9162 * @phba: Pointer to HBA context object.
9163 * @pring: Pointer to driver SLI ring object.
9164 * @piocb: Pointer to address of newly added command iocb.
9165 *
9166 * This function is called with hbalock held before a new
9167 * iocb is submitted to the firmware. This function checks the
9168 * txq so that iocbs already queued there are flushed to the firmware
9169 * before new iocbs are submitted to the firmware.
9170 * If there are iocbs in the txq which need to be submitted
9171 * to firmware, lpfc_sli_next_iocb returns the first element
9172 * of the txq after dequeuing it from txq.
9173 * If there is no iocb in the txq then the function will return
9174 * *piocb and *piocb is set to NULL. Caller needs to check
9175 * *piocb to find if there are more commands in the txq.
9176 **/
dea3101e
JB
9177static struct lpfc_iocbq *
9178lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 9179 struct lpfc_iocbq **piocb)
dea3101e
JB
9180{
9181 struct lpfc_iocbq * nextiocb;
9182
1c2ba475
JT
9183 lockdep_assert_held(&phba->hbalock);
9184
dea3101e
JB
9185 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9186 if (!nextiocb) {
9187 nextiocb = *piocb;
9188 *piocb = NULL;
9189 }
9190
9191 return nextiocb;
9192}
9193
e59058c4 9194/**
3772a991 9195 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
e59058c4 9196 * @phba: Pointer to HBA context object.
3772a991 9197 * @ring_number: SLI ring number to issue iocb on.
e59058c4
JS
9198 * @piocb: Pointer to command iocb.
9199 * @flag: Flag indicating if this command can be put into txq.
9200 *
3772a991
JS
9201 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9202 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9203 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9204 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9205 * this function allows only iocbs for posting buffers. This function finds
9206 * next available slot in the command ring and posts the command to the
9207 * available slot and writes the port attention register to request HBA start
9208 * processing new iocb. If there is no slot available in the ring and
9209 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9210 * the function returns IOCB_BUSY.
e59058c4 9211 *
3772a991
JS
9212 * This function is called with hbalock held. The function will return success
9213 * after it successfully submits the iocb to firmware or after adding it to
9214 * the txq.
e59058c4 9215 **/
98c9ea5c 9216static int
3772a991 9217__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea3101e
JB
9218 struct lpfc_iocbq *piocb, uint32_t flag)
9219{
9220 struct lpfc_iocbq *nextiocb;
9221 IOCB_t *iocb;
895427bd 9222 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
dea3101e 9223
1c2ba475
JT
9224 lockdep_assert_held(&phba->hbalock);
9225
92d7f7b0
JS
9226 if (piocb->iocb_cmpl && (!piocb->vport) &&
9227 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9228 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9229 lpfc_printf_log(phba, KERN_ERR,
9230 LOG_SLI | LOG_VPORT,
e8b62011 9231 "1807 IOCB x%x failed. No vport\n",
92d7f7b0
JS
9232 piocb->iocb.ulpCommand);
9233 dump_stack();
9234 return IOCB_ERROR;
9235 }
9236
9237
8d63f375
LV
9238 /* If the PCI channel is in offline state, do not post iocbs. */
9239 if (unlikely(pci_channel_offline(phba->pcidev)))
9240 return IOCB_ERROR;
9241
a257bf90
JS
9242 /* If HBA has a deferred error attention, fail the iocb. */
9243 if (unlikely(phba->hba_flag & DEFER_ERATT))
9244 return IOCB_ERROR;
9245
dea3101e
JB
9246 /*
9247 * We should never get an IOCB if we are in a < LINK_DOWN state
9248 */
2e0fef85 9249 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e
JB
9250 return IOCB_ERROR;
9251
9252 /*
9253 * Check to see if we are blocking IOCB processing because of a
0b727fea 9254 * outstanding event.
dea3101e 9255 */
0b727fea 9256 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea3101e
JB
9257 goto iocb_busy;
9258
2e0fef85 9259 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea3101e 9260 /*
2680eeaa 9261 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea3101e
JB
9262 * can be issued if the link is not up.
9263 */
9264 switch (piocb->iocb.ulpCommand) {
84774a4d
JS
9265 case CMD_GEN_REQUEST64_CR:
9266 case CMD_GEN_REQUEST64_CX:
9267 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9268 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
6a9c52cf 9269 FC_RCTL_DD_UNSOL_CMD) ||
84774a4d
JS
9270 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9271 MENLO_TRANSPORT_TYPE))
9272
9273 goto iocb_busy;
9274 break;
dea3101e
JB
9275 case CMD_QUE_RING_BUF_CN:
9276 case CMD_QUE_RING_BUF64_CN:
dea3101e
JB
9277 /*
9278 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9279 * completion, iocb_cmpl MUST be 0.
9280 */
9281 if (piocb->iocb_cmpl)
9282 piocb->iocb_cmpl = NULL;
9283 /*FALLTHROUGH*/
9284 case CMD_CREATE_XRI_CR:
2680eeaa
JS
9285 case CMD_CLOSE_XRI_CN:
9286 case CMD_CLOSE_XRI_CX:
dea3101e
JB
9287 break;
9288 default:
9289 goto iocb_busy;
9290 }
9291
9292 /*
9293 * For FCP commands, we must be in a state where we can process link
9294 * attention events.
9295 */
895427bd 9296 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
92d7f7b0 9297 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea3101e 9298 goto iocb_busy;
92d7f7b0 9299 }
dea3101e 9300
dea3101e
JB
9301 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9302 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9303 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9304
9305 if (iocb)
9306 lpfc_sli_update_ring(phba, pring);
9307 else
9308 lpfc_sli_update_full_ring(phba, pring);
9309
9310 if (!piocb)
9311 return IOCB_SUCCESS;
9312
9313 goto out_busy;
9314
9315 iocb_busy:
9316 pring->stats.iocb_cmd_delay++;
9317
9318 out_busy:
9319
9320 if (!(flag & SLI_IOCB_RET_IOCB)) {
92d7f7b0 9321 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea3101e
JB
9322 return IOCB_SUCCESS;
9323 }
9324
9325 return IOCB_BUSY;
9326}
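
/*
 * Editor's note: an illustrative sketch, not driver code, of the locking
 * contract documented above for __lpfc_sli_issue_iocb_s3(): the caller holds
 * hbalock, and SLI_IOCB_RET_IOCB decides whether a full ring hands IOCB_BUSY
 * back to the caller or silently parks the iocb on the txq.  The helper name
 * is hypothetical.
 */
static int lpfc_example_issue_iocb_s3(struct lpfc_hba *phba,
				      uint32_t ring_number,
				      struct lpfc_iocbq *piocb)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	/* With SLI_IOCB_RET_IOCB a full ring returns IOCB_BUSY and the caller
	 * keeps ownership of the iocb; without it, the iocb is queued to the
	 * txq and IOCB_SUCCESS is returned instead.
	 */
	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb,
				      SLI_IOCB_RET_IOCB);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return rc;
}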
9327
3772a991 9328/**
4f774513
JS
9329 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9330 * @phba: Pointer to HBA context object.
9331 * @piocb: Pointer to command iocb.
9332 * @sglq: Pointer to the scatter gather queue object.
9333 *
9334 * This routine converts the bpl or bde that is in the IOCB
9335 * to a sgl list for the sli4 hardware. The physical address
9336 * of the bpl/bde is converted back to a virtual address.
9337 * If the IOCB contains a BPL then the list of BDE's is
9338 * converted to sli4_sge's. If the IOCB contains a single
9339 * BDE then it is converted to a single sli4_sge.
9340 * The IOCB is still in CPU endianness so the contents of
9341 * the bpl can be used without byte swapping.
9342 *
9343 * Returns valid XRI = Success, NO_XRI = Failure.
9344**/
9345static uint16_t
9346lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9347 struct lpfc_sglq *sglq)
3772a991 9348{
4f774513
JS
9349 uint16_t xritag = NO_XRI;
9350 struct ulp_bde64 *bpl = NULL;
9351 struct ulp_bde64 bde;
9352 struct sli4_sge *sgl = NULL;
1b51197d 9353 struct lpfc_dmabuf *dmabuf;
4f774513
JS
9354 IOCB_t *icmd;
9355 int numBdes = 0;
9356 int i = 0;
63e801ce
JS
9357 uint32_t offset = 0; /* accumulated offset in the sg request list */
9358 int inbound = 0; /* number of sg reply entries inbound from firmware */
3772a991 9359
4f774513
JS
9360 if (!piocbq || !sglq)
9361 return xritag;
9362
9363 sgl = (struct sli4_sge *)sglq->sgl;
9364 icmd = &piocbq->iocb;
6b5151fd
JS
9365 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9366 return sglq->sli4_xritag;
4f774513
JS
9367 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9368 numBdes = icmd->un.genreq64.bdl.bdeSize /
9369 sizeof(struct ulp_bde64);
9370 /* The addrHigh and addrLow fields within the IOCB
9371 * have not been byteswapped yet so there is no
9372 * need to swap them back.
9373 */
1b51197d
JS
9374 if (piocbq->context3)
9375 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9376 else
9377 return xritag;
4f774513 9378
1b51197d 9379 bpl = (struct ulp_bde64 *)dmabuf->virt;
4f774513
JS
9380 if (!bpl)
9381 return xritag;
9382
9383 for (i = 0; i < numBdes; i++) {
9384 /* Should already be byte swapped. */
28baac74
JS
9385 sgl->addr_hi = bpl->addrHigh;
9386 sgl->addr_lo = bpl->addrLow;
9387
0558056c 9388 sgl->word2 = le32_to_cpu(sgl->word2);
4f774513
JS
9389 if ((i+1) == numBdes)
9390 bf_set(lpfc_sli4_sge_last, sgl, 1);
9391 else
9392 bf_set(lpfc_sli4_sge_last, sgl, 0);
28baac74
JS
9393 /* swap the size field back to the cpu so we
9394 * can assign it to the sgl.
9395 */
9396 bde.tus.w = le32_to_cpu(bpl->tus.w);
9397 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
63e801ce
JS
9398 /* The offsets in the sgl need to be accumulated
9399 * separately for the request and reply lists.
9400 * The request is always first, the reply follows.
9401 */
9402 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9403 /* add up the reply sg entries */
9404 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9405 inbound++;
9406 /* first inbound? reset the offset */
9407 if (inbound == 1)
9408 offset = 0;
9409 bf_set(lpfc_sli4_sge_offset, sgl, offset);
f9bb2da1
JS
9410 bf_set(lpfc_sli4_sge_type, sgl,
9411 LPFC_SGE_TYPE_DATA);
63e801ce
JS
9412 offset += bde.tus.f.bdeSize;
9413 }
546fc854 9414 sgl->word2 = cpu_to_le32(sgl->word2);
4f774513
JS
9415 bpl++;
9416 sgl++;
9417 }
9418 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9419 /* The addrHigh and addrLow fields of the BDE have not
9420 * been byteswapped yet so they need to be swapped
9421 * before putting them in the sgl.
9422 */
9423 sgl->addr_hi =
9424 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9425 sgl->addr_lo =
9426 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
0558056c 9427 sgl->word2 = le32_to_cpu(sgl->word2);
4f774513
JS
9428 bf_set(lpfc_sli4_sge_last, sgl, 1);
9429 sgl->word2 = cpu_to_le32(sgl->word2);
28baac74
JS
9430 sgl->sge_len =
9431 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
4f774513
JS
9432 }
9433 return sglq->sli4_xritag;
3772a991 9434}
92d7f7b0 9435
e59058c4 9436/**
4f774513 9437 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
e59058c4 9438 * @phba: Pointer to HBA context object.
4f774513
JS
9439 * @piocb: Pointer to command iocb.
9440 * @wqe: Pointer to the work queue entry.
e59058c4 9441 *
4f774513
JS
9442 * This routine converts the iocb command to its Work Queue Entry
9443 * equivalent. The wqe pointer should not have any fields set when
9444 * this routine is called because it will memcpy over them.
9445 * This routine does not set the CQ_ID or the WQEC bits in the
9446 * wqe.
e59058c4 9447 *
4f774513 9448 * Returns: 0 = Success, IOCB_ERROR = Failure.
e59058c4 9449 **/
cf5bf97e 9450static int
4f774513 9451lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
205e8240 9452 union lpfc_wqe128 *wqe)
cf5bf97e 9453{
5ffc266e 9454 uint32_t xmit_len = 0, total_len = 0;
4f774513
JS
9455 uint8_t ct = 0;
9456 uint32_t fip;
9457 uint32_t abort_tag;
9458 uint8_t command_type = ELS_COMMAND_NON_FIP;
9459 uint8_t cmnd;
9460 uint16_t xritag;
dcf2a4e0
JS
9461 uint16_t abrt_iotag;
9462 struct lpfc_iocbq *abrtiocbq;
4f774513 9463 struct ulp_bde64 *bpl = NULL;
f0d9bccc 9464 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
5ffc266e
JS
9465 int numBdes, i;
9466 struct ulp_bde64 bde;
c31098ce 9467 struct lpfc_nodelist *ndlp;
ff78d8f9 9468 uint32_t *pcmd;
1b51197d 9469 uint32_t if_type;
4f774513 9470
45ed1190 9471 fip = phba->hba_flag & HBA_FIP_SUPPORT;
4f774513 9472 /* The fcp commands will set command type */
0c287589 9473 if (iocbq->iocb_flag & LPFC_IO_FCP)
4f774513 9474 command_type = FCP_COMMAND;
c868595d 9475 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
0c287589
JS
9476 command_type = ELS_COMMAND_FIP;
9477 else
9478 command_type = ELS_COMMAND_NON_FIP;
9479
b5c53958
JS
9480 if (phba->fcp_embed_io)
9481 memset(wqe, 0, sizeof(union lpfc_wqe128));
4f774513
JS
9482 /* Some of the fields are in the right position already */
9483 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
e62245d9
JS
9484 /* The ct field has moved so reset */
9485 wqe->generic.wqe_com.word7 = 0;
9486 wqe->generic.wqe_com.word10 = 0;
b5c53958
JS
9487
9488 abort_tag = (uint32_t) iocbq->iotag;
9489 xritag = iocbq->sli4_xritag;
4f774513
JS
9490 /* words0-2 bpl convert bde */
9491 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5ffc266e
JS
9492 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9493 sizeof(struct ulp_bde64);
4f774513
JS
9494 bpl = (struct ulp_bde64 *)
9495 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9496 if (!bpl)
9497 return IOCB_ERROR;
cf5bf97e 9498
4f774513
JS
9499 /* Should already be byte swapped. */
9500 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9501 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9502 /* swap the size field back to the cpu so we
9503 * can assign it to the sgl.
9504 */
9505 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5ffc266e
JS
9506 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9507 total_len = 0;
9508 for (i = 0; i < numBdes; i++) {
9509 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9510 total_len += bde.tus.f.bdeSize;
9511 }
4f774513 9512 } else
5ffc266e 9513 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
cf5bf97e 9514
4f774513
JS
9515 iocbq->iocb.ulpIoTag = iocbq->iotag;
9516 cmnd = iocbq->iocb.ulpCommand;
a4bc3379 9517
4f774513
JS
9518 switch (iocbq->iocb.ulpCommand) {
9519 case CMD_ELS_REQUEST64_CR:
93d1379e
JS
9520 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9521 ndlp = iocbq->context_un.ndlp;
9522 else
9523 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513
JS
9524 if (!iocbq->iocb.ulpLe) {
9525 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9526 "2007 Only Limited Edition cmd Format"
9527 " supported 0x%x\n",
9528 iocbq->iocb.ulpCommand);
9529 return IOCB_ERROR;
9530 }
ff78d8f9 9531
5ffc266e 9532 wqe->els_req.payload_len = xmit_len;
4f774513
JS
9533 /* Els_request64 has a TMO */
9534 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9535 iocbq->iocb.ulpTimeout);
9536 /* Need a VF for word 4 set the vf bit*/
9537 bf_set(els_req64_vf, &wqe->els_req, 0);
9538 /* And a VFID for word 12 */
9539 bf_set(els_req64_vfid, &wqe->els_req, 0);
4f774513 9540 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
f0d9bccc
JS
9541 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9542 iocbq->iocb.ulpContext);
9543 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9544 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
4f774513 9545 /* CCP CCPE PV PRI in word10 were set in the memcpy */
ff78d8f9 9546 if (command_type == ELS_COMMAND_FIP)
c868595d
JS
9547 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9548 >> LPFC_FIP_ELS_ID_SHIFT);
ff78d8f9
JS
9549 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9550 iocbq->context2)->virt);
1b51197d
JS
9551 if_type = bf_get(lpfc_sli_intf_if_type,
9552 &phba->sli4_hba.sli_intf);
27d6ac0a 9553 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
ff78d8f9 9554 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
cb69f7de 9555 *pcmd == ELS_CMD_SCR ||
df3fe766 9556 *pcmd == ELS_CMD_RDF ||
f60cb93b 9557 *pcmd == ELS_CMD_RSCN_XMT ||
6b5151fd 9558 *pcmd == ELS_CMD_FDISC ||
bdcd2b92 9559 *pcmd == ELS_CMD_LOGO ||
ff78d8f9
JS
9560 *pcmd == ELS_CMD_PLOGI)) {
9561 bf_set(els_req64_sp, &wqe->els_req, 1);
9562 bf_set(els_req64_sid, &wqe->els_req,
9563 iocbq->vport->fc_myDID);
939723a4
JS
9564 if ((*pcmd == ELS_CMD_FLOGI) &&
9565 !(phba->fc_topology ==
9566 LPFC_TOPOLOGY_LOOP))
9567 bf_set(els_req64_sid, &wqe->els_req, 0);
ff78d8f9
JS
9568 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9569 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
a7dd9c0f 9570 phba->vpi_ids[iocbq->vport->vpi]);
3ef6d24c 9571 } else if (pcmd && iocbq->context1) {
ff78d8f9
JS
9572 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9573 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9574 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9575 }
c868595d 9576 }
6d368e53
JS
9577 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9578 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
f0d9bccc
JS
9579 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9580 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9581 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9582 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9583 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9584 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
af22741c 9585 wqe->els_req.max_response_payload_len = total_len - xmit_len;
7851fe2c 9586 break;
5ffc266e 9587 case CMD_XMIT_SEQUENCE64_CX:
f0d9bccc
JS
9588 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9589 iocbq->iocb.un.ulpWord[3]);
9590 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7851fe2c 9591 iocbq->iocb.unsli3.rcvsli3.ox_id);
5ffc266e
JS
9592 /* The entire sequence is transmitted for this IOCB */
9593 xmit_len = total_len;
9594 cmnd = CMD_XMIT_SEQUENCE64_CR;
1b51197d
JS
9595 if (phba->link_flag & LS_LOOPBACK_MODE)
9596 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
5bd5f66c 9597 /* fall through */
4f774513 9598 case CMD_XMIT_SEQUENCE64_CR:
f0d9bccc
JS
9599 /* word3 iocb=io_tag32 wqe=reserved */
9600 wqe->xmit_sequence.rsvd3 = 0;
4f774513
JS
9601 /* word4 relative_offset memcpy */
9602 /* word5 r_ctl/df_ctl memcpy */
f0d9bccc
JS
9603 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9604 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9605 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9606 LPFC_WQE_IOD_WRITE);
9607 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9608 LPFC_WQE_LENLOC_WORD12);
9609 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
5ffc266e
JS
9610 wqe->xmit_sequence.xmit_len = xmit_len;
9611 command_type = OTHER_COMMAND;
7851fe2c 9612 break;
4f774513 9613 case CMD_XMIT_BCAST64_CN:
f0d9bccc
JS
9614 /* word3 iocb=iotag32 wqe=seq_payload_len */
9615 wqe->xmit_bcast64.seq_payload_len = xmit_len;
4f774513
JS
9616 /* word4 iocb=rsvd wqe=rsvd */
9617 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9618 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
f0d9bccc 9619 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
4f774513 9620 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
f0d9bccc
JS
9621 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9622 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9623 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9624 LPFC_WQE_LENLOC_WORD3);
9625 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7851fe2c 9626 break;
4f774513
JS
9627 case CMD_FCP_IWRITE64_CR:
9628 command_type = FCP_COMMAND_DATA_OUT;
f0d9bccc
JS
9629 /* word3 iocb=iotag wqe=payload_offset_len */
9630 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
0ba4b219
JS
9631 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9632 xmit_len + sizeof(struct fcp_rsp));
9633 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9634 0);
f0d9bccc
JS
9635 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9636 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9637 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9638 iocbq->iocb.ulpFCP2Rcvy);
9639 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9640 /* Always open the exchange */
f0d9bccc
JS
9641 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9642 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9643 LPFC_WQE_LENLOC_WORD4);
f0d9bccc 9644 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
acd6859b 9645 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
1ba981fd
JS
9646 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9647 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
c92c841c
JS
9648 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9649 if (iocbq->priority) {
9650 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9651 (iocbq->priority << 1));
9652 } else {
1ba981fd
JS
9653 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9654 (phba->cfg_XLanePriority << 1));
9655 }
9656 }
b5c53958
JS
9657 /* Note, word 10 is already initialized to 0 */
9658
414abe0a
JS
9659 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9660 if (phba->cfg_enable_pbde)
0bc2b7c5
JS
9661 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9662 else
9663 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9664
b5c53958 9665 if (phba->fcp_embed_io) {
c490850a 9666 struct lpfc_io_buf *lpfc_cmd;
b5c53958 9667 struct sli4_sge *sgl;
b5c53958
JS
9668 struct fcp_cmnd *fcp_cmnd;
9669 uint32_t *ptr;
9670
9671 /* 128 byte wqe support here */
b5c53958
JS
9672
9673 lpfc_cmd = iocbq->context1;
0794d601 9674 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
b5c53958
JS
9675 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9676
9677 /* Word 0-2 - FCP_CMND */
205e8240 9678 wqe->generic.bde.tus.f.bdeFlags =
b5c53958 9679 BUFF_TYPE_BDE_IMMED;
205e8240
JS
9680 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9681 wqe->generic.bde.addrHigh = 0;
9682 wqe->generic.bde.addrLow = 88; /* Word 22 */
b5c53958 9683
205e8240
JS
9684 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9685 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
b5c53958
JS
9686
9687 /* Word 22-29 FCP CMND Payload */
205e8240 9688 ptr = &wqe->words[22];
b5c53958
JS
9689 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9690 }
7851fe2c 9691 break;
4f774513 9692 case CMD_FCP_IREAD64_CR:
f0d9bccc
JS
9693 /* word3 iocb=iotag wqe=payload_offset_len */
9694 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
0ba4b219
JS
9695 bf_set(payload_offset_len, &wqe->fcp_iread,
9696 xmit_len + sizeof(struct fcp_rsp));
9697 bf_set(cmd_buff_len, &wqe->fcp_iread,
9698 0);
f0d9bccc
JS
9699 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9700 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9701 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9702 iocbq->iocb.ulpFCP2Rcvy);
9703 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
f1126688 9704 /* Always open the exchange */
f0d9bccc
JS
9705 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9706 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9707 LPFC_WQE_LENLOC_WORD4);
f0d9bccc 9708 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
acd6859b 9709 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
1ba981fd
JS
9710 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9711 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
c92c841c
JS
9712 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9713 if (iocbq->priority) {
9714 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9715 (iocbq->priority << 1));
9716 } else {
1ba981fd
JS
9717 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9718 (phba->cfg_XLanePriority << 1));
9719 }
9720 }
b5c53958
JS
9721 /* Note, word 10 is already initialized to 0 */
9722
414abe0a
JS
9723 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9724 if (phba->cfg_enable_pbde)
0bc2b7c5
JS
9725 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9726 else
9727 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9728
b5c53958 9729 if (phba->fcp_embed_io) {
c490850a 9730 struct lpfc_io_buf *lpfc_cmd;
b5c53958 9731 struct sli4_sge *sgl;
b5c53958
JS
9732 struct fcp_cmnd *fcp_cmnd;
9733 uint32_t *ptr;
9734
9735 /* 128 byte wqe support here */
b5c53958
JS
9736
9737 lpfc_cmd = iocbq->context1;
0794d601 9738 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
b5c53958
JS
9739 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9740
9741 /* Word 0-2 - FCP_CMND */
205e8240 9742 wqe->generic.bde.tus.f.bdeFlags =
b5c53958 9743 BUFF_TYPE_BDE_IMMED;
205e8240
JS
9744 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9745 wqe->generic.bde.addrHigh = 0;
9746 wqe->generic.bde.addrLow = 88; /* Word 22 */
b5c53958 9747
205e8240
JS
9748 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9749 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
b5c53958
JS
9750
9751 /* Word 22-29 FCP CMND Payload */
205e8240 9752 ptr = &wqe->words[22];
b5c53958
JS
9753 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9754 }
7851fe2c 9755 break;
4f774513 9756 case CMD_FCP_ICMND64_CR:
0ba4b219
JS
9757 /* word3 iocb=iotag wqe=payload_offset_len */
9758 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9759 bf_set(payload_offset_len, &wqe->fcp_icmd,
9760 xmit_len + sizeof(struct fcp_rsp));
9761 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9762 0);
f0d9bccc 9763 /* word3 iocb=IO_TAG wqe=reserved */
f0d9bccc 9764 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
4f774513 9765 /* Always open the exchange */
f0d9bccc
JS
9766 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9767 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9768 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9769 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9770 LPFC_WQE_LENLOC_NONE);
2a94aea4
JS
9771 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9772 iocbq->iocb.ulpFCP2Rcvy);
1ba981fd
JS
9773 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9774 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
c92c841c
JS
9775 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9776 if (iocbq->priority) {
9777 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9778 (iocbq->priority << 1));
9779 } else {
1ba981fd
JS
9780 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9781 (phba->cfg_XLanePriority << 1));
9782 }
9783 }
b5c53958
JS
9784 /* Note, word 10 is already initialized to 0 */
9785
9786 if (phba->fcp_embed_io) {
c490850a 9787 struct lpfc_io_buf *lpfc_cmd;
b5c53958 9788 struct sli4_sge *sgl;
b5c53958
JS
9789 struct fcp_cmnd *fcp_cmnd;
9790 uint32_t *ptr;
9791
9792 /* 128 byte wqe support here */
b5c53958
JS
9793
9794 lpfc_cmd = iocbq->context1;
0794d601 9795 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
b5c53958
JS
9796 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9797
9798 /* Word 0-2 - FCP_CMND */
205e8240 9799 wqe->generic.bde.tus.f.bdeFlags =
b5c53958 9800 BUFF_TYPE_BDE_IMMED;
205e8240
JS
9801 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9802 wqe->generic.bde.addrHigh = 0;
9803 wqe->generic.bde.addrLow = 88; /* Word 22 */
b5c53958 9804
205e8240
JS
9805 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9806 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
b5c53958
JS
9807
9808 /* Word 22-29 FCP CMND Payload */
205e8240 9809 ptr = &wqe->words[22];
b5c53958
JS
9810 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9811 }
7851fe2c 9812 break;
4f774513 9813 case CMD_GEN_REQUEST64_CR:
63e801ce
JS
9814 /* For this command calculate the xmit length of the
9815 * request bde.
9816 */
9817 xmit_len = 0;
9818 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9819 sizeof(struct ulp_bde64);
9820 for (i = 0; i < numBdes; i++) {
63e801ce 9821 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
546fc854
JS
9822 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9823 break;
63e801ce
JS
9824 xmit_len += bde.tus.f.bdeSize;
9825 }
f0d9bccc
JS
9826 /* word3 iocb=IO_TAG wqe=request_payload_len */
9827 wqe->gen_req.request_payload_len = xmit_len;
9828 /* word4 iocb=parameter wqe=relative_offset memcpy */
9829 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
4f774513
JS
9830 /* word6 context tag copied in memcpy */
9831 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9832 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9833 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9834 "2015 Invalid CT %x command 0x%x\n",
9835 ct, iocbq->iocb.ulpCommand);
9836 return IOCB_ERROR;
9837 }
f0d9bccc
JS
9838 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9839 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9840 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9841 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9842 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9843 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9844 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9845 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
af22741c 9846 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
4f774513 9847 command_type = OTHER_COMMAND;
7851fe2c 9848 break;
4f774513 9849 case CMD_XMIT_ELS_RSP64_CX:
c31098ce 9850 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513 9851 /* words0-2 BDE memcpy */
f0d9bccc
JS
9852 /* word3 iocb=iotag32 wqe=response_payload_len */
9853 wqe->xmit_els_rsp.response_payload_len = xmit_len;
939723a4
JS
9854 /* word4 */
9855 wqe->xmit_els_rsp.word4 = 0;
4f774513
JS
9856 /* word5 iocb=rsvd wqe=did */
9857 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
939723a4
JS
9858 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9859
9860 if_type = bf_get(lpfc_sli_intf_if_type,
9861 &phba->sli4_hba.sli_intf);
27d6ac0a 9862 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
939723a4
JS
9863 if (iocbq->vport->fc_flag & FC_PT2PT) {
9864 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9865 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9866 iocbq->vport->fc_myDID);
9867 if (iocbq->vport->fc_myDID == Fabric_DID) {
9868 bf_set(wqe_els_did,
9869 &wqe->xmit_els_rsp.wqe_dest, 0);
9870 }
9871 }
9872 }
f0d9bccc
JS
9873 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9874 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9875 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9876 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7851fe2c 9877 iocbq->iocb.unsli3.rcvsli3.ox_id);
4f774513 9878 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
f0d9bccc 9879 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6d368e53 9880 phba->vpi_ids[iocbq->vport->vpi]);
f0d9bccc
JS
9881 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9882 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9883 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9884 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9885 LPFC_WQE_LENLOC_WORD3);
9886 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6d368e53
JS
9887 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9888 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
ff78d8f9
JS
9889 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9890 iocbq->context2)->virt);
9891 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
939723a4
JS
9892 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9893 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
ff78d8f9 9894 iocbq->vport->fc_myDID);
939723a4
JS
9895 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9896 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
ff78d8f9
JS
9897 phba->vpi_ids[phba->pport->vpi]);
9898 }
4f774513 9899 command_type = OTHER_COMMAND;
7851fe2c 9900 break;
4f774513
JS
9901 case CMD_CLOSE_XRI_CN:
9902 case CMD_ABORT_XRI_CN:
9903 case CMD_ABORT_XRI_CX:
9904 /* words 0-2 are reserved; the memcpy should have left them 0 */
9905 /* port will send abts */
dcf2a4e0
JS
9906 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9907 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9908 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9909 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9910 } else
9911 fip = 0;
9912
9913 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
4f774513 9914 /*
dcf2a4e0
JS
9915 * The link is down, or the command was ELS_FIP
9916 * so the fw does not need to send abts
4f774513
JS
9917 * on the wire.
9918 */
9919 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9920 else
9921 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9922 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
f0d9bccc
JS
9923 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9924 wqe->abort_cmd.rsrvd5 = 0;
9925 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
4f774513
JS
9926 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9927 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
4f774513
JS
9928 /*
9929 * The abort handler will send us CMD_ABORT_XRI_CN or
9930 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9931 */
f0d9bccc
JS
9932 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9933 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9934 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9935 LPFC_WQE_LENLOC_NONE);
4f774513
JS
9936 cmnd = CMD_ABORT_XRI_CX;
9937 command_type = OTHER_COMMAND;
9938 xritag = 0;
7851fe2c 9939 break;
6669f9bb 9940 case CMD_XMIT_BLS_RSP64_CX:
6b5151fd 9941 ndlp = (struct lpfc_nodelist *)iocbq->context1;
546fc854 9942 /* As BLS ABTS RSP WQE is very different from other WQEs,
6669f9bb
JS
9943 * we re-construct this WQE here based on information in
9944 * iocbq from scratch.
9945 */
d9f492a1 9946 memset(wqe, 0, sizeof(*wqe));
5ffc266e 9947 /* OX_ID does not depend on who sent the ABTS to the CT exchange */
6669f9bb 9948 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
546fc854
JS
9949 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9950 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
5ffc266e
JS
9951 LPFC_ABTS_UNSOL_INT) {
9952 /* ABTS sent by initiator to CT exchange, the
9953 * RX_ID field will be filled with the newly
9954 * allocated responder XRI.
9955 */
9956 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9957 iocbq->sli4_xritag);
9958 } else {
9959 /* ABTS sent by responder to CT exchange, the
9960 * RX_ID field will be filled with the responder
9961 * RX_ID from ABTS.
9962 */
9963 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
546fc854 9964 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
5ffc266e 9965 }
6669f9bb
JS
9966 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9967 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6b5151fd
JS
9968
9969 /* Use CT=VPI */
9970 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9971 ndlp->nlp_DID);
9972 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9973 iocbq->iocb.ulpContext);
9974 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
6669f9bb 9975 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6b5151fd 9976 phba->vpi_ids[phba->pport->vpi]);
f0d9bccc
JS
9977 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9978 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9979 LPFC_WQE_LENLOC_NONE);
6669f9bb
JS
9980 /* Overwrite the pre-set command type with OTHER_COMMAND */
9981 command_type = OTHER_COMMAND;
546fc854
JS
9982 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9983 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9984 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9985 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9986 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9987 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9988 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9989 }
9990
7851fe2c 9991 break;
ae9e28f3 9992 case CMD_SEND_FRAME:
e62245d9
JS
9993 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
9994 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
9995 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
9996 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
9997 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
9998 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
9999 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10000 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10001 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
ae9e28f3
JS
10002 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10003 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10004 return 0;
4f774513
JS
10005 case CMD_XRI_ABORTED_CX:
10006 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
4f774513
JS
10007 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
10008 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
10009 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
10010 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
10011 default:
10012 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10013 "2014 Invalid command 0x%x\n",
10014 iocbq->iocb.ulpCommand);
10015 return IOCB_ERROR;
7851fe2c 10016 break;
4f774513 10017 }
6d368e53 10018
8012cc38
JS
10019 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
10020 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10021 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
10022 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10023 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
10024 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10025 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10026 LPFC_IO_DIF_INSERT);
f0d9bccc
JS
10027 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10028 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10029 wqe->generic.wqe_com.abort_tag = abort_tag;
10030 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10031 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10032 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10033 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
4f774513
JS
10034 return 0;
10035}
10036
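/*
 * A stand-alone illustrative sketch, not part of the driver:
 * lpfc_sli4_iocb2wqe() above fills the WQE almost entirely through bf_set()
 * macros that place a value into a named bit-field of a command word.  The
 * example below approximates that pattern with an explicit shift/mask
 * helper; the field offsets and widths used here are hypothetical and do
 * not reflect the real WQE layout.
 */
#include <assert.h>
#include <stdint.h>

/* place "value" into bits [shift, shift + width) of *word */
static void demo_set_field(uint32_t *word, unsigned int shift,
                           unsigned int width, uint32_t value)
{
        uint32_t mask = (width >= 32) ? 0xffffffffu : ((1u << width) - 1u);

        *word = (*word & ~(mask << shift)) | ((value & mask) << shift);
}

int main(void)
{
        uint32_t word7 = 0;

        demo_set_field(&word7, 0, 8, 0x8a);     /* e.g. a command opcode   */
        demo_set_field(&word7, 24, 4, 0x3);     /* e.g. a class/type field */
        assert(word7 == 0x0300008au);
        return 0;
}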
10037/**
10038 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10039 * @phba: Pointer to HBA context object.
10040 * @ring_number: SLI ring number to issue iocb on.
10041 * @piocb: Pointer to command iocb.
10042 * @flag: Flag indicating if this command can be put into txq.
10043 *
10044 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10045 * an iocb command to an HBA with SLI-4 interface spec.
10046 *
27f3efd6 10047 * This function is called with ringlock held. The function will return success
4f774513
JS
10048 * after it successfully submits the iocb to the firmware or after adding it to the
10049 * txq.
10050 **/
10051static int
10052__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10053 struct lpfc_iocbq *piocb, uint32_t flag)
10054{
10055 struct lpfc_sglq *sglq;
205e8240 10056 union lpfc_wqe128 wqe;
1ba981fd 10057 struct lpfc_queue *wq;
895427bd 10058 struct lpfc_sli_ring *pring;
4f774513 10059
895427bd
JS
10060 /* Get the WQ */
10061 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
10062 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
c00f62e6 10063 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
895427bd
JS
10064 } else {
10065 wq = phba->sli4_hba.els_wq;
10066 }
10067
10068 /* Get corresponding ring */
10069 pring = wq->pring;
1c2ba475 10070
b5c53958
JS
10071 /*
10072 * The WQE can be either 64 or 128 bytes.
b5c53958 10073 */
b5c53958 10074
cda7fa18 10075 lockdep_assert_held(&pring->ring_lock);
895427bd 10076
4f774513
JS
10077 if (piocb->sli4_xritag == NO_XRI) {
10078 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6b5151fd 10079 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
4f774513
JS
10080 sglq = NULL;
10081 else {
0e9bb8d7 10082 if (!list_empty(&pring->txq)) {
2a9bf3d0
JS
10083 if (!(flag & SLI_IOCB_RET_IOCB)) {
10084 __lpfc_sli_ringtx_put(phba,
10085 pring, piocb);
10086 return IOCB_SUCCESS;
10087 } else {
10088 return IOCB_BUSY;
10089 }
10090 } else {
895427bd 10091 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
2a9bf3d0
JS
10092 if (!sglq) {
10093 if (!(flag & SLI_IOCB_RET_IOCB)) {
10094 __lpfc_sli_ringtx_put(phba,
10095 pring,
10096 piocb);
10097 return IOCB_SUCCESS;
10098 } else
10099 return IOCB_BUSY;
10100 }
10101 }
4f774513 10102 }
2ea259ee 10103 } else if (piocb->iocb_flag & LPFC_IO_FCP)
6d368e53
JS
10104 /* These IOs already have an XRI and a mapped sgl. */
10105 sglq = NULL;
2ea259ee 10106 else {
6d368e53
JS
10107 /*
10108 * This is a continuation of a command (CX), so this
4f774513
JS
10109 * sglq is on the active list
10110 */
edccdc17 10111 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
4f774513
JS
10112 if (!sglq)
10113 return IOCB_ERROR;
10114 }
10115
10116 if (sglq) {
6d368e53 10117 piocb->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0 10118 piocb->sli4_xritag = sglq->sli4_xritag;
2a9bf3d0 10119 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
4f774513
JS
10120 return IOCB_ERROR;
10121 }
10122
205e8240 10123 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
4f774513
JS
10124 return IOCB_ERROR;
10125
205e8240 10126 if (lpfc_sli4_wq_put(wq, &wqe))
895427bd 10127 return IOCB_ERROR;
4f774513
JS
10128 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10129
10130 return 0;
10131}
10132
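/*
 * A stand-alone illustrative sketch, not part of the driver:
 * __lpfc_sli_issue_iocb_s4() above never lets a new command jump ahead of
 * work already parked on the software txq; depending on SLI_IOCB_RET_IOCB
 * it either appends the command to the txq (success) or hands it back to
 * the caller (busy).  The example below models only that decision with a
 * small array acting as the txq; the names and return codes are local
 * stand-ins, not the driver's.
 */
#include <stdio.h>

enum { DEMO_SUCCESS, DEMO_BUSY };
#define DEMO_RET_IOCB   0x1     /* caller wants the command back, not queued */

struct demo_txq {
        int cmds[16];
        int count;
};

static int demo_issue(struct demo_txq *txq, int cmd, unsigned int flag)
{
        if (txq->count > 0) {           /* keep ordering behind queued work */
                if (flag & DEMO_RET_IOCB)
                        return DEMO_BUSY;
                if (txq->count < 16)
                        txq->cmds[txq->count++] = cmd;
                return DEMO_SUCCESS;
        }
        /* txq empty: the real driver would try the hardware queue here */
        return DEMO_SUCCESS;
}

int main(void)
{
        struct demo_txq txq = { .cmds = { 42 }, .count = 1 };

        printf("%d\n", demo_issue(&txq, 43, 0));              /* 0: queued */
        printf("%d\n", demo_issue(&txq, 44, DEMO_RET_IOCB));  /* 1: busy   */
        return 0;
}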
10133/**
10134 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10135 *
10136 * This routine wraps the actual lockless IOCB issue function invoked through
10137 * the function pointer in the lpfc_hba struct.
10138 *
10139 * Return codes:
b5c53958
JS
10140 * IOCB_ERROR - Error
10141 * IOCB_SUCCESS - Success
10142 * IOCB_BUSY - Busy
4f774513 10143 **/
2a9bf3d0 10144int
4f774513
JS
10145__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10146 struct lpfc_iocbq *piocb, uint32_t flag)
10147{
10148 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10149}
10150
10151/**
25985edc 10152 * lpfc_sli_api_table_setup - Set up sli api function jump table
4f774513
JS
10153 * @phba: The hba struct for which this call is being executed.
10154 * @dev_grp: The HBA PCI-Device group number.
10155 *
10156 * This routine sets up the SLI interface API function jump table in @phba
10157 * struct.
10158 * Returns: 0 - success, -ENODEV - failure.
10159 **/
10160int
10161lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10162{
10163
10164 switch (dev_grp) {
10165 case LPFC_PCI_DEV_LP:
10166 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10167 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10168 break;
10169 case LPFC_PCI_DEV_OC:
10170 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10171 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10172 break;
10173 default:
10174 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10175 "1419 Invalid HBA PCI-device group: 0x%x\n",
10176 dev_grp);
10177 return -ENODEV;
10178 break;
10179 }
10180 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10181 return 0;
10182}
10183
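/*
 * A stand-alone illustrative sketch, not part of the driver:
 * lpfc_sli_api_table_setup() above picks the SLI-3 or SLI-4 implementation
 * once, at attach time, by storing function pointers in the hba structure,
 * so the hot path can call phba->__lpfc_sli_issue_iocb() without rechecking
 * the device family.  The example below shows the same jump-table idea with
 * made-up device groups and handlers.
 */
#include <stdio.h>

enum demo_dev_grp { DEMO_DEV_LP, DEMO_DEV_OC };

struct demo_hba {
        int (*issue)(struct demo_hba *hba, int tag);
};

static int demo_issue_s3(struct demo_hba *hba, int tag)
{
        (void)hba;
        printf("SLI-3 style issue, tag %d\n", tag);
        return 0;
}

static int demo_issue_s4(struct demo_hba *hba, int tag)
{
        (void)hba;
        printf("SLI-4 style issue, tag %d\n", tag);
        return 0;
}

static int demo_api_table_setup(struct demo_hba *hba, enum demo_dev_grp grp)
{
        switch (grp) {
        case DEMO_DEV_LP:
                hba->issue = demo_issue_s3;
                break;
        case DEMO_DEV_OC:
                hba->issue = demo_issue_s4;
                break;
        default:
                return -1;      /* unknown device group */
        }
        return 0;
}

int main(void)
{
        struct demo_hba hba;

        if (!demo_api_table_setup(&hba, DEMO_DEV_OC))
                hba.issue(&hba, 7);     /* dispatches to demo_issue_s4 */
        return 0;
}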
a1efe163 10184/**
895427bd 10185 * lpfc_sli4_calc_ring - Calculates which ring to use
a1efe163 10186 * @phba: Pointer to HBA context object.
a1efe163
JS
10187 * @piocb: Pointer to command iocb.
10188 *
895427bd
JS
10189 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10190 * hba_wqidx, thus we need to calculate the corresponding ring.
a1efe163 10191 * Since ABORTS must go on the same WQ as the command they are
895427bd 10192 * aborting, we use the command's hba_wqidx.
a1efe163 10193 */
895427bd
JS
10194struct lpfc_sli_ring *
10195lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
9bd2bff5 10196{
c490850a 10197 struct lpfc_io_buf *lpfc_cmd;
5e5b511d 10198
895427bd 10199 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
cdb42bec 10200 if (unlikely(!phba->sli4_hba.hdwq))
7370d10a
JS
10201 return NULL;
10202 /*
10203 * for abort iocb hba_wqidx should already
10204 * be set up based on what work queue we used.
10205 */
10206 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
c490850a 10207 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
1fbf9742 10208 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
9bd2bff5 10209 }
c00f62e6 10210 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
895427bd
JS
10211 } else {
10212 if (unlikely(!phba->sli4_hba.els_wq))
10213 return NULL;
10214 piocb->hba_wqidx = 0;
10215 return phba->sli4_hba.els_wq->pring;
9bd2bff5 10216 }
9bd2bff5
JS
10217}
10218
4f774513
JS
10219/**
10220 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10221 * @phba: Pointer to HBA context object.
10222 * @pring: Pointer to driver SLI ring object.
10223 * @piocb: Pointer to command iocb.
10224 * @flag: Flag indicating if this command can be put into txq.
10225 *
10226 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10227 * function. This function gets the hbalock and calls
10228 * __lpfc_sli_issue_iocb function and will return the error returned
10229 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10230 * functions which do not hold hbalock.
10231 **/
10232int
10233lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10234 struct lpfc_iocbq *piocb, uint32_t flag)
10235{
2a76a283 10236 struct lpfc_sli_ring *pring;
93a4d6f4 10237 struct lpfc_queue *eq;
4f774513 10238 unsigned long iflags;
6a828b0f 10239 int rc;
4f774513 10240
7e56aa25 10241 if (phba->sli_rev == LPFC_SLI_REV4) {
93a4d6f4
JS
10242 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10243
895427bd
JS
10244 pring = lpfc_sli4_calc_ring(phba, piocb);
10245 if (unlikely(pring == NULL))
9bd2bff5 10246 return IOCB_ERROR;
ba20c853 10247
9bd2bff5
JS
10248 spin_lock_irqsave(&pring->ring_lock, iflags);
10249 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10250 spin_unlock_irqrestore(&pring->ring_lock, iflags);
93a4d6f4
JS
10251
10252 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
7e56aa25
JS
10253 } else {
10254 /* For now, SLI2/3 will still use hbalock */
10255 spin_lock_irqsave(&phba->hbalock, iflags);
10256 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10257 spin_unlock_irqrestore(&phba->hbalock, iflags);
10258 }
4f774513
JS
10259 return rc;
10260}
10261
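/*
 * A stand-alone illustrative sketch, not part of the driver:
 * lpfc_sli_issue_iocb() above is a locking wrapper; it takes the ring lock
 * (SLI-4) or hbalock (SLI-3) and then calls the "__" lockless worker.  The
 * example below shows that convention with a pthread mutex standing in for
 * the spinlock; the names are local stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_ring_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_ring_depth;

/* lockless worker: caller must hold demo_ring_lock */
static int __demo_issue(int cmd)
{
        demo_ring_depth++;
        printf("issued cmd %d, depth now %d\n", cmd, demo_ring_depth);
        return 0;
}

/* locking wrapper used by callers that do not hold the lock */
static int demo_issue(int cmd)
{
        int rc;

        pthread_mutex_lock(&demo_ring_lock);
        rc = __demo_issue(cmd);
        pthread_mutex_unlock(&demo_ring_lock);
        return rc;
}

int main(void)
{
        return demo_issue(1);
}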
10262/**
10263 * lpfc_extra_ring_setup - Extra ring setup function
10264 * @phba: Pointer to HBA context object.
10265 *
10266 * This function is called while the driver attaches to the
10267 * HBA to set up the extra ring. The extra ring is used
10268 * only when the driver needs to support target mode functionality
10269 * or IP over FC functionality.
10270 *
895427bd 10271 * This function is called with no lock held. SLI3 only.
4f774513
JS
10272 **/
10273static int
10274lpfc_extra_ring_setup( struct lpfc_hba *phba)
10275{
10276 struct lpfc_sli *psli;
10277 struct lpfc_sli_ring *pring;
10278
10279 psli = &phba->sli;
10280
10281 /* Adjust cmd/rsp ring iocb entries more evenly */
10282
10283 /* Take some away from the FCP ring */
895427bd 10284 pring = &psli->sli3_ring[LPFC_FCP_RING];
7e56aa25
JS
10285 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10286 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10287 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10288 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e 10289
a4bc3379 10290 /* and give them to the extra ring */
895427bd 10291 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
a4bc3379 10292
7e56aa25
JS
10293 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10294 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10295 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10296 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e
JW
10297
10298 /* Setup default profile for this ring */
10299 pring->iotag_max = 4096;
10300 pring->num_mask = 1;
10301 pring->prt[0].profile = 0; /* Mask 0 */
a4bc3379
JS
10302 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10303 pring->prt[0].type = phba->cfg_multi_ring_type;
cf5bf97e
JW
10304 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10305 return 0;
10306}
10307
cb69f7de
JS
10308/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10309 * @phba: Pointer to HBA context object.
10310 * @iocbq: Pointer to iocb object.
10311 *
10312 * The async_event handler calls this routine when it receives
10313 * an ASYNC_STATUS_CN event from the port. The port generates
10314 * this event when an Abort Sequence request to an rport fails
10315 * twice in succession. The abort could be originated by the
10316 * driver or by the port. The ABTS could have been for an ELS
10317 * or FCP IO. The port only generates this event when an ABTS
10318 * fails to complete after one retry.
10319 */
10320static void
10321lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10322 struct lpfc_iocbq *iocbq)
10323{
10324 struct lpfc_nodelist *ndlp = NULL;
10325 uint16_t rpi = 0, vpi = 0;
10326 struct lpfc_vport *vport = NULL;
10327
10328 /* The rpi in the ulpContext is vport-sensitive. */
10329 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10330 rpi = iocbq->iocb.ulpContext;
10331
10332 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10333 "3092 Port generated ABTS async event "
10334 "on vpi %d rpi %d status 0x%x\n",
10335 vpi, rpi, iocbq->iocb.ulpStatus);
10336
10337 vport = lpfc_find_vport_by_vpid(phba, vpi);
10338 if (!vport)
10339 goto err_exit;
10340 ndlp = lpfc_findnode_rpi(vport, rpi);
10341 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10342 goto err_exit;
10343
10344 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10345 lpfc_sli_abts_recover_port(vport, ndlp);
10346 return;
10347
10348 err_exit:
10349 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10350 "3095 Event Context not found, no "
10351 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10352 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10353 vpi, rpi);
10354}
10355
10356/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10357 * @phba: pointer to HBA context object.
10358 * @ndlp: nodelist pointer for the impacted rport.
10359 * @axri: pointer to the wcqe containing the failed exchange.
10360 *
10361 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10362 * port. The port generates this event when an abort exchange request to an
10363 * rport fails twice in succession with no reply. The abort could be originated
10364 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10365 */
10366void
10367lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10368 struct lpfc_nodelist *ndlp,
10369 struct sli4_wcqe_xri_aborted *axri)
10370{
10371 struct lpfc_vport *vport;
5c1db2ac 10372 uint32_t ext_status = 0;
cb69f7de 10373
6b5151fd 10374 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
cb69f7de
JS
10375 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10376 "3115 Node Context not found, driver "
10377 "ignoring abts err event\n");
6b5151fd
JS
10378 return;
10379 }
10380
cb69f7de
JS
10381 vport = ndlp->vport;
10382 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10383 "3116 Port generated FCP XRI ABORT event on "
5c1db2ac 10384 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
8e668af5 10385 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
cb69f7de 10386 bf_get(lpfc_wcqe_xa_xri, axri),
5c1db2ac
JS
10387 bf_get(lpfc_wcqe_xa_status, axri),
10388 axri->parameter);
cb69f7de 10389
5c1db2ac
JS
10390 /*
10391 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10392 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10393 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10394 */
e3d2b802 10395 ext_status = axri->parameter & IOERR_PARAM_MASK;
5c1db2ac
JS
10396 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10397 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
cb69f7de
JS
10398 lpfc_sli_abts_recover_port(vport, ndlp);
10399}
10400
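/*
 * A stand-alone illustrative sketch, not part of the driver: the ABTS error
 * handler above masks the WCQE parameter down to its extended-status bits
 * and only triggers port recovery for LOCAL_REJECT with either
 * SEQUENCE_TIMEOUT or a zero extended status (the older-firmware case noted
 * in its comment).  The constants below are hypothetical stand-ins for the
 * real IOSTAT_/IOERR_ values.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_IOSTAT_LOCAL_REJECT        0x03u   /* hypothetical value */
#define DEMO_IOERR_PARAM_MASK           0x00ffu /* hypothetical mask  */
#define DEMO_IOERR_SEQUENCE_TIMEOUT     0x0au   /* hypothetical value */

static bool demo_should_recover(uint32_t status, uint32_t parameter)
{
        uint32_t ext_status = parameter & DEMO_IOERR_PARAM_MASK;

        return status == DEMO_IOSTAT_LOCAL_REJECT &&
               (ext_status == DEMO_IOERR_SEQUENCE_TIMEOUT || ext_status == 0);
}

int main(void)
{
        printf("%d\n", demo_should_recover(0x03, 0x110a));      /* 1: timeout */
        printf("%d\n", demo_should_recover(0x03, 0x1100));      /* 1: zero    */
        printf("%d\n", demo_should_recover(0x03, 0x1105));      /* 0          */
        return 0;
}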
e59058c4 10401/**
3621a710 10402 * lpfc_sli_async_event_handler - ASYNC iocb handler function
e59058c4
JS
10403 * @phba: Pointer to HBA context object.
10404 * @pring: Pointer to driver SLI ring object.
10405 * @iocbq: Pointer to iocb object.
10406 *
10407 * This function is called by the slow ring event handler
10408 * function when there is an ASYNC event iocb in the ring.
10409 * This function is called with no lock held.
10410 * Currently this function handles only temperature related
10411 * ASYNC events. The function decodes the temperature sensor
10412 * event message and posts events for the management applications.
10413 **/
98c9ea5c 10414static void
57127f15
JS
10415lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10416 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10417{
10418 IOCB_t *icmd;
10419 uint16_t evt_code;
57127f15
JS
10420 struct temp_event temp_event_data;
10421 struct Scsi_Host *shost;
a257bf90 10422 uint32_t *iocb_w;
57127f15
JS
10423
10424 icmd = &iocbq->iocb;
10425 evt_code = icmd->un.asyncstat.evt_code;
57127f15 10426
cb69f7de
JS
10427 switch (evt_code) {
10428 case ASYNC_TEMP_WARN:
10429 case ASYNC_TEMP_SAFE:
10430 temp_event_data.data = (uint32_t) icmd->ulpContext;
10431 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10432 if (evt_code == ASYNC_TEMP_WARN) {
10433 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10434 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10435 "0347 Adapter is very hot, please take "
10436 "corrective action. temperature : %d Celsius\n",
10437 (uint32_t) icmd->ulpContext);
10438 } else {
10439 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10440 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10441 "0340 Adapter temperature is OK now. "
10442 "temperature : %d Celsius\n",
10443 (uint32_t) icmd->ulpContext);
10444 }
10445
10446 /* Send temperature change event to applications */
10447 shost = lpfc_shost_from_vport(phba->pport);
10448 fc_host_post_vendor_event(shost, fc_get_event_number(),
10449 sizeof(temp_event_data), (char *) &temp_event_data,
10450 LPFC_NL_VENDOR_ID);
10451 break;
10452 case ASYNC_STATUS_CN:
10453 lpfc_sli_abts_err_handler(phba, iocbq);
10454 break;
10455 default:
a257bf90 10456 iocb_w = (uint32_t *) icmd;
cb69f7de 10457 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
76bb24ef 10458 "0346 Ring %d handler: unexpected ASYNC_STATUS"
e4e74273 10459 " evt_code 0x%x\n"
a257bf90
JS
10460 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10461 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10462 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10463 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
cb69f7de 10464 pring->ringno, icmd->un.asyncstat.evt_code,
a257bf90
JS
10465 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10466 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10467 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10468 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10469
cb69f7de 10470 break;
57127f15 10471 }
57127f15
JS
10472}
10473
10474
e59058c4 10475/**
895427bd 10476 * lpfc_sli4_setup - SLI ring setup function
e59058c4
JS
10477 * @phba: Pointer to HBA context object.
10478 *
10479 * lpfc_sli4_setup sets up rings of the SLI interface with
10480 * the number of iocbs per ring and iotags. This function is
10481 * called while the driver attaches to the HBA and before the
10482 * interrupts are enabled. So there is no need for locking.
10483 *
10484 * This function always returns 0.
10485 **/
dea3101e 10486int
895427bd
JS
10487lpfc_sli4_setup(struct lpfc_hba *phba)
10488{
10489 struct lpfc_sli_ring *pring;
10490
10491 pring = phba->sli4_hba.els_wq->pring;
10492 pring->num_mask = LPFC_MAX_RING_MASK;
10493 pring->prt[0].profile = 0; /* Mask 0 */
10494 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10495 pring->prt[0].type = FC_TYPE_ELS;
10496 pring->prt[0].lpfc_sli_rcv_unsol_event =
10497 lpfc_els_unsol_event;
10498 pring->prt[1].profile = 0; /* Mask 1 */
10499 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10500 pring->prt[1].type = FC_TYPE_ELS;
10501 pring->prt[1].lpfc_sli_rcv_unsol_event =
10502 lpfc_els_unsol_event;
10503 pring->prt[2].profile = 0; /* Mask 2 */
10504 /* NameServer Inquiry */
10505 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10506 /* NameServer */
10507 pring->prt[2].type = FC_TYPE_CT;
10508 pring->prt[2].lpfc_sli_rcv_unsol_event =
10509 lpfc_ct_unsol_event;
10510 pring->prt[3].profile = 0; /* Mask 3 */
10511 /* NameServer response */
10512 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10513 /* NameServer */
10514 pring->prt[3].type = FC_TYPE_CT;
10515 pring->prt[3].lpfc_sli_rcv_unsol_event =
10516 lpfc_ct_unsol_event;
10517 return 0;
10518}
10519
10520/**
10521 * lpfc_sli_setup - SLI ring setup function
10522 * @phba: Pointer to HBA context object.
10523 *
10524 * lpfc_sli_setup sets up rings of the SLI interface with
10525 * the number of iocbs per ring and iotags. This function is
10526 * called while the driver attaches to the HBA and before the
10527 * interrupts are enabled. So there is no need for locking.
10528 *
10529 * This function always returns 0. SLI3 only.
10530 **/
10531int
dea3101e
JB
10532lpfc_sli_setup(struct lpfc_hba *phba)
10533{
ed957684 10534 int i, totiocbsize = 0;
dea3101e
JB
10535 struct lpfc_sli *psli = &phba->sli;
10536 struct lpfc_sli_ring *pring;
10537
2a76a283 10538 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
dea3101e 10539 psli->sli_flag = 0;
dea3101e 10540
604a3e30
JB
10541 psli->iocbq_lookup = NULL;
10542 psli->iocbq_lookup_len = 0;
10543 psli->last_iotag = 0;
10544
dea3101e 10545 for (i = 0; i < psli->num_rings; i++) {
895427bd 10546 pring = &psli->sli3_ring[i];
dea3101e
JB
10547 switch (i) {
10548 case LPFC_FCP_RING: /* ring 0 - FCP */
10549 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
10550 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10551 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10552 pring->sli.sli3.numCiocb +=
10553 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10554 pring->sli.sli3.numRiocb +=
10555 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10556 pring->sli.sli3.numCiocb +=
10557 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10558 pring->sli.sli3.numRiocb +=
10559 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10560 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10561 SLI3_IOCB_CMD_SIZE :
10562 SLI2_IOCB_CMD_SIZE;
7e56aa25 10563 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10564 SLI3_IOCB_RSP_SIZE :
10565 SLI2_IOCB_RSP_SIZE;
dea3101e
JB
10566 pring->iotag_ctr = 0;
10567 pring->iotag_max =
92d7f7b0 10568 (phba->cfg_hba_queue_depth * 2);
dea3101e
JB
10569 pring->fast_iotag = pring->iotag_max;
10570 pring->num_mask = 0;
10571 break;
a4bc3379 10572 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea3101e 10573 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
10574 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10575 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10576 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10577 SLI3_IOCB_CMD_SIZE :
10578 SLI2_IOCB_CMD_SIZE;
7e56aa25 10579 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10580 SLI3_IOCB_RSP_SIZE :
10581 SLI2_IOCB_RSP_SIZE;
2e0fef85 10582 pring->iotag_max = phba->cfg_hba_queue_depth;
dea3101e
JB
10583 pring->num_mask = 0;
10584 break;
10585 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10586 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
10587 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10588 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10589 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10590 SLI3_IOCB_CMD_SIZE :
10591 SLI2_IOCB_CMD_SIZE;
7e56aa25 10592 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10593 SLI3_IOCB_RSP_SIZE :
10594 SLI2_IOCB_RSP_SIZE;
dea3101e
JB
10595 pring->fast_iotag = 0;
10596 pring->iotag_ctr = 0;
10597 pring->iotag_max = 4096;
57127f15
JS
10598 pring->lpfc_sli_rcv_async_status =
10599 lpfc_sli_async_event_handler;
6669f9bb 10600 pring->num_mask = LPFC_MAX_RING_MASK;
dea3101e 10601 pring->prt[0].profile = 0; /* Mask 0 */
6a9c52cf
JS
10602 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10603 pring->prt[0].type = FC_TYPE_ELS;
dea3101e 10604 pring->prt[0].lpfc_sli_rcv_unsol_event =
92d7f7b0 10605 lpfc_els_unsol_event;
dea3101e 10606 pring->prt[1].profile = 0; /* Mask 1 */
6a9c52cf
JS
10607 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10608 pring->prt[1].type = FC_TYPE_ELS;
dea3101e 10609 pring->prt[1].lpfc_sli_rcv_unsol_event =
92d7f7b0 10610 lpfc_els_unsol_event;
dea3101e
JB
10611 pring->prt[2].profile = 0; /* Mask 2 */
10612 /* NameServer Inquiry */
6a9c52cf 10613 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
dea3101e 10614 /* NameServer */
6a9c52cf 10615 pring->prt[2].type = FC_TYPE_CT;
dea3101e 10616 pring->prt[2].lpfc_sli_rcv_unsol_event =
92d7f7b0 10617 lpfc_ct_unsol_event;
dea3101e
JB
10618 pring->prt[3].profile = 0; /* Mask 3 */
10619 /* NameServer response */
6a9c52cf 10620 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
dea3101e 10621 /* NameServer */
6a9c52cf 10622 pring->prt[3].type = FC_TYPE_CT;
dea3101e 10623 pring->prt[3].lpfc_sli_rcv_unsol_event =
92d7f7b0 10624 lpfc_ct_unsol_event;
dea3101e
JB
10625 break;
10626 }
7e56aa25
JS
10627 totiocbsize += (pring->sli.sli3.numCiocb *
10628 pring->sli.sli3.sizeCiocb) +
10629 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
dea3101e 10630 }
ed957684 10631 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea3101e 10632 /* Too many cmd / rsp ring entries in SLI2 SLIM */
e8b62011
JS
10633 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10634 "SLI2 SLIM Data: x%x x%lx\n",
10635 phba->brd_no, totiocbsize,
10636 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea3101e 10637 }
cf5bf97e
JW
10638 if (phba->cfg_multi_ring_support == 2)
10639 lpfc_extra_ring_setup(phba);
dea3101e
JB
10640
10641 return 0;
10642}
10643
e59058c4 10644/**
895427bd 10645 * lpfc_sli4_queue_init - Queue initialization function
e59058c4
JS
10646 * @phba: Pointer to HBA context object.
10647 *
895427bd 10648 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
e59058c4
JS
10649 * ring. This function also initializes ring indices of each ring.
10650 * This function is called during the initialization of the SLI
10651 * interface of an HBA.
10652 * This function is called with no lock held and does not
10653 * return a value.
10654 **/
895427bd
JS
10655void
10656lpfc_sli4_queue_init(struct lpfc_hba *phba)
dea3101e
JB
10657{
10658 struct lpfc_sli *psli;
10659 struct lpfc_sli_ring *pring;
604a3e30 10660 int i;
dea3101e
JB
10661
10662 psli = &phba->sli;
2e0fef85 10663 spin_lock_irq(&phba->hbalock);
dea3101e 10664 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 10665 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e 10666 /* Initialize list headers for txq and txcmplq as double linked lists */
cdb42bec 10667 for (i = 0; i < phba->cfg_hdw_queue; i++) {
c00f62e6 10668 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
895427bd
JS
10669 pring->flag = 0;
10670 pring->ringno = LPFC_FCP_RING;
c490850a 10671 pring->txcmplq_cnt = 0;
895427bd
JS
10672 INIT_LIST_HEAD(&pring->txq);
10673 INIT_LIST_HEAD(&pring->txcmplq);
10674 INIT_LIST_HEAD(&pring->iocb_continueq);
10675 spin_lock_init(&pring->ring_lock);
10676 }
10677 pring = phba->sli4_hba.els_wq->pring;
10678 pring->flag = 0;
10679 pring->ringno = LPFC_ELS_RING;
c490850a 10680 pring->txcmplq_cnt = 0;
895427bd
JS
10681 INIT_LIST_HEAD(&pring->txq);
10682 INIT_LIST_HEAD(&pring->txcmplq);
10683 INIT_LIST_HEAD(&pring->iocb_continueq);
10684 spin_lock_init(&pring->ring_lock);
dea3101e 10685
cdb42bec 10686 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
895427bd
JS
10687 pring = phba->sli4_hba.nvmels_wq->pring;
10688 pring->flag = 0;
10689 pring->ringno = LPFC_ELS_RING;
c490850a 10690 pring->txcmplq_cnt = 0;
895427bd
JS
10691 INIT_LIST_HEAD(&pring->txq);
10692 INIT_LIST_HEAD(&pring->txcmplq);
10693 INIT_LIST_HEAD(&pring->iocb_continueq);
10694 spin_lock_init(&pring->ring_lock);
10695 }
10696
10697 spin_unlock_irq(&phba->hbalock);
10698}
10699
10700/**
10701 * lpfc_sli_queue_init - Queue initialization function
10702 * @phba: Pointer to HBA context object.
10703 *
10704 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10705 * ring. This function also initializes ring indices of each ring.
10706 * This function is called during the initialization of the SLI
10707 * interface of an HBA.
10708 * This function is called with no lock held and does not
10709 * return a value.
10710 **/
10711void
10712lpfc_sli_queue_init(struct lpfc_hba *phba)
dea3101e
JB
10713{
10714 struct lpfc_sli *psli;
10715 struct lpfc_sli_ring *pring;
604a3e30 10716 int i;
dea3101e
JB
10717
10718 psli = &phba->sli;
2e0fef85 10719 spin_lock_irq(&phba->hbalock);
dea3101e 10720 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 10721 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e
JB
10722 /* Initialize list headers for txq and txcmplq as double linked lists */
10723 for (i = 0; i < psli->num_rings; i++) {
895427bd 10724 pring = &psli->sli3_ring[i];
dea3101e 10725 pring->ringno = i;
7e56aa25
JS
10726 pring->sli.sli3.next_cmdidx = 0;
10727 pring->sli.sli3.local_getidx = 0;
10728 pring->sli.sli3.cmdidx = 0;
dea3101e 10729 INIT_LIST_HEAD(&pring->iocb_continueq);
9c2face6 10730 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea3101e 10731 INIT_LIST_HEAD(&pring->postbufq);
895427bd
JS
10732 pring->flag = 0;
10733 INIT_LIST_HEAD(&pring->txq);
10734 INIT_LIST_HEAD(&pring->txcmplq);
7e56aa25 10735 spin_lock_init(&pring->ring_lock);
dea3101e 10736 }
2e0fef85 10737 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
10738}
10739
04c68496
JS
10740/**
10741 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10742 * @phba: Pointer to HBA context object.
10743 *
10744 * This routine flushes the mailbox command subsystem. It will unconditionally
10745 * flush all the mailbox commands in the three possible stages in the mailbox
10746 * command sub-system: pending mailbox command queue; the outstanding mailbox
10747 * command; and the completed mailbox command queue. It is the caller's responsibility
10748 * to make sure that the driver is in the proper state to flush the mailbox
10749 * command sub-system. Namely, the posting of mailbox commands into the
10750 * pending mailbox command queue from the various clients must be stopped;
10751 * either the HBA is in a state in which it will never work on the outstanding
10752 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10753 * mailbox command has been completed.
10754 **/
10755static void
10756lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10757{
10758 LIST_HEAD(completions);
10759 struct lpfc_sli *psli = &phba->sli;
10760 LPFC_MBOXQ_t *pmb;
10761 unsigned long iflag;
10762
523128e5
JS
10763 /* Disable softirqs, including timers from obtaining phba->hbalock */
10764 local_bh_disable();
10765
04c68496
JS
10766 /* Flush all the mailbox commands in the mbox system */
10767 spin_lock_irqsave(&phba->hbalock, iflag);
523128e5 10768
04c68496
JS
10769 /* The pending mailbox command queue */
10770 list_splice_init(&phba->sli.mboxq, &completions);
10771 /* The outstanding active mailbox command */
10772 if (psli->mbox_active) {
10773 list_add_tail(&psli->mbox_active->list, &completions);
10774 psli->mbox_active = NULL;
10775 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10776 }
10777 /* The completed mailbox command queue */
10778 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10779 spin_unlock_irqrestore(&phba->hbalock, iflag);
10780
523128e5
JS
10781 /* Enable softirqs again, done with phba->hbalock */
10782 local_bh_enable();
10783
04c68496
JS
10784 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10785 while (!list_empty(&completions)) {
10786 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10787 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10788 if (pmb->mbox_cmpl)
10789 pmb->mbox_cmpl(phba, pmb);
10790 }
10791}
10792
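/*
 * A stand-alone illustrative sketch, not part of the driver: the mailbox
 * flush above gathers everything onto a private "completions" list while
 * holding hbalock, drops the lock, and only then walks the private list
 * invoking completion handlers.  The example below reproduces that
 * splice-then-complete pattern with a minimal singly linked list and a
 * pthread mutex standing in for the spinlock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_cmd {
        int id;
        struct demo_cmd *next;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_cmd *demo_pending;   /* protected by demo_lock */

static void demo_post(int id)
{
        struct demo_cmd *cmd = malloc(sizeof(*cmd));

        if (!cmd)
                return;
        cmd->id = id;
        pthread_mutex_lock(&demo_lock);
        cmd->next = demo_pending;
        demo_pending = cmd;
        pthread_mutex_unlock(&demo_lock);
}

static void demo_flush(void)
{
        struct demo_cmd *completions, *cmd;

        /* splice the whole pending list off while holding the lock */
        pthread_mutex_lock(&demo_lock);
        completions = demo_pending;
        demo_pending = NULL;
        pthread_mutex_unlock(&demo_lock);

        /* complete each entry without the lock held */
        while (completions) {
                cmd = completions;
                completions = cmd->next;
                printf("completing cmd %d with NOT_FINISHED status\n", cmd->id);
                free(cmd);
        }
}

int main(void)
{
        demo_post(1);
        demo_post(2);
        demo_flush();
        return 0;
}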
e59058c4 10793/**
3621a710 10794 * lpfc_sli_host_down - Vport cleanup function
e59058c4
JS
10795 * @vport: Pointer to virtual port object.
10796 *
10797 * lpfc_sli_host_down is called to clean up the resources
10798 * associated with a vport before destroying virtual
10799 * port data structures.
10800 * This function does the following operations:
10801 * - Free discovery resources associated with this virtual
10802 * port.
10803 * - Free iocbs associated with this virtual port in
10804 * the txq.
10805 * - Send abort for all iocb commands associated with this
10806 * vport in txcmplq.
10807 *
10808 * This function is called with no lock held and always returns 1.
10809 **/
92d7f7b0
JS
10810int
10811lpfc_sli_host_down(struct lpfc_vport *vport)
10812{
858c9f6c 10813 LIST_HEAD(completions);
92d7f7b0
JS
10814 struct lpfc_hba *phba = vport->phba;
10815 struct lpfc_sli *psli = &phba->sli;
895427bd 10816 struct lpfc_queue *qp = NULL;
92d7f7b0
JS
10817 struct lpfc_sli_ring *pring;
10818 struct lpfc_iocbq *iocb, *next_iocb;
92d7f7b0
JS
10819 int i;
10820 unsigned long flags = 0;
10821 uint16_t prev_pring_flag;
10822
10823 lpfc_cleanup_discovery_resources(vport);
10824
10825 spin_lock_irqsave(&phba->hbalock, flags);
92d7f7b0 10826
895427bd
JS
10827 /*
10828 * Error everything on the txq since these iocbs
10829 * have not been given to the FW yet.
10830 * Also issue ABTS for everything on the txcmplq
10831 */
10832 if (phba->sli_rev != LPFC_SLI_REV4) {
10833 for (i = 0; i < psli->num_rings; i++) {
10834 pring = &psli->sli3_ring[i];
10835 prev_pring_flag = pring->flag;
10836 /* Only slow rings */
10837 if (pring->ringno == LPFC_ELS_RING) {
10838 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10839 /* Set the lpfc data pending flag */
10840 set_bit(LPFC_DATA_READY, &phba->data_flags);
10841 }
10842 list_for_each_entry_safe(iocb, next_iocb,
10843 &pring->txq, list) {
10844 if (iocb->vport != vport)
10845 continue;
10846 list_move_tail(&iocb->list, &completions);
10847 }
10848 list_for_each_entry_safe(iocb, next_iocb,
10849 &pring->txcmplq, list) {
10850 if (iocb->vport != vport)
10851 continue;
10852 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10853 }
10854 pring->flag = prev_pring_flag;
10855 }
10856 } else {
10857 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10858 pring = qp->pring;
10859 if (!pring)
92d7f7b0 10860 continue;
895427bd
JS
10861 if (pring == phba->sli4_hba.els_wq->pring) {
10862 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10863 /* Set the lpfc data pending flag */
10864 set_bit(LPFC_DATA_READY, &phba->data_flags);
10865 }
10866 prev_pring_flag = pring->flag;
65a3df63 10867 spin_lock(&pring->ring_lock);
895427bd
JS
10868 list_for_each_entry_safe(iocb, next_iocb,
10869 &pring->txq, list) {
10870 if (iocb->vport != vport)
10871 continue;
10872 list_move_tail(&iocb->list, &completions);
10873 }
65a3df63 10874 spin_unlock(&pring->ring_lock);
895427bd
JS
10875 list_for_each_entry_safe(iocb, next_iocb,
10876 &pring->txcmplq, list) {
10877 if (iocb->vport != vport)
10878 continue;
10879 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10880 }
10881 pring->flag = prev_pring_flag;
92d7f7b0 10882 }
92d7f7b0 10883 }
92d7f7b0
JS
10884 spin_unlock_irqrestore(&phba->hbalock, flags);
10885
a257bf90
JS
10886 /* Cancel all the IOCBs from the completions list */
10887 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10888 IOERR_SLI_DOWN);
92d7f7b0
JS
10889 return 1;
10890}
10891
e59058c4 10892/**
3621a710 10893 * lpfc_sli_hba_down - Resource cleanup function for the HBA
e59058c4
JS
10894 * @phba: Pointer to HBA context object.
10895 *
10896 * This function cleans up all iocbs, buffers, and mailbox commands
10897 * while shutting down the HBA. This function is called with no
10898 * lock held and always returns 1.
10899 * This function does the following to clean up driver resources:
10900 * - Free discovery resources for each virtual port
10901 * - Cleanup any pending fabric iocbs
10902 * - Iterate through the iocb txq and free each entry
10903 * in the list.
10904 * - Free up any buffer posted to the HBA
10905 * - Free mailbox commands in the mailbox queue.
10906 **/
dea3101e 10907int
2e0fef85 10908lpfc_sli_hba_down(struct lpfc_hba *phba)
dea3101e 10909{
2534ba75 10910 LIST_HEAD(completions);
2e0fef85 10911 struct lpfc_sli *psli = &phba->sli;
895427bd 10912 struct lpfc_queue *qp = NULL;
dea3101e 10913 struct lpfc_sli_ring *pring;
0ff10d46 10914 struct lpfc_dmabuf *buf_ptr;
dea3101e 10915 unsigned long flags = 0;
04c68496
JS
10916 int i;
10917
10918 /* Shutdown the mailbox command sub-system */
618a5230 10919 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
dea3101e 10920
dea3101e
JB
10921 lpfc_hba_down_prep(phba);
10922
523128e5
JS
10923 /* Disable softirqs, including timers from obtaining phba->hbalock */
10924 local_bh_disable();
10925
92d7f7b0
JS
10926 lpfc_fabric_abort_hba(phba);
10927
2e0fef85 10928 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 10929
895427bd
JS
10930 /*
10931 * Error everything on the txq since these iocbs
10932 * have not been given to the FW yet.
10933 */
10934 if (phba->sli_rev != LPFC_SLI_REV4) {
10935 for (i = 0; i < psli->num_rings; i++) {
10936 pring = &psli->sli3_ring[i];
10937 /* Only slow rings */
10938 if (pring->ringno == LPFC_ELS_RING) {
10939 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10940 /* Set the lpfc data pending flag */
10941 set_bit(LPFC_DATA_READY, &phba->data_flags);
10942 }
10943 list_splice_init(&pring->txq, &completions);
10944 }
10945 } else {
10946 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10947 pring = qp->pring;
10948 if (!pring)
10949 continue;
4b0a42be 10950 spin_lock(&pring->ring_lock);
895427bd 10951 list_splice_init(&pring->txq, &completions);
4b0a42be 10952 spin_unlock(&pring->ring_lock);
895427bd
JS
10953 if (pring == phba->sli4_hba.els_wq->pring) {
10954 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10955 /* Set the lpfc data pending flag */
10956 set_bit(LPFC_DATA_READY, &phba->data_flags);
10957 }
10958 }
2534ba75 10959 }
2e0fef85 10960 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 10961
a257bf90
JS
10962 /* Cancel all the IOCBs from the completions list */
10963 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10964 IOERR_SLI_DOWN);
dea3101e 10965
0ff10d46
JS
10966 spin_lock_irqsave(&phba->hbalock, flags);
10967 list_splice_init(&phba->elsbuf, &completions);
10968 phba->elsbuf_cnt = 0;
10969 phba->elsbuf_prev_cnt = 0;
10970 spin_unlock_irqrestore(&phba->hbalock, flags);
10971
10972 while (!list_empty(&completions)) {
10973 list_remove_head(&completions, buf_ptr,
10974 struct lpfc_dmabuf, list);
10975 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10976 kfree(buf_ptr);
10977 }
10978
523128e5
JS
10979 /* Enable softirqs again, done with phba->hbalock */
10980 local_bh_enable();
10981
dea3101e
JB
10982 /* Return any active mbox cmds */
10983 del_timer_sync(&psli->mbox_tmo);
2e0fef85 10984
da0436e9 10985 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
2e0fef85 10986 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
da0436e9 10987 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
2e0fef85 10988
da0436e9
JS
10989 return 1;
10990}
10991
e59058c4 10992/**
3621a710 10993 * lpfc_sli_pcimem_bcopy - SLI memory copy function
e59058c4
JS
10994 * @srcp: Source memory pointer.
10995 * @destp: Destination memory pointer.
10996 * @cnt: Number of bytes to copy (a multiple of the 32-bit word size).
10997 *
10998 * This function is used for copying data between driver memory
10999 * and the SLI memory. This function also changes the endianness
11000 * of each word if native endianness is different from SLI
11001 * endianness. This function can be called with or without
11002 * lock.
11003 **/
dea3101e
JB
11004void
11005lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11006{
11007 uint32_t *src = srcp;
11008 uint32_t *dest = destp;
11009 uint32_t ldata;
11010 int i;
11011
11012 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
11013 ldata = *src;
11014 ldata = le32_to_cpu(ldata);
11015 *dest = ldata;
11016 src++;
11017 dest++;
11018 }
11019}
11020
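/*
 * Illustrative sketch (example only, not part of the driver):
 * lpfc_sli_pcimem_bcopy() treats @cnt as a byte count and converts each
 * 32-bit word from little-endian SLI order to host order. A hypothetical
 * caller copying a mailbox image might look like this; "shadow" is an
 * assumed local buffer, not a real driver field.
 */
#if 0
static void example_copy_mbox_words(MAILBOX_t *mbx_dma, MAILBOX_t *shadow)
{
	/* cnt is in bytes; each word is byte-swapped on big-endian hosts */
	lpfc_sli_pcimem_bcopy(mbx_dma, shadow, sizeof(MAILBOX_t));
}
#endif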
e59058c4 11021
a0c87cbd
JS
11022/**
11023 * lpfc_sli_bemem_bcopy - SLI memory copy function
11024 * @srcp: Source memory pointer.
11025 * @destp: Destination memory pointer.
11026 * @cnt: Number of bytes to copy (a multiple of the 32-bit word size).
11027 *
11028 * This function is used for copying data from a data structure
11029 * with big endian representation to local endianness.
11030 * This function can be called with or without lock.
11031 **/
11032void
11033lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11034{
11035 uint32_t *src = srcp;
11036 uint32_t *dest = destp;
11037 uint32_t ldata;
11038 int i;
11039
11040 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11041 ldata = *src;
11042 ldata = be32_to_cpu(ldata);
11043 *dest = ldata;
11044 src++;
11045 dest++;
11046 }
11047}
11048
e59058c4 11049/**
3621a710 11050 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
e59058c4
JS
11051 * @phba: Pointer to HBA context object.
11052 * @pring: Pointer to driver SLI ring object.
11053 * @mp: Pointer to driver buffer object.
11054 *
11055 * This function is called with no lock held.
11056 * It always returns zero after adding the buffer to the postbufq
11057 * buffer list.
11058 **/
dea3101e 11059int
2e0fef85
JS
11060lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11061 struct lpfc_dmabuf *mp)
dea3101e
JB
11062{
11063 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
11064 later */
2e0fef85 11065 spin_lock_irq(&phba->hbalock);
dea3101e 11066 list_add_tail(&mp->list, &pring->postbufq);
dea3101e 11067 pring->postbufq_cnt++;
2e0fef85 11068 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
11069 return 0;
11070}
11071
e59058c4 11072/**
3621a710 11073 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
e59058c4
JS
11074 * @phba: Pointer to HBA context object.
11075 *
11076 * When HBQ is enabled, buffers are searched based on tags. This function
11077 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
11078 * tag is bit-wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
11079 * does not conflict with tags of buffers posted for unsolicited events.
11080 * The function returns the allocated tag. The function is called with
11081 * no locks held.
11082 **/
76bb24ef
JS
11083uint32_t
11084lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11085{
11086 spin_lock_irq(&phba->hbalock);
11087 phba->buffer_tag_count++;
11088 /*
11089 * Always set the QUE_BUFTAG_BIT to distinguish this tag
11090 * from a tag assigned by HBQ.
11091 */
11092 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11093 spin_unlock_irq(&phba->hbalock);
11094 return phba->buffer_tag_count;
11095}
11096
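/*
 * Illustrative sketch (example only, not part of the driver): a
 * hypothetical caller posting a buffer with CMD_QUE_XRI64_CX tags it
 * with lpfc_sli_get_buffer_tag() and queues it on the ring's postbufq
 * so it can later be recovered with lpfc_sli_ring_taggedbuf_get().
 * "mp" is an assumed, already allocated lpfc_dmabuf.
 */
#if 0
static void example_tag_and_post(struct lpfc_hba *phba,
				 struct lpfc_sli_ring *pring,
				 struct lpfc_dmabuf *mp)
{
	/* The returned tag has QUE_BUFTAG_BIT set, so it cannot collide
	 * with tags assigned to HBQ buffers.
	 */
	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
	lpfc_sli_ringpostbuf_put(phba, pring, mp);
}
#endif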
e59058c4 11097/**
3621a710 11098 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
e59058c4
JS
11099 * @phba: Pointer to HBA context object.
11100 * @pring: Pointer to driver SLI ring object.
11101 * @tag: Buffer tag.
11102 *
11103 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
11104 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
11105 * iocb is posted to the response ring with the tag of the buffer.
11106 * This function searches the pring->postbufq list using the tag
11107 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
11108 * iocb. If the buffer is found, the lpfc_dmabuf object of the
11109 * buffer is returned to the caller; otherwise NULL is returned.
11110 * This function is called with no lock held.
11111 **/
76bb24ef
JS
11112struct lpfc_dmabuf *
11113lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11114 uint32_t tag)
11115{
11116 struct lpfc_dmabuf *mp, *next_mp;
11117 struct list_head *slp = &pring->postbufq;
11118
25985edc 11119 /* Search postbufq, from the beginning, looking for a match on tag */
76bb24ef
JS
11120 spin_lock_irq(&phba->hbalock);
11121 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11122 if (mp->buffer_tag == tag) {
11123 list_del_init(&mp->list);
11124 pring->postbufq_cnt--;
11125 spin_unlock_irq(&phba->hbalock);
11126 return mp;
11127 }
11128 }
11129
11130 spin_unlock_irq(&phba->hbalock);
11131 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d7c255b2 11132 "0402 Cannot find virtual addr for buffer tag on "
32350664 11133 "ring %d Data x%lx x%px x%px x%x\n",
76bb24ef
JS
11134 pring->ringno, (unsigned long) tag,
11135 slp->next, slp->prev, pring->postbufq_cnt);
11136
11137 return NULL;
11138}
dea3101e 11139
e59058c4 11140/**
3621a710 11141 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
e59058c4
JS
11142 * @phba: Pointer to HBA context object.
11143 * @pring: Pointer to driver SLI ring object.
11144 * @phys: DMA address of the buffer.
11145 *
11146 * This function searches the buffer list using the dma_address
11147 * of unsolicited event to find the driver's lpfc_dmabuf object
11148 * corresponding to the dma_address. The function returns the
11149 * lpfc_dmabuf object if a buffer is found else it returns NULL.
11150 * This function is called by the ct and els unsolicited event
11151 * handlers to get the buffer associated with the unsolicited
11152 * event.
11153 *
11154 * This function is called with no lock held.
11155 **/
dea3101e
JB
11156struct lpfc_dmabuf *
11157lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11158 dma_addr_t phys)
11159{
11160 struct lpfc_dmabuf *mp, *next_mp;
11161 struct list_head *slp = &pring->postbufq;
11162
25985edc 11163 /* Search postbufq, from the beginning, looking for a match on phys */
2e0fef85 11164 spin_lock_irq(&phba->hbalock);
dea3101e
JB
11165 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11166 if (mp->phys == phys) {
11167 list_del_init(&mp->list);
11168 pring->postbufq_cnt--;
2e0fef85 11169 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
11170 return mp;
11171 }
11172 }
11173
2e0fef85 11174 spin_unlock_irq(&phba->hbalock);
dea3101e 11175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 11176 "0410 Cannot find virtual addr for mapped buf on "
32350664 11177 "ring %d Data x%llx x%px x%px x%x\n",
e8b62011 11178 pring->ringno, (unsigned long long)phys,
dea3101e
JB
11179 slp->next, slp->prev, pring->postbufq_cnt);
11180 return NULL;
11181}
11182
e59058c4 11183/**
3621a710 11184 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
e59058c4
JS
11185 * @phba: Pointer to HBA context object.
11186 * @cmdiocb: Pointer to driver command iocb object.
11187 * @rspiocb: Pointer to driver response iocb object.
11188 *
11189 * This function is the completion handler for the abort iocbs for
11190 * ELS commands. This function is called from the ELS ring event
11191 * handler with no lock held. This function frees memory resources
11192 * associated with the abort iocb.
11193 **/
dea3101e 11194static void
2e0fef85
JS
11195lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11196 struct lpfc_iocbq *rspiocb)
dea3101e 11197{
2e0fef85 11198 IOCB_t *irsp = &rspiocb->iocb;
2680eeaa 11199 uint16_t abort_iotag, abort_context;
ff78d8f9 11200 struct lpfc_iocbq *abort_iocb = NULL;
2680eeaa
JS
11201
11202 if (irsp->ulpStatus) {
ff78d8f9
JS
11203
11204 /*
11205 * Assume that the port already completed and returned, or
11206 * will return the iocb. Just log the message.
11207 */
2680eeaa
JS
11208 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11209 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11210
2e0fef85 11211 spin_lock_irq(&phba->hbalock);
45ed1190 11212 if (phba->sli_rev < LPFC_SLI_REV4) {
faa832e9
JS
11213 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11214 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11215 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11216 spin_unlock_irq(&phba->hbalock);
11217 goto release_iocb;
11218 }
45ed1190
JS
11219 if (abort_iotag != 0 &&
11220 abort_iotag <= phba->sli.last_iotag)
11221 abort_iocb =
11222 phba->sli.iocbq_lookup[abort_iotag];
11223 } else
11224 /* For sli4 the abort_tag is the XRI,
11225 * so the abort routine puts the iotag of the iocb
11226 * being aborted in the context field of the abort
11227 * IOCB.
11228 */
11229 abort_iocb = phba->sli.iocbq_lookup[abort_context];
2680eeaa 11230
2a9bf3d0 11231 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
32350664 11232 "0327 Cannot abort els iocb x%px "
2a9bf3d0
JS
11233 "with tag %x context %x, abort status %x, "
11234 "abort code %x\n",
11235 abort_iocb, abort_iotag, abort_context,
11236 irsp->ulpStatus, irsp->un.ulpWord[4]);
341af102 11237
ff78d8f9 11238 spin_unlock_irq(&phba->hbalock);
2680eeaa 11239 }
faa832e9 11240release_iocb:
604a3e30 11241 lpfc_sli_release_iocbq(phba, cmdiocb);
dea3101e
JB
11242 return;
11243}
11244
e59058c4 11245/**
3621a710 11246 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
e59058c4
JS
11247 * @phba: Pointer to HBA context object.
11248 * @cmdiocb: Pointer to driver command iocb object.
11249 * @rspiocb: Pointer to driver response iocb object.
11250 *
11251 * The function is called from SLI ring event handler with no
11252 * lock held. This function is the completion handler for ELS commands
11253 * which are aborted. The function frees memory resources used for
11254 * the aborted ELS commands.
11255 **/
92d7f7b0
JS
11256static void
11257lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11258 struct lpfc_iocbq *rspiocb)
11259{
11260 IOCB_t *irsp = &rspiocb->iocb;
11261
11262 /* ELS cmd tag <ulpIoTag> completes */
11263 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
d7c255b2 11264 "0139 Ignoring ELS cmd tag x%x completion Data: "
92d7f7b0 11265 "x%x x%x x%x\n",
e8b62011 11266 irsp->ulpIoTag, irsp->ulpStatus,
92d7f7b0 11267 irsp->un.ulpWord[4], irsp->ulpTimeout);
858c9f6c
JS
11268 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11269 lpfc_ct_free_iocb(phba, cmdiocb);
11270 else
11271 lpfc_els_free_iocb(phba, cmdiocb);
92d7f7b0
JS
11272 return;
11273}
11274
e59058c4 11275/**
5af5eee7 11276 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
e59058c4
JS
11277 * @phba: Pointer to HBA context object.
11278 * @pring: Pointer to driver SLI ring object.
11279 * @cmdiocb: Pointer to driver command iocb object.
11280 *
5af5eee7
JS
11281 * This function issues an abort iocb for the provided command iocb down to
11282 * the port. Other than the case where the outstanding command iocb is an
11283 * abort request, this function issues the abort unconditionally. This function is
11284 * called with hbalock held. The function returns 0 when it fails due to
11285 * memory allocation failure or when the command iocb is an abort request.
88acb4d9 11286 * The hbalock is asserted held in the code path calling this routine.
e59058c4 11287 **/
5af5eee7
JS
11288static int
11289lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 11290 struct lpfc_iocbq *cmdiocb)
dea3101e 11291{
2e0fef85 11292 struct lpfc_vport *vport = cmdiocb->vport;
0bd4ca25 11293 struct lpfc_iocbq *abtsiocbp;
dea3101e
JB
11294 IOCB_t *icmd = NULL;
11295 IOCB_t *iabt = NULL;
5af5eee7 11296 int retval;
7e56aa25 11297 unsigned long iflags;
faa832e9 11298 struct lpfc_nodelist *ndlp;
07951076 11299
92d7f7b0
JS
11300 /*
11301 * There are certain command types we don't want to abort. And we
11302 * don't want to abort commands that are already in the process of
11303 * being aborted.
07951076
JS
11304 */
11305 icmd = &cmdiocb->iocb;
2e0fef85 11306 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
92d7f7b0
JS
11307 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11308 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
07951076
JS
11309 return 0;
11310
dea3101e 11311 /* issue ABTS for this IOCB based on iotag */
92d7f7b0 11312 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e
JB
11313 if (abtsiocbp == NULL)
11314 return 0;
dea3101e 11315
07951076 11316 /* This signals the response to set the correct status
341af102 11317 * before calling the completion handler
07951076
JS
11318 */
11319 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11320
dea3101e 11321 iabt = &abtsiocbp->iocb;
07951076
JS
11322 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11323 iabt->un.acxri.abortContextTag = icmd->ulpContext;
45ed1190 11324 if (phba->sli_rev == LPFC_SLI_REV4) {
da0436e9 11325 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
45ed1190 11326 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
faa832e9 11327 } else {
da0436e9 11328 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
faa832e9
JS
11329 if (pring->ringno == LPFC_ELS_RING) {
11330 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11331 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11332 }
11333 }
07951076
JS
11334 iabt->ulpLe = 1;
11335 iabt->ulpClass = icmd->ulpClass;
dea3101e 11336
5ffc266e 11337 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 11338 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
341af102
JS
11339 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11340 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
11341 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11342 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
5ffc266e 11343
2e0fef85 11344 if (phba->link_state >= LPFC_LINK_UP)
07951076
JS
11345 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11346 else
11347 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 11348
07951076 11349 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
e6c6acc0 11350 abtsiocbp->vport = vport;
5b8bd0c9 11351
e8b62011
JS
11352 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11353 "0339 Abort xri x%x, original iotag x%x, "
11354 "abort cmd iotag x%x\n",
2a9bf3d0 11355 iabt->un.acxri.abortIoTag,
e8b62011 11356 iabt->un.acxri.abortContextTag,
2a9bf3d0 11357 abtsiocbp->iotag);
7e56aa25
JS
11358
11359 if (phba->sli_rev == LPFC_SLI_REV4) {
895427bd
JS
11360 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11361 if (unlikely(pring == NULL))
9bd2bff5 11362 return 0;
7e56aa25
JS
11363 /* Note: both hbalock and ring_lock need to be set here */
11364 spin_lock_irqsave(&pring->ring_lock, iflags);
11365 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11366 abtsiocbp, 0);
11367 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11368 } else {
11369 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11370 abtsiocbp, 0);
11371 }
dea3101e 11372
d7c255b2
JS
11373 if (retval)
11374 __lpfc_sli_release_iocbq(phba, abtsiocbp);
5af5eee7
JS
11375
11376 /*
11377 * Caller to this routine should check for IOCB_ERROR
11378 * and handle it properly. This routine no longer removes
11379 * iocb off txcmplq and call compl in case of IOCB_ERROR.
11380 */
11381 return retval;
11382}
11383
11384/**
11385 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11386 * @phba: Pointer to HBA context object.
11387 * @pring: Pointer to driver SLI ring object.
11388 * @cmdiocb: Pointer to driver command iocb object.
11389 *
11390 * This function issues an abort iocb for the provided command iocb. In case
11391 * of unloading, the abort iocb will not be issued to commands on the ELS
11392 * ring. Instead, the callback function of those commands shall be changed
11393 * so that nothing happens when they finish. This function is called with
11394 * hbalock held. The function returns 0 when the command iocb is an abort
11395 * request.
11396 **/
11397int
11398lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11399 struct lpfc_iocbq *cmdiocb)
11400{
11401 struct lpfc_vport *vport = cmdiocb->vport;
11402 int retval = IOCB_ERROR;
11403 IOCB_t *icmd = NULL;
11404
1c2ba475
JT
11405 lockdep_assert_held(&phba->hbalock);
11406
5af5eee7
JS
11407 /*
11408 * There are certain command types we don't want to abort. And we
11409 * don't want to abort commands that are already in the process of
11410 * being aborted.
11411 */
11412 icmd = &cmdiocb->iocb;
11413 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11414 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11415 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11416 return 0;
11417
1234a6d5
DK
11418 if (!pring) {
11419 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11420 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11421 else
11422 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11423 goto abort_iotag_exit;
11424 }
11425
5af5eee7
JS
11426 /*
11427 * If we're unloading, don't abort iocb on the ELS ring, but change
11428 * the callback so that nothing happens when it finishes.
11429 */
11430 if ((vport->load_flag & FC_UNLOADING) &&
11431 (pring->ringno == LPFC_ELS_RING)) {
11432 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11433 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11434 else
11435 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11436 goto abort_iotag_exit;
11437 }
11438
11439 /* Now, we try to issue the abort to the cmdiocb out */
11440 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11441
07951076 11442abort_iotag_exit:
2e0fef85
JS
11443 /*
11444 * Caller to this routine should check for IOCB_ERROR
11445 * and handle it properly. This routine no longer removes
11446 * iocb off txcmplq and call compl in case of IOCB_ERROR.
07951076 11447 */
2e0fef85 11448 return retval;
dea3101e
JB
11449}
11450
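/*
 * Illustrative sketch (example only, not part of the driver): callers of
 * lpfc_sli_issue_abort_iotag() walk a txcmplq under phba->hbalock and
 * issue an abort for each matching iocb, exactly as lpfc_sli_host_down()
 * does above. This hypothetical helper aborts all outstanding iocbs of
 * one vport on a given ring.
 */
#if 0
static void example_abort_vport_iocbs(struct lpfc_hba *phba,
				      struct lpfc_sli_ring *pring,
				      struct lpfc_vport *vport)
{
	struct lpfc_iocbq *iocb, *next_iocb;

	spin_lock_irq(&phba->hbalock);	/* hbalock must be held */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->vport != vport)
			continue;
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
	}
	spin_unlock_irq(&phba->hbalock);
}
#endif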
5af5eee7
JS
11451/**
11452 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11453 * @phba: pointer to lpfc HBA data structure.
11454 *
11455 * This routine will abort all pending and outstanding iocbs to an HBA.
11456 **/
11457void
11458lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11459{
11460 struct lpfc_sli *psli = &phba->sli;
11461 struct lpfc_sli_ring *pring;
895427bd 11462 struct lpfc_queue *qp = NULL;
5af5eee7
JS
11463 int i;
11464
895427bd
JS
11465 if (phba->sli_rev != LPFC_SLI_REV4) {
11466 for (i = 0; i < psli->num_rings; i++) {
11467 pring = &psli->sli3_ring[i];
11468 lpfc_sli_abort_iocb_ring(phba, pring);
11469 }
11470 return;
11471 }
11472 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11473 pring = qp->pring;
11474 if (!pring)
11475 continue;
db55fba8 11476 lpfc_sli_abort_iocb_ring(phba, pring);
5af5eee7
JS
11477 }
11478}
11479
e59058c4 11480/**
3621a710 11481 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
e59058c4
JS
11482 * @iocbq: Pointer to driver iocb object.
11483 * @vport: Pointer to driver virtual port object.
11484 * @tgt_id: SCSI ID of the target.
11485 * @lun_id: LUN ID of the scsi device.
11486 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11487 *
3621a710 11488 * This function acts as an iocb filter for functions which abort or count
e59058c4
JS
11489 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11490 * 0 if the filtering criteria are met for the given iocb and will return
11491 * 1 if the filtering criteria are not met.
11492 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11493 * given iocb is for the SCSI device specified by vport, tgt_id and
11494 * lun_id parameters.
11495 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11496 * given iocb is for the SCSI target specified by vport and tgt_id
11497 * parameters.
11498 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11499 * given iocb is for the SCSI host associated with the given vport.
11500 * This function is called with no locks held.
11501 **/
dea3101e 11502static int
51ef4c26
JS
11503lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11504 uint16_t tgt_id, uint64_t lun_id,
0bd4ca25 11505 lpfc_ctx_cmd ctx_cmd)
dea3101e 11506{
c490850a 11507 struct lpfc_io_buf *lpfc_cmd;
dea3101e
JB
11508 int rc = 1;
11509
b0e83012 11510 if (iocbq->vport != vport)
0bd4ca25
JSEC
11511 return rc;
11512
b0e83012
JS
11513 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11514 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
51ef4c26
JS
11515 return rc;
11516
c490850a 11517 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
0bd4ca25 11518
495a714c 11519 if (lpfc_cmd->pCmd == NULL)
dea3101e
JB
11520 return rc;
11521
11522 switch (ctx_cmd) {
11523 case LPFC_CTX_LUN:
b0e83012 11524 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
495a714c
JS
11525 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11526 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
dea3101e
JB
11527 rc = 0;
11528 break;
11529 case LPFC_CTX_TGT:
b0e83012 11530 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
495a714c 11531 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
dea3101e
JB
11532 rc = 0;
11533 break;
dea3101e
JB
11534 case LPFC_CTX_HOST:
11535 rc = 0;
11536 break;
11537 default:
11538 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
cadbd4a5 11539 __func__, ctx_cmd);
dea3101e
JB
11540 break;
11541 }
11542
11543 return rc;
11544}
11545
e59058c4 11546/**
3621a710 11547 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
e59058c4
JS
11548 * @vport: Pointer to virtual port.
11549 * @tgt_id: SCSI ID of the target.
11550 * @lun_id: LUN ID of the scsi device.
11551 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11552 *
11553 * This function returns the number of FCP commands pending for the vport.
11554 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
11555 * commands pending on the vport associated with SCSI device specified
11556 * by tgt_id and lun_id parameters.
11557 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
11558 * commands pending on the vport associated with SCSI target specified
11559 * by tgt_id parameter.
11560 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
11561 * commands pending on the vport.
11562 * This function returns the number of iocbs which satisfy the filter.
11563 * This function is called without any lock held.
11564 **/
dea3101e 11565int
51ef4c26
JS
11566lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11567 lpfc_ctx_cmd ctx_cmd)
dea3101e 11568{
51ef4c26 11569 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
11570 struct lpfc_iocbq *iocbq;
11571 int sum, i;
dea3101e 11572
31979008 11573 spin_lock_irq(&phba->hbalock);
0bd4ca25
JSEC
11574 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11575 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 11576
51ef4c26
JS
11577 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11578 ctx_cmd) == 0)
0bd4ca25 11579 sum++;
dea3101e 11580 }
31979008 11581 spin_unlock_irq(&phba->hbalock);
0bd4ca25 11582
dea3101e
JB
11583 return sum;
11584}
11585
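/*
 * Illustrative sketch (example only, not part of the driver):
 * lpfc_sli_sum_iocb() is typically used by error handlers to poll for
 * outstanding FCP I/O. This hypothetical helper counts the commands
 * still pending for a single SCSI target on a vport.
 */
#if 0
static int example_target_io_count(struct lpfc_vport *vport, uint16_t tgt_id)
{
	/* LPFC_CTX_TGT matches every FCP iocb for tgt_id; lun_id is ignored */
	return lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
}
#endif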
e59058c4 11586/**
3621a710 11587 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
e59058c4
JS
11588 * @phba: Pointer to HBA context object
11589 * @cmdiocb: Pointer to command iocb object.
11590 * @rspiocb: Pointer to response iocb object.
11591 *
11592 * This function is called when an aborted FCP iocb completes. This
11593 * function is called by the ring event handler with no lock held.
11594 * This function frees the iocb.
11595 **/
5eb95af0 11596void
2e0fef85
JS
11597lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11598 struct lpfc_iocbq *rspiocb)
5eb95af0 11599{
cb69f7de 11600 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8e668af5 11601 "3096 ABORT_XRI_CN completing on rpi x%x "
cb69f7de
JS
11602 "original iotag x%x, abort cmd iotag x%x "
11603 "status 0x%x, reason 0x%x\n",
11604 cmdiocb->iocb.un.acxri.abortContextTag,
11605 cmdiocb->iocb.un.acxri.abortIoTag,
11606 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11607 rspiocb->iocb.un.ulpWord[4]);
604a3e30 11608 lpfc_sli_release_iocbq(phba, cmdiocb);
5eb95af0
JSEC
11609 return;
11610}
11611
e59058c4 11612/**
3621a710 11613 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
e59058c4
JS
11614 * @vport: Pointer to virtual port.
11615 * @pring: Pointer to driver SLI ring object.
11616 * @tgt_id: SCSI ID of the target.
11617 * @lun_id: LUN ID of the scsi device.
11618 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11619 *
11620 * This function sends an abort command for every SCSI command
11621 * associated with the given virtual port pending on the ring
11622 * filtered by lpfc_sli_validate_fcp_iocb function.
11623 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11624 * FCP iocbs associated with lun specified by tgt_id and lun_id
11625 * parameters
11626 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11627 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11628 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11629 * FCP iocbs associated with virtual port.
11630 * This function returns the number of iocbs it failed to abort.
11631 * This function is called with no locks held.
11632 **/
dea3101e 11633int
51ef4c26
JS
11634lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11635 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
dea3101e 11636{
51ef4c26 11637 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
11638 struct lpfc_iocbq *iocbq;
11639 struct lpfc_iocbq *abtsiocb;
ecbb227e 11640 struct lpfc_sli_ring *pring_s4;
dea3101e 11641 IOCB_t *cmd = NULL;
dea3101e 11642 int errcnt = 0, ret_val = 0;
0bd4ca25 11643 int i;
dea3101e 11644
b0e83012 11645 /* all I/Os are in process of being flushed */
c00f62e6 11646 if (phba->hba_flag & HBA_IOQ_FLUSH)
b0e83012
JS
11647 return errcnt;
11648
0bd4ca25
JSEC
11649 for (i = 1; i <= phba->sli.last_iotag; i++) {
11650 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 11651
51ef4c26 11652 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
2e0fef85 11653 abort_cmd) != 0)
dea3101e
JB
11654 continue;
11655
afbd8d88
JS
11656 /*
11657 * If the iocbq is already being aborted, don't take a second
11658 * action, but do count it.
11659 */
11660 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11661 continue;
11662
dea3101e 11663 /* issue ABTS for this IOCB based on iotag */
0bd4ca25 11664 abtsiocb = lpfc_sli_get_iocbq(phba);
dea3101e
JB
11665 if (abtsiocb == NULL) {
11666 errcnt++;
11667 continue;
11668 }
dea3101e 11669
afbd8d88
JS
11670 /* indicate the IO is being aborted by the driver. */
11671 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11672
0bd4ca25 11673 cmd = &iocbq->iocb;
dea3101e
JB
11674 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11675 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
da0436e9
JS
11676 if (phba->sli_rev == LPFC_SLI_REV4)
11677 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11678 else
11679 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
dea3101e
JB
11680 abtsiocb->iocb.ulpLe = 1;
11681 abtsiocb->iocb.ulpClass = cmd->ulpClass;
afbd8d88 11682 abtsiocb->vport = vport;
dea3101e 11683
5ffc266e 11684 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 11685 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
341af102
JS
11686 if (iocbq->iocb_flag & LPFC_IO_FCP)
11687 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
11688 if (iocbq->iocb_flag & LPFC_IO_FOF)
11689 abtsiocb->iocb_flag |= LPFC_IO_FOF;
5ffc266e 11690
2e0fef85 11691 if (lpfc_is_link_up(phba))
dea3101e
JB
11692 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11693 else
11694 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11695
5eb95af0
JSEC
11696 /* Setup callback routine and issue the command. */
11697 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
ecbb227e
JS
11698 if (phba->sli_rev == LPFC_SLI_REV4) {
11699 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11700 if (!pring_s4)
11701 continue;
11702 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11703 abtsiocb, 0);
11704 } else
11705 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11706 abtsiocb, 0);
dea3101e 11707 if (ret_val == IOCB_ERROR) {
604a3e30 11708 lpfc_sli_release_iocbq(phba, abtsiocb);
dea3101e
JB
11709 errcnt++;
11710 continue;
11711 }
11712 }
11713
11714 return errcnt;
11715}
11716
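/*
 * Illustrative sketch (example only, not part of the driver): a
 * hypothetical bus-reset style caller uses LPFC_CTX_HOST to abort every
 * outstanding FCP command on the vport; the return value is the count
 * of iocbs the routine could not abort.
 */
#if 0
static int example_flush_vport_fcp(struct lpfc_vport *vport,
				   struct lpfc_sli_ring *pring)
{
	/* tgt_id and lun_id are ignored for LPFC_CTX_HOST */
	int failed = lpfc_sli_abort_iocb(vport, pring, 0, 0, LPFC_CTX_HOST);

	return failed ? -EIO : 0;
}
#endif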
98912dda
JS
11717/**
11718 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11719 * @vport: Pointer to virtual port.
11720 * @pring: Pointer to driver SLI ring object.
11721 * @tgt_id: SCSI ID of the target.
11722 * @lun_id: LUN ID of the scsi device.
11723 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11724 *
11725 * This function sends an abort command for every SCSI command
11726 * associated with the given virtual port pending on the ring
11727 * filtered by the lpfc_sli_validate_fcp_iocb function.
11728 * When cmd == LPFC_CTX_LUN, the function sends abort only to the
11729 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
11730 * parameters.
11731 * When cmd == LPFC_CTX_TGT, the function sends abort only to the
11732 * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
11733 * When cmd == LPFC_CTX_HOST, the function sends abort to all
11734 * FCP iocbs associated with the virtual port.
11735 * This function returns the number of iocbs it aborted.
11736 * This function is called with no locks held right after a taskmgmt
11737 * command is sent.
11738 **/
11739int
11740lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11741 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11742{
11743 struct lpfc_hba *phba = vport->phba;
c490850a 11744 struct lpfc_io_buf *lpfc_cmd;
98912dda 11745 struct lpfc_iocbq *abtsiocbq;
8c50d25c 11746 struct lpfc_nodelist *ndlp;
98912dda
JS
11747 struct lpfc_iocbq *iocbq;
11748 IOCB_t *icmd;
11749 int sum, i, ret_val;
11750 unsigned long iflags;
c2017260 11751 struct lpfc_sli_ring *pring_s4 = NULL;
98912dda 11752
59c68eaa 11753 spin_lock_irqsave(&phba->hbalock, iflags);
98912dda
JS
11754
11755 /* all I/Os are in process of being flushed */
c00f62e6 11756 if (phba->hba_flag & HBA_IOQ_FLUSH) {
59c68eaa 11757 spin_unlock_irqrestore(&phba->hbalock, iflags);
98912dda
JS
11758 return 0;
11759 }
11760 sum = 0;
11761
11762 for (i = 1; i <= phba->sli.last_iotag; i++) {
11763 iocbq = phba->sli.iocbq_lookup[i];
11764
11765 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11766 cmd) != 0)
11767 continue;
11768
c2017260
JS
11769 /* Guard against IO completion being called at same time */
11770 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11771 spin_lock(&lpfc_cmd->buf_lock);
11772
11773 if (!lpfc_cmd->pCmd) {
11774 spin_unlock(&lpfc_cmd->buf_lock);
11775 continue;
11776 }
11777
11778 if (phba->sli_rev == LPFC_SLI_REV4) {
11779 pring_s4 =
c00f62e6 11780 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
c2017260
JS
11781 if (!pring_s4) {
11782 spin_unlock(&lpfc_cmd->buf_lock);
11783 continue;
11784 }
11785 /* Note: both hbalock and ring_lock must be set here */
11786 spin_lock(&pring_s4->ring_lock);
11787 }
11788
98912dda
JS
11789 /*
11790 * If the iocbq is already being aborted, don't take a second
11791 * action, but do count it.
11792 */
c2017260
JS
11793 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11794 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11795 if (phba->sli_rev == LPFC_SLI_REV4)
11796 spin_unlock(&pring_s4->ring_lock);
11797 spin_unlock(&lpfc_cmd->buf_lock);
98912dda 11798 continue;
c2017260 11799 }
98912dda
JS
11800
11801 /* issue ABTS for this IOCB based on iotag */
11802 abtsiocbq = __lpfc_sli_get_iocbq(phba);
c2017260
JS
11803 if (!abtsiocbq) {
11804 if (phba->sli_rev == LPFC_SLI_REV4)
11805 spin_unlock(&pring_s4->ring_lock);
11806 spin_unlock(&lpfc_cmd->buf_lock);
98912dda 11807 continue;
c2017260 11808 }
98912dda
JS
11809
11810 icmd = &iocbq->iocb;
11811 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11812 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11813 if (phba->sli_rev == LPFC_SLI_REV4)
11814 abtsiocbq->iocb.un.acxri.abortIoTag =
11815 iocbq->sli4_xritag;
11816 else
11817 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11818 abtsiocbq->iocb.ulpLe = 1;
11819 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11820 abtsiocbq->vport = vport;
11821
11822 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 11823 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
98912dda
JS
11824 if (iocbq->iocb_flag & LPFC_IO_FCP)
11825 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
11826 if (iocbq->iocb_flag & LPFC_IO_FOF)
11827 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
98912dda 11828
8c50d25c
JS
11829 ndlp = lpfc_cmd->rdata->pnode;
11830
11831 if (lpfc_is_link_up(phba) &&
11832 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
98912dda
JS
11833 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11834 else
11835 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11836
11837 /* Setup callback routine and issue the command. */
11838 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11839
11840 /*
11841 * Indicate the IO is being aborted by the driver and set
11842 * the caller's flag into the aborted IO.
11843 */
11844 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11845
11846 if (phba->sli_rev == LPFC_SLI_REV4) {
98912dda
JS
11847 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11848 abtsiocbq, 0);
59c68eaa 11849 spin_unlock(&pring_s4->ring_lock);
98912dda
JS
11850 } else {
11851 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11852 abtsiocbq, 0);
11853 }
11854
c2017260 11855 spin_unlock(&lpfc_cmd->buf_lock);
98912dda
JS
11856
11857 if (ret_val == IOCB_ERROR)
11858 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11859 else
11860 sum++;
11861 }
59c68eaa 11862 spin_unlock_irqrestore(&phba->hbalock, iflags);
98912dda
JS
11863 return sum;
11864}
11865
e59058c4 11866/**
3621a710 11867 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
e59058c4
JS
11868 * @phba: Pointer to HBA context object.
11869 * @cmdiocbq: Pointer to command iocb.
11870 * @rspiocbq: Pointer to response iocb.
11871 *
11872 * This function is the completion handler for iocbs issued using
11873 * lpfc_sli_issue_iocb_wait function. This function is called by the
11874 * ring event handler function without any lock held. This function
11875 * can be called from both worker thread context and interrupt
11876 * context. This function also can be called from other thread which
11877 * cleans up the SLI layer objects.
11878 * This function copy the contents of the response iocb to the
11879 * response iocb memory object provided by the caller of
11880 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11881 * sleeps for the iocb completion.
11882 **/
68876920
JSEC
11883static void
11884lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11885 struct lpfc_iocbq *cmdiocbq,
11886 struct lpfc_iocbq *rspiocbq)
dea3101e 11887{
68876920
JSEC
11888 wait_queue_head_t *pdone_q;
11889 unsigned long iflags;
c490850a 11890 struct lpfc_io_buf *lpfc_cmd;
dea3101e 11891
2e0fef85 11892 spin_lock_irqsave(&phba->hbalock, iflags);
5a0916b4
JS
11893 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11894
11895 /*
11896 * A time out has occurred for the iocb. If a time out
11897 * completion handler has been supplied, call it. Otherwise,
11898 * just free the iocbq.
11899 */
11900
11901 spin_unlock_irqrestore(&phba->hbalock, iflags);
11902 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11903 cmdiocbq->wait_iocb_cmpl = NULL;
11904 if (cmdiocbq->iocb_cmpl)
11905 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11906 else
11907 lpfc_sli_release_iocbq(phba, cmdiocbq);
11908 return;
11909 }
11910
68876920
JSEC
11911 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11912 if (cmdiocbq->context2 && rspiocbq)
11913 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11914 &rspiocbq->iocb, sizeof(IOCB_t));
11915
0f65ff68
JS
11916 /* Set the exchange busy flag for task management commands */
11917 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11918 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
c490850a 11919 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
0f65ff68 11920 cur_iocbq);
324e1c40
JS
11921 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
11922 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
11923 else
11924 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
0f65ff68
JS
11925 }
11926
68876920 11927 pdone_q = cmdiocbq->context_un.wait_queue;
68876920
JSEC
11928 if (pdone_q)
11929 wake_up(pdone_q);
858c9f6c 11930 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea3101e
JB
11931 return;
11932}
11933
d11e31dd
JS
11934/**
11935 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11936 * @phba: Pointer to HBA context object.
11937 * @piocbq: Pointer to command iocb.
11938 * @flag: Flag to test.
11939 *
11940 * This routine grabs the hbalock and then tests the iocb_flag to
11941 * see if the passed in flag is set.
11942 * Returns:
11943 * 1 if flag is set.
11944 * 0 if flag is not set.
11945 **/
11946static int
11947lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11948 struct lpfc_iocbq *piocbq, uint32_t flag)
11949{
11950 unsigned long iflags;
11951 int ret;
11952
11953 spin_lock_irqsave(&phba->hbalock, iflags);
11954 ret = piocbq->iocb_flag & flag;
11955 spin_unlock_irqrestore(&phba->hbalock, iflags);
11956 return ret;
11957
11958}
11959
e59058c4 11960/**
3621a710 11961 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
e59058c4
JS
11962 * @phba: Pointer to HBA context object.
11963 * @ring_number: SLI ring number on which to issue the iocb.
11964 * @piocb: Pointer to command iocb.
11965 * @prspiocbq: Pointer to response iocb.
11966 * @timeout: Timeout in number of seconds.
11967 *
11968 * This function issues the iocb to firmware and waits for the
5a0916b4
JS
11969 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11970 * to handle iocbs which time out. If the field is NULL, the
11971 * function shall free the iocbq structure. If more clean up is
11972 * needed, the caller is expected to provide a completion function
11973 * that will provide the needed clean up. If the iocb command is
11974 * not completed within timeout seconds, the function will either
11975 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11976 * completion function set in the iocb_cmpl field and then return
11977 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11978 * resources if this function returns IOCB_TIMEDOUT.
e59058c4
JS
11979 * The function waits for the iocb completion using a
11980 * non-interruptible wait.
11981 * This function will sleep while waiting for iocb completion.
11982 * So, this function should not be called from any context which
11983 * does not allow sleeping. Due to the same reason, this function
11984 * cannot be called with interrupts disabled.
11985 * This function assumes that the iocb completions occur while
11986 * this function sleeps. So, this function cannot be called from
11987 * the thread which processes iocb completion for this ring.
11988 * This function clears the iocb_flag of the iocb object before
11989 * issuing the iocb and the iocb completion handler sets this
11990 * flag and wakes this thread when the iocb completes.
11991 * The contents of the response iocb will be copied to prspiocbq
11992 * by the completion handler when the command completes.
11993 * This function returns IOCB_SUCCESS on success.
11994 * This function is called with no lock held.
11995 **/
dea3101e 11996int
2e0fef85 11997lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
da0436e9 11998 uint32_t ring_number,
2e0fef85
JS
11999 struct lpfc_iocbq *piocb,
12000 struct lpfc_iocbq *prspiocbq,
68876920 12001 uint32_t timeout)
dea3101e 12002{
7259f0d0 12003 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
68876920
JSEC
12004 long timeleft, timeout_req = 0;
12005 int retval = IOCB_SUCCESS;
875fbdfe 12006 uint32_t creg_val;
0e9bb8d7
JS
12007 struct lpfc_iocbq *iocb;
12008 int txq_cnt = 0;
12009 int txcmplq_cnt = 0;
895427bd 12010 struct lpfc_sli_ring *pring;
5a0916b4
JS
12011 unsigned long iflags;
12012 bool iocb_completed = true;
12013
895427bd
JS
12014 if (phba->sli_rev >= LPFC_SLI_REV4)
12015 pring = lpfc_sli4_calc_ring(phba, piocb);
12016 else
12017 pring = &phba->sli.sli3_ring[ring_number];
dea3101e 12018 /*
68876920
JSEC
12019 * If the caller has provided a response iocbq buffer, then context2
12020 * must be NULL or it's an error.
dea3101e 12021 */
68876920
JSEC
12022 if (prspiocbq) {
12023 if (piocb->context2)
12024 return IOCB_ERROR;
12025 piocb->context2 = prspiocbq;
dea3101e
JB
12026 }
12027
5a0916b4 12028 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
68876920
JSEC
12029 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
12030 piocb->context_un.wait_queue = &done_q;
5a0916b4 12031 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
dea3101e 12032
875fbdfe 12033 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9940b97b
JS
12034 if (lpfc_readl(phba->HCregaddr, &creg_val))
12035 return IOCB_ERROR;
875fbdfe
JSEC
12036 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12037 writel(creg_val, phba->HCregaddr);
12038 readl(phba->HCregaddr); /* flush */
12039 }
12040
2a9bf3d0
JS
12041 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12042 SLI_IOCB_RET_IOCB);
68876920 12043 if (retval == IOCB_SUCCESS) {
256ec0d0 12044 timeout_req = msecs_to_jiffies(timeout * 1000);
68876920 12045 timeleft = wait_event_timeout(done_q,
d11e31dd 12046 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
68876920 12047 timeout_req);
5a0916b4
JS
12048 spin_lock_irqsave(&phba->hbalock, iflags);
12049 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
12050
12051 /*
12052 * IOCB timed out. Inform the wake iocb wait
12053 * completion function and set local status
12054 */
dea3101e 12055
5a0916b4
JS
12056 iocb_completed = false;
12057 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12058 }
12059 spin_unlock_irqrestore(&phba->hbalock, iflags);
12060 if (iocb_completed) {
7054a606 12061 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 12062 "0331 IOCB wake signaled\n");
53151bbb
JS
12063 /* Note: we are not indicating if the IOCB has a success
12064 * status or not - that's for the caller to check.
12065 * IOCB_SUCCESS means just that the command was sent and
12066 * completed. Not that it completed successfully.
12067 * */
7054a606 12068 } else if (timeleft == 0) {
68876920 12069 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
12070 "0338 IOCB wait timeout error - no "
12071 "wake response Data x%x\n", timeout);
68876920 12072 retval = IOCB_TIMEDOUT;
7054a606 12073 } else {
68876920 12074 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
12075 "0330 IOCB wake NOT set, "
12076 "Data x%x x%lx\n",
68876920
JSEC
12077 timeout, (timeleft / jiffies));
12078 retval = IOCB_TIMEDOUT;
dea3101e 12079 }
2a9bf3d0 12080 } else if (retval == IOCB_BUSY) {
0e9bb8d7
JS
12081 if (phba->cfg_log_verbose & LOG_SLI) {
12082 list_for_each_entry(iocb, &pring->txq, list) {
12083 txq_cnt++;
12084 }
12085 list_for_each_entry(iocb, &pring->txcmplq, list) {
12086 txcmplq_cnt++;
12087 }
12088 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12089 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12090 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12091 }
2a9bf3d0 12092 return retval;
68876920
JSEC
12093 } else {
12094 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
d7c255b2 12095 "0332 IOCB wait issue failed, Data x%x\n",
e8b62011 12096 retval);
68876920 12097 retval = IOCB_ERROR;
dea3101e
JB
12098 }
12099
875fbdfe 12100 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9940b97b
JS
12101 if (lpfc_readl(phba->HCregaddr, &creg_val))
12102 return IOCB_ERROR;
875fbdfe
JSEC
12103 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12104 writel(creg_val, phba->HCregaddr);
12105 readl(phba->HCregaddr); /* flush */
12106 }
12107
68876920
JSEC
12108 if (prspiocbq)
12109 piocb->context2 = NULL;
12110
12111 piocb->context_un.wait_queue = NULL;
12112 piocb->iocb_cmpl = NULL;
dea3101e
JB
12113 return retval;
12114}
68876920 12115
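/*
 * Illustrative sketch (example only, not part of the driver): the
 * synchronous interface is normally used from management paths. The
 * caller supplies an already-built command iocbq ("cmdiocb" here is
 * assumed to be prepared by the caller) plus a response iocbq, then
 * sleeps until completion or timeout.
 */
#if 0
static int example_sync_els(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_iocbq *rspiocb;
	int rc;

	rspiocb = lpfc_sli_get_iocbq(phba);
	if (!rspiocb)
		return IOCB_ERROR;

	/* Sleeps up to 30 seconds; per the comment above, do not free the
	 * iocb resources if IOCB_TIMEDOUT is returned.
	 */
	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocb,
				      rspiocb, 30);
	if (rc != IOCB_TIMEDOUT)
		lpfc_sli_release_iocbq(phba, rspiocb);
	return rc;
}
#endif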
e59058c4 12116/**
3621a710 12117 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
e59058c4
JS
12118 * @phba: Pointer to HBA context object.
12119 * @pmboxq: Pointer to driver mailbox object.
12120 * @timeout: Timeout in number of seconds.
12121 *
12122 * This function issues the mailbox to firmware and waits for the
12123 * mailbox command to complete. If the mailbox command is not
12124 * completed within timeout seconds, it returns MBX_TIMEOUT.
12125 * The function waits for the mailbox completion using an
12126 * interruptible wait. If the thread is woken up due to a
12127 * signal, MBX_TIMEOUT error is returned to the caller. Caller
12128 * should not free the mailbox resources if this function returns
12129 * MBX_TIMEOUT.
12130 * This function will sleep while waiting for mailbox completion.
12131 * So, this function should not be called from any context which
12132 * does not allow sleeping. Due to the same reason, this function
12133 * cannot be called with interrupts disabled.
12134 * This function assumes that the mailbox completion occurs while
12135 * this function sleeps. So, this function cannot be called from
12136 * the worker thread which processes mailbox completion.
12137 * This function is called in the context of HBA management
12138 * applications.
12139 * This function returns MBX_SUCCESS when successful.
12140 * This function is called with no lock held.
12141 **/
dea3101e 12142int
2e0fef85 12143lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
dea3101e
JB
12144 uint32_t timeout)
12145{
e29d74f8 12146 struct completion mbox_done;
dea3101e 12147 int retval;
858c9f6c 12148 unsigned long flag;
dea3101e 12149
495a714c 12150 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
dea3101e
JB
12151 /* setup wake call as IOCB callback */
12152 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
dea3101e 12153
e29d74f8
JS
12154 /* setup context3 field to pass wait_queue pointer to wake function */
12155 init_completion(&mbox_done);
12156 pmboxq->context3 = &mbox_done;
dea3101e
JB
12157 /* now issue the command */
12158 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
dea3101e 12159 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
e29d74f8
JS
12160 wait_for_completion_timeout(&mbox_done,
12161 msecs_to_jiffies(timeout * 1000));
7054a606 12162
858c9f6c 12163 spin_lock_irqsave(&phba->hbalock, flag);
e29d74f8 12164 pmboxq->context3 = NULL;
7054a606
JS
12165 /*
12166 * if LPFC_MBX_WAKE flag is set the mailbox is completed
12167 * else do not free the resources.
12168 */
d7c47992 12169 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
dea3101e 12170 retval = MBX_SUCCESS;
d7c47992 12171 } else {
7054a606 12172 retval = MBX_TIMEOUT;
858c9f6c
JS
12173 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12174 }
12175 spin_unlock_irqrestore(&phba->hbalock, flag);
dea3101e 12176 }
dea3101e
JB
12177 return retval;
12178}
12179
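/*
 * Illustrative sketch (example only, not part of the driver):
 * management paths use the synchronous mailbox interface like this.
 * On MBX_TIMEOUT the mailbox must not be freed because the completion
 * handler may still run later. The mempool allocation mirrors how the
 * driver allocates LPFC_MBOXQ_t elsewhere; "example_read_config" is a
 * hypothetical helper, not a driver entry point.
 */
#if 0
static int example_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_read_config(phba, pmb);	/* build a READ_CONFIG mailbox */
	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
	if (rc != MBX_TIMEOUT)
		mempool_free(pmb, phba->mbox_mem_pool);

	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif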
e59058c4 12180/**
3772a991 12181 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
e59058c4
JS
12182 * @phba: Pointer to HBA context.
12183 * @mbx_action: Shutdown action, either LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT.
3772a991
JS
12184 * This function is called to shut down the driver's mailbox sub-system.
12185 * It first marks the mailbox sub-system as blocked to prevent
12186 * asynchronous mailbox commands from being issued off the pending mailbox
12187 * command queue. If the mailbox command sub-system shutdown is due to
12188 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12189 * the mailbox sub-system flush routine to forcefully bring down the
12190 * mailbox sub-system. Otherwise, if it is due to normal condition (such
12191 * as with offline or HBA function reset), this routine will wait for the
12192 * outstanding mailbox command to complete before invoking the mailbox
12193 * sub-system flush routine to gracefully bring down mailbox sub-system.
e59058c4 12194 **/
3772a991 12195void
618a5230 12196lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
b4c02652 12197{
3772a991 12198 struct lpfc_sli *psli = &phba->sli;
3772a991 12199 unsigned long timeout;
b4c02652 12200
618a5230
JS
12201 if (mbx_action == LPFC_MBX_NO_WAIT) {
12202 /* delay 100ms for port state */
12203 msleep(100);
12204 lpfc_sli_mbox_sys_flush(phba);
12205 return;
12206 }
a183a15f 12207 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
d7069f09 12208
523128e5
JS
12209 /* Disable softirqs, including timers from obtaining phba->hbalock */
12210 local_bh_disable();
12211
3772a991
JS
12212 spin_lock_irq(&phba->hbalock);
12213 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
b4c02652 12214
3772a991 12215 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3772a991
JS
12216 /* Determine how long we might wait for the active mailbox
12217 * command to be gracefully completed by firmware.
12218 */
a183a15f
JS
12219 if (phba->sli.mbox_active)
12220 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12221 phba->sli.mbox_active) *
12222 1000) + jiffies;
12223 spin_unlock_irq(&phba->hbalock);
12224
523128e5
JS
12225 /* Enable softirqs again, done with phba->hbalock */
12226 local_bh_enable();
12227
3772a991
JS
12228 while (phba->sli.mbox_active) {
12229 /* Check active mailbox complete status every 2ms */
12230 msleep(2);
12231 if (time_after(jiffies, timeout))
12232 /* Timeout, let the mailbox flush routine to
12233 * forcefully release active mailbox command
12234 */
12235 break;
12236 }
523128e5 12237 } else {
d7069f09
JS
12238 spin_unlock_irq(&phba->hbalock);
12239
523128e5
JS
12240 /* Enable softirqs again, done with phba->hbalock */
12241 local_bh_enable();
12242 }
12243
3772a991
JS
12244 lpfc_sli_mbox_sys_flush(phba);
12245}
ed957684 12246
3772a991
JS
12247/**
12248 * lpfc_sli_eratt_read - read sli-3 error attention events
12249 * @phba: Pointer to HBA context.
12250 *
12251 * This function is called to read the SLI3 device error attention registers
12252 * for possible error attention events. The caller must hold the hostlock
12253 * with spin_lock_irq().
12254 *
25985edc 12255 * This function returns 1 when there is Error Attention in the Host Attention
3772a991
JS
12256 * Register and returns 0 otherwise.
12257 **/
12258static int
12259lpfc_sli_eratt_read(struct lpfc_hba *phba)
12260{
12261 uint32_t ha_copy;
b4c02652 12262
3772a991 12263 /* Read chip Host Attention (HA) register */
9940b97b
JS
12264 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12265 goto unplug_err;
12266
3772a991
JS
12267 if (ha_copy & HA_ERATT) {
12268 /* Read host status register to retrieve error event */
9940b97b
JS
12269 if (lpfc_sli_read_hs(phba))
12270 goto unplug_err;
b4c02652 12271
3772a991
JS
12272 /* Check if a deferred error condition is active */
12273 if ((HS_FFER1 & phba->work_hs) &&
12274 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0 12275 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
3772a991 12276 phba->hba_flag |= DEFER_ERATT;
3772a991
JS
12277 /* Clear all interrupt enable conditions */
12278 writel(0, phba->HCregaddr);
12279 readl(phba->HCregaddr);
12280 }
12281
12282 /* Set the driver HA work bitmap */
3772a991
JS
12283 phba->work_ha |= HA_ERATT;
12284 /* Indicate polling handles this ERATT */
12285 phba->hba_flag |= HBA_ERATT_HANDLED;
3772a991
JS
12286 return 1;
12287 }
12288 return 0;
9940b97b
JS
12289
12290unplug_err:
12291 /* Set the driver HS work bitmap */
12292 phba->work_hs |= UNPLUG_ERR;
12293 /* Set the driver HA work bitmap */
12294 phba->work_ha |= HA_ERATT;
12295 /* Indicate polling handles this ERATT */
12296 phba->hba_flag |= HBA_ERATT_HANDLED;
12297 return 1;
b4c02652
JS
12298}
12299
da0436e9
JS
12300/**
12301 * lpfc_sli4_eratt_read - read sli-4 error attention events
12302 * @phba: Pointer to HBA context.
12303 *
12304 * This function is called to read the SLI4 device error attention registers
12305 * for possible error attention events. The caller must hold the hostlock
12306 * with spin_lock_irq().
12307 *
25985edc 12308 * This function returns 1 when there is Error Attention in the Host Attention
da0436e9
JS
12309 * Register and returns 0 otherwise.
12310 **/
12311static int
12312lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12313{
12314 uint32_t uerr_sta_hi, uerr_sta_lo;
2fcee4bf
JS
12315 uint32_t if_type, portsmphr;
12316 struct lpfc_register portstat_reg;
da0436e9 12317
2fcee4bf
JS
12318 /*
12319 * For now, use the SLI4 device internal unrecoverable error
da0436e9
JS
12320 * registers for error attention. This can be changed later.
12321 */
2fcee4bf
JS
12322 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12323 switch (if_type) {
12324 case LPFC_SLI_INTF_IF_TYPE_0:
9940b97b
JS
12325 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12326 &uerr_sta_lo) ||
12327 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12328 &uerr_sta_hi)) {
12329 phba->work_hs |= UNPLUG_ERR;
12330 phba->work_ha |= HA_ERATT;
12331 phba->hba_flag |= HBA_ERATT_HANDLED;
12332 return 1;
12333 }
2fcee4bf
JS
12334 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12335 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12336 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12337 "1423 HBA Unrecoverable error: "
12338 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12339 "ue_mask_lo_reg=0x%x, "
12340 "ue_mask_hi_reg=0x%x\n",
12341 uerr_sta_lo, uerr_sta_hi,
12342 phba->sli4_hba.ue_mask_lo,
12343 phba->sli4_hba.ue_mask_hi);
12344 phba->work_status[0] = uerr_sta_lo;
12345 phba->work_status[1] = uerr_sta_hi;
12346 phba->work_ha |= HA_ERATT;
12347 phba->hba_flag |= HBA_ERATT_HANDLED;
12348 return 1;
12349 }
12350 break;
12351 case LPFC_SLI_INTF_IF_TYPE_2:
27d6ac0a 12352 case LPFC_SLI_INTF_IF_TYPE_6:
9940b97b
JS
12353 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12354 &portstat_reg.word0) ||
12355 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12356 &portsmphr)){
12357 phba->work_hs |= UNPLUG_ERR;
12358 phba->work_ha |= HA_ERATT;
12359 phba->hba_flag |= HBA_ERATT_HANDLED;
12360 return 1;
12361 }
2fcee4bf
JS
12362 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12363 phba->work_status[0] =
12364 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12365 phba->work_status[1] =
12366 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12367 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2e90f4b5 12368 "2885 Port Status Event: "
2fcee4bf
JS
12369 "port status reg 0x%x, "
12370 "port smphr reg 0x%x, "
12371 "error 1=0x%x, error 2=0x%x\n",
12372 portstat_reg.word0,
12373 portsmphr,
12374 phba->work_status[0],
12375 phba->work_status[1]);
12376 phba->work_ha |= HA_ERATT;
12377 phba->hba_flag |= HBA_ERATT_HANDLED;
12378 return 1;
12379 }
12380 break;
12381 case LPFC_SLI_INTF_IF_TYPE_1:
12382 default:
a747c9ce 12383 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2fcee4bf
JS
12384 "2886 HBA Error Attention on unsupported "
12385 "if type %d.", if_type);
a747c9ce 12386 return 1;
da0436e9 12387 }
2fcee4bf 12388
da0436e9
JS
12389 return 0;
12390}
12391
e59058c4 12392/**
3621a710 12393 * lpfc_sli_check_eratt - check error attention events
9399627f
JS
12394 * @phba: Pointer to HBA context.
12395 *
3772a991 12396 * This function is called from timer soft interrupt context to check HBA's
9399627f
JS
12397 * error attention register bit for error attention events.
12398 *
25985edc 12399 * This function returns 1 when there is Error Attention in the Host Attention
9399627f
JS
12400 * Register and returns 0 otherwise.
12401 **/
12402int
12403lpfc_sli_check_eratt(struct lpfc_hba *phba)
12404{
12405 uint32_t ha_copy;
12406
12407 /* If somebody is waiting to handle an eratt, don't process it
12408 * here. The brdkill function will do this.
12409 */
12410 if (phba->link_flag & LS_IGNORE_ERATT)
12411 return 0;
12412
12413 /* Check if interrupt handler handles this ERATT */
12414 spin_lock_irq(&phba->hbalock);
12415 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12416 /* Interrupt handler has handled ERATT */
12417 spin_unlock_irq(&phba->hbalock);
12418 return 0;
12419 }
12420
a257bf90
JS
12421 /*
12422 * If there is deferred error attention, do not check for error
12423 * attention
12424 */
12425 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12426 spin_unlock_irq(&phba->hbalock);
12427 return 0;
12428 }
12429
3772a991
JS
12430 /* If PCI channel is offline, don't process it */
12431 if (unlikely(pci_channel_offline(phba->pcidev))) {
9399627f 12432 spin_unlock_irq(&phba->hbalock);
3772a991
JS
12433 return 0;
12434 }
12435
12436 switch (phba->sli_rev) {
12437 case LPFC_SLI_REV2:
12438 case LPFC_SLI_REV3:
12439 /* Read chip Host Attention (HA) register */
12440 ha_copy = lpfc_sli_eratt_read(phba);
12441 break;
da0436e9 12442 case LPFC_SLI_REV4:
2fcee4bf 12443 /* Read device Unrecoverable Error (UERR) registers */
da0436e9
JS
12444 ha_copy = lpfc_sli4_eratt_read(phba);
12445 break;
3772a991
JS
12446 default:
12447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12448 "0299 Invalid SLI revision (%d)\n",
12449 phba->sli_rev);
12450 ha_copy = 0;
12451 break;
9399627f
JS
12452 }
12453 spin_unlock_irq(&phba->hbalock);
3772a991
JS
12454
12455 return ha_copy;
12456}
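
/*
 * Illustrative sketch (hypothetical helper, not the driver's actual poll
 * routine): a periodic poller consuming lpfc_sli_check_eratt(). A
 * non-zero return means an error attention was latched into
 * phba->work_ha, so the worker thread is woken to handle the recovery.
 */
#if 0	/* example only, never compiled */
static void lpfc_example_eratt_poll_once(struct lpfc_hba *phba)
{
	if (lpfc_sli_check_eratt(phba))
		lpfc_worker_wake_up(phba);
}
#endif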
12457
12458/**
12459 * lpfc_intr_state_check - Check device state for interrupt handling
12460 * @phba: Pointer to HBA context.
12461 *
12462 * This inline routine checks whether the device or its PCI slot is in a state
12463 * in which the interrupt should be handled.
12464 *
12465 * This function returns 0 if the device or the PCI slot is in a state in which
12466 * the interrupt should be handled, otherwise -EIO.
12467 */
12468static inline int
12469lpfc_intr_state_check(struct lpfc_hba *phba)
12470{
12471 /* If the pci channel is offline, ignore all the interrupts */
12472 if (unlikely(pci_channel_offline(phba->pcidev)))
12473 return -EIO;
12474
12475 /* Update device level interrupt statistics */
12476 phba->sli.slistat.sli_intr++;
12477
12478 /* Ignore all interrupts during initialization. */
12479 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12480 return -EIO;
12481
9399627f
JS
12482 return 0;
12483}
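
/*
 * Illustrative sketch (hypothetical handler name): the guard pattern the
 * interrupt handlers below follow, returning IRQ_NONE while the PCI slot
 * is in error recovery or the HBA is still initializing.
 */
#if 0	/* example only, never compiled */
static irqreturn_t lpfc_example_isr(int irq, void *dev_id)
{
	struct lpfc_hba *phba = dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Device or slot not ready for interrupt processing */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	/* ... normal attention processing would follow here ... */
	return IRQ_HANDLED;
}
#endif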
12484
12485/**
3772a991 12486 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
e59058c4
JS
12487 * @irq: Interrupt number.
12488 * @dev_id: The device context pointer.
12489 *
9399627f 12490 * This function is directly called from the PCI layer as an interrupt
3772a991
JS
12491 * service routine when device with SLI-3 interface spec is enabled with
12492 * MSI-X multi-message interrupt mode and there are slow-path events in
12493 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12494 * interrupt mode, this function is called as part of the device-level
12495 * interrupt handler. When the PCI slot is in error recovery or the HBA
12496 * is undergoing initialization, the interrupt handler will not process
12497 * the interrupt. The link attention and ELS ring attention events are
12498 * handled by the worker thread. The interrupt handler signals the worker
12499 * thread and returns for these events. This function is called without
12500 * any lock held. It gets the hbalock to access and update SLI data
9399627f
JS
12501 * structures.
12502 *
12503 * This function returns IRQ_HANDLED when interrupt is handled else it
12504 * returns IRQ_NONE.
e59058c4 12505 **/
dea3101e 12506irqreturn_t
3772a991 12507lpfc_sli_sp_intr_handler(int irq, void *dev_id)
dea3101e 12508{
2e0fef85 12509 struct lpfc_hba *phba;
a747c9ce 12510 uint32_t ha_copy, hc_copy;
dea3101e
JB
12511 uint32_t work_ha_copy;
12512 unsigned long status;
5b75da2f 12513 unsigned long iflag;
dea3101e
JB
12514 uint32_t control;
12515
92d7f7b0 12516 MAILBOX_t *mbox, *pmbox;
858c9f6c
JS
12517 struct lpfc_vport *vport;
12518 struct lpfc_nodelist *ndlp;
12519 struct lpfc_dmabuf *mp;
92d7f7b0
JS
12520 LPFC_MBOXQ_t *pmb;
12521 int rc;
12522
dea3101e
JB
12523 /*
12524 * Get the driver's phba structure from the dev_id and
12525 * assume the HBA is not interrupting.
12526 */
9399627f 12527 phba = (struct lpfc_hba *)dev_id;
dea3101e
JB
12528
12529 if (unlikely(!phba))
12530 return IRQ_NONE;
12531
dea3101e 12532 /*
9399627f
JS
12533 * Stuff needs to be attended to when this function is invoked as an
12534 * individual interrupt handler in MSI-X multi-message interrupt mode
dea3101e 12535 */
9399627f 12536 if (phba->intr_type == MSIX) {
3772a991
JS
12537 /* Check device state for handling interrupt */
12538 if (lpfc_intr_state_check(phba))
9399627f
JS
12539 return IRQ_NONE;
12540 /* Need to read HA REG for slow-path events */
5b75da2f 12541 spin_lock_irqsave(&phba->hbalock, iflag);
9940b97b
JS
12542 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12543 goto unplug_error;
9399627f
JS
12544 /* If somebody is waiting to handle an eratt don't process it
12545 * here. The brdkill function will do this.
12546 */
12547 if (phba->link_flag & LS_IGNORE_ERATT)
12548 ha_copy &= ~HA_ERATT;
12549 /* Check the need for handling ERATT in interrupt handler */
12550 if (ha_copy & HA_ERATT) {
12551 if (phba->hba_flag & HBA_ERATT_HANDLED)
12552 /* ERATT polling has handled ERATT */
12553 ha_copy &= ~HA_ERATT;
12554 else
12555 /* Indicate interrupt handler handles ERATT */
12556 phba->hba_flag |= HBA_ERATT_HANDLED;
12557 }
a257bf90
JS
12558
12559 /*
12560 * If there is deferred error attention, do not check for any
12561 * interrupt.
12562 */
12563 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 12564 spin_unlock_irqrestore(&phba->hbalock, iflag);
a257bf90
JS
12565 return IRQ_NONE;
12566 }
12567
9399627f 12568 /* Clear up only attention source related to slow-path */
9940b97b
JS
12569 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12570 goto unplug_error;
12571
a747c9ce
JS
12572 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12573 HC_LAINT_ENA | HC_ERINT_ENA),
12574 phba->HCregaddr);
9399627f
JS
12575 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12576 phba->HAregaddr);
a747c9ce 12577 writel(hc_copy, phba->HCregaddr);
9399627f 12578 readl(phba->HAregaddr); /* flush */
5b75da2f 12579 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f
JS
12580 } else
12581 ha_copy = phba->ha_copy;
dea3101e 12582
dea3101e
JB
12583 work_ha_copy = ha_copy & phba->work_ha_mask;
12584
9399627f 12585 if (work_ha_copy) {
dea3101e
JB
12586 if (work_ha_copy & HA_LATT) {
12587 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12588 /*
12589 * Turn off Link Attention interrupts
12590 * until CLEAR_LA done
12591 */
5b75da2f 12592 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 12593 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
9940b97b
JS
12594 if (lpfc_readl(phba->HCregaddr, &control))
12595 goto unplug_error;
dea3101e
JB
12596 control &= ~HC_LAINT_ENA;
12597 writel(control, phba->HCregaddr);
12598 readl(phba->HCregaddr); /* flush */
5b75da2f 12599 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e
JB
12600 }
12601 else
12602 work_ha_copy &= ~HA_LATT;
12603 }
12604
9399627f 12605 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
858c9f6c
JS
12606 /*
12607 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12608 * the only slow ring.
12609 */
12610 status = (work_ha_copy &
12611 (HA_RXMASK << (4*LPFC_ELS_RING)));
12612 status >>= (4*LPFC_ELS_RING);
12613 if (status & HA_RXMASK) {
5b75da2f 12614 spin_lock_irqsave(&phba->hbalock, iflag);
9940b97b
JS
12615 if (lpfc_readl(phba->HCregaddr, &control))
12616 goto unplug_error;
a58cbd52
JS
12617
12618 lpfc_debugfs_slow_ring_trc(phba,
12619 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12620 control, status,
12621 (uint32_t)phba->sli.slistat.sli_intr);
12622
858c9f6c 12623 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
a58cbd52
JS
12624 lpfc_debugfs_slow_ring_trc(phba,
12625 "ISR Disable ring:"
12626 "pwork:x%x hawork:x%x wait:x%x",
12627 phba->work_ha, work_ha_copy,
12628 (uint32_t)((unsigned long)
5e9d9b82 12629 &phba->work_waitq));
a58cbd52 12630
858c9f6c
JS
12631 control &=
12632 ~(HC_R0INT_ENA << LPFC_ELS_RING);
dea3101e
JB
12633 writel(control, phba->HCregaddr);
12634 readl(phba->HCregaddr); /* flush */
dea3101e 12635 }
a58cbd52
JS
12636 else {
12637 lpfc_debugfs_slow_ring_trc(phba,
12638 "ISR slow ring: pwork:"
12639 "x%x hawork:x%x wait:x%x",
12640 phba->work_ha, work_ha_copy,
12641 (uint32_t)((unsigned long)
5e9d9b82 12642 &phba->work_waitq));
a58cbd52 12643 }
5b75da2f 12644 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e
JB
12645 }
12646 }
5b75da2f 12647 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90 12648 if (work_ha_copy & HA_ERATT) {
9940b97b
JS
12649 if (lpfc_sli_read_hs(phba))
12650 goto unplug_error;
a257bf90
JS
12651 /*
12652 * Check if a deferred error condition
12653 * is active
12654 */
12655 if ((HS_FFER1 & phba->work_hs) &&
12656 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0
JS
12657 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12658 phba->work_hs)) {
a257bf90
JS
12659 phba->hba_flag |= DEFER_ERATT;
12660 /* Clear all interrupt enable conditions */
12661 writel(0, phba->HCregaddr);
12662 readl(phba->HCregaddr);
12663 }
12664 }
12665
9399627f 12666 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
92d7f7b0 12667 pmb = phba->sli.mbox_active;
04c68496 12668 pmbox = &pmb->u.mb;
34b02dcd 12669 mbox = phba->mbox;
858c9f6c 12670 vport = pmb->vport;
92d7f7b0
JS
12671
12672 /* First check out the status word */
12673 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12674 if (pmbox->mbxOwner != OWN_HOST) {
5b75da2f 12675 spin_unlock_irqrestore(&phba->hbalock, iflag);
92d7f7b0
JS
12676 /*
12677 * Stray Mailbox Interrupt, mbxCommand <cmd>
12678 * mbxStatus <status>
12679 */
09372820 12680 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
92d7f7b0 12681 LOG_SLI,
e8b62011 12682 "(%d):0304 Stray Mailbox "
92d7f7b0
JS
12683 "Interrupt mbxCommand x%x "
12684 "mbxStatus x%x\n",
e8b62011 12685 (vport ? vport->vpi : 0),
92d7f7b0
JS
12686 pmbox->mbxCommand,
12687 pmbox->mbxStatus);
09372820
JS
12688 /* clear mailbox attention bit */
12689 work_ha_copy &= ~HA_MBATT;
12690 } else {
97eab634 12691 phba->sli.mbox_active = NULL;
5b75da2f 12692 spin_unlock_irqrestore(&phba->hbalock, iflag);
09372820
JS
12693 phba->last_completion_time = jiffies;
12694 del_timer(&phba->sli.mbox_tmo);
09372820
JS
12695 if (pmb->mbox_cmpl) {
12696 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12697 MAILBOX_CMD_SIZE);
7a470277 12698 if (pmb->out_ext_byte_len &&
3e1f0718 12699 pmb->ctx_buf)
7a470277
JS
12700 lpfc_sli_pcimem_bcopy(
12701 phba->mbox_ext,
3e1f0718 12702 pmb->ctx_buf,
7a470277 12703 pmb->out_ext_byte_len);
09372820
JS
12704 }
12705 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12706 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12707
12708 lpfc_debugfs_disc_trc(vport,
12709 LPFC_DISC_TRC_MBOX_VPORT,
12710 "MBOX dflt rpi: : "
12711 "status:x%x rpi:x%x",
12712 (uint32_t)pmbox->mbxStatus,
12713 pmbox->un.varWords[0], 0);
12714
12715 if (!pmbox->mbxStatus) {
12716 mp = (struct lpfc_dmabuf *)
3e1f0718 12717 (pmb->ctx_buf);
09372820 12718 ndlp = (struct lpfc_nodelist *)
3e1f0718 12719 pmb->ctx_ndlp;
09372820
JS
12720
12721 /* Reg_LOGIN of dflt RPI was
12722 * successful. Now let's get
12723 * rid of the RPI using the
12724 * same mbox buffer.
12725 */
12726 lpfc_unreg_login(phba,
12727 vport->vpi,
12728 pmbox->un.varWords[0],
12729 pmb);
12730 pmb->mbox_cmpl =
12731 lpfc_mbx_cmpl_dflt_rpi;
3e1f0718
JS
12732 pmb->ctx_buf = mp;
12733 pmb->ctx_ndlp = ndlp;
09372820 12734 pmb->vport = vport;
58da1ffb
JS
12735 rc = lpfc_sli_issue_mbox(phba,
12736 pmb,
12737 MBX_NOWAIT);
12738 if (rc != MBX_BUSY)
12739 lpfc_printf_log(phba,
12740 KERN_ERR,
12741 LOG_MBOX | LOG_SLI,
d7c255b2 12742 "0350 rc should have"
6a9c52cf 12743 "been MBX_BUSY\n");
3772a991
JS
12744 if (rc != MBX_NOT_FINISHED)
12745 goto send_current_mbox;
09372820 12746 }
858c9f6c 12747 }
5b75da2f
JS
12748 spin_lock_irqsave(
12749 &phba->pport->work_port_lock,
12750 iflag);
09372820
JS
12751 phba->pport->work_port_events &=
12752 ~WORKER_MBOX_TMO;
5b75da2f
JS
12753 spin_unlock_irqrestore(
12754 &phba->pport->work_port_lock,
12755 iflag);
09372820 12756 lpfc_mbox_cmpl_put(phba, pmb);
858c9f6c 12757 }
97eab634 12758 } else
5b75da2f 12759 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f 12760
92d7f7b0
JS
12761 if ((work_ha_copy & HA_MBATT) &&
12762 (phba->sli.mbox_active == NULL)) {
858c9f6c 12763send_current_mbox:
92d7f7b0 12764 /* Process next mailbox command if there is one */
58da1ffb
JS
12765 do {
12766 rc = lpfc_sli_issue_mbox(phba, NULL,
12767 MBX_NOWAIT);
12768 } while (rc == MBX_NOT_FINISHED);
12769 if (rc != MBX_SUCCESS)
12770 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12771 LOG_SLI, "0349 rc should be "
6a9c52cf 12772 "MBX_SUCCESS\n");
92d7f7b0
JS
12773 }
12774
5b75da2f 12775 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 12776 phba->work_ha |= work_ha_copy;
5b75da2f 12777 spin_unlock_irqrestore(&phba->hbalock, iflag);
5e9d9b82 12778 lpfc_worker_wake_up(phba);
dea3101e 12779 }
9399627f 12780 return IRQ_HANDLED;
9940b97b
JS
12781unplug_error:
12782 spin_unlock_irqrestore(&phba->hbalock, iflag);
12783 return IRQ_HANDLED;
dea3101e 12784
3772a991 12785} /* lpfc_sli_sp_intr_handler */
9399627f
JS
12786
12787/**
3772a991 12788 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
9399627f
JS
12789 * @irq: Interrupt number.
12790 * @dev_id: The device context pointer.
12791 *
12792 * This function is directly called from the PCI layer as an interrupt
3772a991
JS
12793 * service routine when device with SLI-3 interface spec is enabled with
12794 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12795 * ring event in the HBA. However, when the device is enabled with either
12796 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12797 * device-level interrupt handler. When the PCI slot is in error recovery
12798 * or the HBA is undergoing initialization, the interrupt handler will not
12799 * process the interrupt. The SCSI FCP fast-path ring events are handled in
12800 * the interrupt context. This function is called without any lock held.
12801 * It gets the hbalock to access and update SLI data structures.
9399627f
JS
12802 *
12803 * This function returns IRQ_HANDLED when interrupt is handled else it
12804 * returns IRQ_NONE.
12805 **/
12806irqreturn_t
3772a991 12807lpfc_sli_fp_intr_handler(int irq, void *dev_id)
9399627f
JS
12808{
12809 struct lpfc_hba *phba;
12810 uint32_t ha_copy;
12811 unsigned long status;
5b75da2f 12812 unsigned long iflag;
895427bd 12813 struct lpfc_sli_ring *pring;
9399627f
JS
12814
12815 /* Get the driver's phba structure from the dev_id and
12816 * assume the HBA is not interrupting.
12817 */
12818 phba = (struct lpfc_hba *) dev_id;
12819
12820 if (unlikely(!phba))
12821 return IRQ_NONE;
12822
12823 /*
12824 * Stuff needs to be attended to when this function is invoked as an
12825 * individual interrupt handler in MSI-X multi-message interrupt mode
12826 */
12827 if (phba->intr_type == MSIX) {
3772a991
JS
12828 /* Check device state for handling interrupt */
12829 if (lpfc_intr_state_check(phba))
9399627f
JS
12830 return IRQ_NONE;
12831 /* Need to read HA REG for FCP ring and other ring events */
9940b97b
JS
12832 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12833 return IRQ_HANDLED;
9399627f 12834 /* Clear up only attention source related to fast-path */
5b75da2f 12835 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90
JS
12836 /*
12837 * If there is deferred error attention, do not check for
12838 * any interrupt.
12839 */
12840 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 12841 spin_unlock_irqrestore(&phba->hbalock, iflag);
a257bf90
JS
12842 return IRQ_NONE;
12843 }
9399627f
JS
12844 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12845 phba->HAregaddr);
12846 readl(phba->HAregaddr); /* flush */
5b75da2f 12847 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f
JS
12848 } else
12849 ha_copy = phba->ha_copy;
dea3101e
JB
12850
12851 /*
9399627f 12852 * Process all events on FCP ring. Take the optimized path for FCP IO.
dea3101e 12853 */
9399627f
JS
12854 ha_copy &= ~(phba->work_ha_mask);
12855
12856 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
dea3101e 12857 status >>= (4*LPFC_FCP_RING);
895427bd 12858 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
858c9f6c 12859 if (status & HA_RXMASK)
895427bd 12860 lpfc_sli_handle_fast_ring_event(phba, pring, status);
a4bc3379
JS
12861
12862 if (phba->cfg_multi_ring_support == 2) {
12863 /*
9399627f
JS
12864 * Process all events on extra ring. Take the optimized path
12865 * for extra ring IO.
a4bc3379 12866 */
9399627f 12867 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
a4bc3379 12868 status >>= (4*LPFC_EXTRA_RING);
858c9f6c 12869 if (status & HA_RXMASK) {
a4bc3379 12870 lpfc_sli_handle_fast_ring_event(phba,
895427bd 12871 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
a4bc3379
JS
12872 status);
12873 }
12874 }
dea3101e 12875 return IRQ_HANDLED;
3772a991 12876} /* lpfc_sli_fp_intr_handler */
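
/*
 * Illustrative sketch (helper name, vector arguments, and IRQ names are
 * assumptions): how the two SLI-3 MSI-X vectors map onto the slow-path
 * and fast-path handlers above. The driver's real MSI-X setup is done
 * elsewhere.
 */
#if 0	/* example only, never compiled */
static int lpfc_example_request_msix(struct lpfc_hba *phba,
				     int sp_vec, int fp_vec)
{
	int rc;

	rc = request_irq(sp_vec, lpfc_sli_sp_intr_handler, 0,
			 "lpfc-example-sp", phba);
	if (rc)
		return rc;

	rc = request_irq(fp_vec, lpfc_sli_fp_intr_handler, 0,
			 "lpfc-example-fp", phba);
	if (rc)
		free_irq(sp_vec, phba);
	return rc;
}
#endif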
9399627f
JS
12877
12878/**
3772a991 12879 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
9399627f
JS
12880 * @irq: Interrupt number.
12881 * @dev_id: The device context pointer.
12882 *
3772a991
JS
12883 * This function is the HBA device-level interrupt handler to device with
12884 * SLI-3 interface spec, called from the PCI layer when either MSI or
12885 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12886 * requires driver attention. This function invokes the slow-path interrupt
12887 * attention handling function and fast-path interrupt attention handling
12888 * function in turn to process the relevant HBA attention events. This
12889 * function is called without any lock held. It gets the hbalock to access
12890 * and update SLI data structures.
9399627f
JS
12891 *
12892 * This function returns IRQ_HANDLED when interrupt is handled, else it
12893 * returns IRQ_NONE.
12894 **/
12895irqreturn_t
3772a991 12896lpfc_sli_intr_handler(int irq, void *dev_id)
9399627f
JS
12897{
12898 struct lpfc_hba *phba;
12899 irqreturn_t sp_irq_rc, fp_irq_rc;
12900 unsigned long status1, status2;
a747c9ce 12901 uint32_t hc_copy;
9399627f
JS
12902
12903 /*
12904 * Get the driver's phba structure from the dev_id and
12905 * assume the HBA is not interrupting.
12906 */
12907 phba = (struct lpfc_hba *) dev_id;
12908
12909 if (unlikely(!phba))
12910 return IRQ_NONE;
12911
3772a991
JS
12912 /* Check device state for handling interrupt */
12913 if (lpfc_intr_state_check(phba))
9399627f
JS
12914 return IRQ_NONE;
12915
12916 spin_lock(&phba->hbalock);
9940b97b
JS
12917 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12918 spin_unlock(&phba->hbalock);
12919 return IRQ_HANDLED;
12920 }
12921
9399627f
JS
12922 if (unlikely(!phba->ha_copy)) {
12923 spin_unlock(&phba->hbalock);
12924 return IRQ_NONE;
12925 } else if (phba->ha_copy & HA_ERATT) {
12926 if (phba->hba_flag & HBA_ERATT_HANDLED)
12927 /* ERATT polling has handled ERATT */
12928 phba->ha_copy &= ~HA_ERATT;
12929 else
12930 /* Indicate interrupt handler handles ERATT */
12931 phba->hba_flag |= HBA_ERATT_HANDLED;
12932 }
12933
a257bf90
JS
12934 /*
12935 * If there is deferred error attention, do not check for any interrupt.
12936 */
12937 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
ec21b3b0 12938 spin_unlock(&phba->hbalock);
a257bf90
JS
12939 return IRQ_NONE;
12940 }
12941
9399627f 12942 /* Clear attention sources except link and error attentions */
9940b97b
JS
12943 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12944 spin_unlock(&phba->hbalock);
12945 return IRQ_HANDLED;
12946 }
a747c9ce
JS
12947 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12948 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12949 phba->HCregaddr);
9399627f 12950 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
a747c9ce 12951 writel(hc_copy, phba->HCregaddr);
9399627f
JS
12952 readl(phba->HAregaddr); /* flush */
12953 spin_unlock(&phba->hbalock);
12954
12955 /*
12956 * Invokes slow-path host attention interrupt handling as appropriate.
12957 */
12958
12959 /* status of events with mailbox and link attention */
12960 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12961
12962 /* status of events with ELS ring */
12963 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12964 status2 >>= (4*LPFC_ELS_RING);
12965
12966 if (status1 || (status2 & HA_RXMASK))
3772a991 12967 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
9399627f
JS
12968 else
12969 sp_irq_rc = IRQ_NONE;
12970
12971 /*
12972 * Invoke fast-path host attention interrupt handling as appropriate.
12973 */
12974
12975 /* status of events with FCP ring */
12976 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12977 status1 >>= (4*LPFC_FCP_RING);
12978
12979 /* status of events with extra ring */
12980 if (phba->cfg_multi_ring_support == 2) {
12981 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12982 status2 >>= (4*LPFC_EXTRA_RING);
12983 } else
12984 status2 = 0;
12985
12986 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
3772a991 12987 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
9399627f
JS
12988 else
12989 fp_irq_rc = IRQ_NONE;
dea3101e 12990
9399627f
JS
12991 /* Return device-level interrupt handling status */
12992 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
3772a991 12993} /* lpfc_sli_intr_handler */
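
/*
 * Illustrative sketch (helper and IRQ name are assumptions): with INTx or
 * plain MSI a single shared vector is bound to the combined device-level
 * handler above, which then fans out to the slow-path and fast-path
 * handlers itself.
 */
#if 0	/* example only, never compiled */
static int lpfc_example_request_intx(struct lpfc_hba *phba)
{
	return request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			   IRQF_SHARED, "lpfc-example", phba);
}
#endif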
4f774513
JS
12994
12995/**
4f774513 12996 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
4f774513
JS
12997 * @phba: pointer to lpfc hba data structure.
12998 *
12999 * This routine is invoked by the worker thread to process all the pending
4f774513 13000 * SLI4 els abort xri events.
4f774513 13001 **/
4f774513 13002void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
4f774513
JS
13003{
13004 struct lpfc_cq_event *cq_event;
13005
4f774513 13006 /* First, declare the els xri abort event has been handled */
4f774513 13007 spin_lock_irq(&phba->hbalock);
4f774513 13008 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
4f774513 13009 spin_unlock_irq(&phba->hbalock);
4f774513
JS
13010 /* Now, handle all the els xri abort events */
13011 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13012 /* Get the first event from the head of the event queue */
13013 spin_lock_irq(&phba->hbalock);
13014 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13015 cq_event, struct lpfc_cq_event, list);
13016 spin_unlock_irq(&phba->hbalock);
13017 /* Notify aborted XRI for ELS work queue */
13018 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13019 /* Free the event processed back to the free pool */
13020 lpfc_sli4_cq_event_release(phba, cq_event);
13021 }
13022}
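
/*
 * Illustrative sketch (hypothetical helper, not the driver's actual
 * worker loop): how a worker-thread dispatcher could consume the
 * ELS_XRI_ABORT_EVENT flag set by the CQ handlers and drain the
 * aborted-XRI event list via the routine above.
 */
#if 0	/* example only, never compiled */
static void lpfc_example_worker_dispatch(struct lpfc_hba *phba)
{
	if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
		lpfc_sli4_els_xri_abort_event_proc(phba);
}
#endif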
13023
341af102
JS
13024/**
13025 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
13026 * @phba: pointer to lpfc hba data structure
13027 * @pIocbIn: pointer to the rspiocbq
13028 * @pIocbOut: pointer to the cmdiocbq
13029 * @wcqe: pointer to the complete wcqe
13030 *
13031 * This routine transfers the fields of a command iocbq to a response iocbq
13032 * by copying all the IOCB fields from command iocbq and transferring the
13033 * completion status information from the complete wcqe.
13034 **/
4f774513 13035static void
341af102
JS
13036lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
13037 struct lpfc_iocbq *pIocbIn,
4f774513
JS
13038 struct lpfc_iocbq *pIocbOut,
13039 struct lpfc_wcqe_complete *wcqe)
13040{
af22741c 13041 int numBdes, i;
341af102 13042 unsigned long iflags;
af22741c
JS
13043 uint32_t status, max_response;
13044 struct lpfc_dmabuf *dmabuf;
13045 struct ulp_bde64 *bpl, bde;
4f774513
JS
13046 size_t offset = offsetof(struct lpfc_iocbq, iocb);
13047
13048 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13049 sizeof(struct lpfc_iocbq) - offset);
4f774513 13050 /* Map WCQE parameters into irspiocb parameters */
acd6859b
JS
13051 status = bf_get(lpfc_wcqe_c_status, wcqe);
13052 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
4f774513
JS
13053 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13054 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13055 pIocbIn->iocb.un.fcpi.fcpi_parm =
13056 pIocbOut->iocb.un.fcpi.fcpi_parm -
13057 wcqe->total_data_placed;
13058 else
13059 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
695a814e 13060 else {
4f774513 13061 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
af22741c
JS
13062 switch (pIocbOut->iocb.ulpCommand) {
13063 case CMD_ELS_REQUEST64_CR:
13064 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13065 bpl = (struct ulp_bde64 *)dmabuf->virt;
13066 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13067 max_response = bde.tus.f.bdeSize;
13068 break;
13069 case CMD_GEN_REQUEST64_CR:
13070 max_response = 0;
13071 if (!pIocbOut->context3)
13072 break;
13073 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13074 sizeof(struct ulp_bde64);
13075 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13076 bpl = (struct ulp_bde64 *)dmabuf->virt;
13077 for (i = 0; i < numBdes; i++) {
13078 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13079 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13080 max_response += bde.tus.f.bdeSize;
13081 }
13082 break;
13083 default:
13084 max_response = wcqe->total_data_placed;
13085 break;
13086 }
13087 if (max_response < wcqe->total_data_placed)
13088 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13089 else
13090 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13091 wcqe->total_data_placed;
695a814e 13092 }
341af102 13093
acd6859b
JS
13094 /* Convert BG errors for completion status */
13095 if (status == CQE_STATUS_DI_ERROR) {
13096 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13097
13098 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13099 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13100 else
13101 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13102
13103 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13104 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13105 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13106 BGS_GUARD_ERR_MASK;
13107 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13108 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13109 BGS_APPTAG_ERR_MASK;
13110 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13111 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13112 BGS_REFTAG_ERR_MASK;
13113
13114 /* Check to see if there was any good data before the error */
13115 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13116 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13117 BGS_HI_WATER_MARK_PRESENT_MASK;
13118 pIocbIn->iocb.unsli3.sli3_bg.bghm =
13119 wcqe->total_data_placed;
13120 }
13121
13122 /*
13123 * Set ALL the error bits to indicate we don't know what
13124 * type of error it is.
13125 */
13126 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13127 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13128 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13129 BGS_GUARD_ERR_MASK);
13130 }
13131
341af102
JS
13132 /* Pick up HBA exchange busy condition */
13133 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13134 spin_lock_irqsave(&phba->hbalock, iflags);
13135 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13136 spin_unlock_irqrestore(&phba->hbalock, iflags);
13137 }
4f774513
JS
13138}
13139
45ed1190
JS
13140/**
13141 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13142 * @phba: Pointer to HBA context object.
13143 * @irspiocbq: Pointer to the iocbq carrying the work-queue completion queue entry.
13144 *
13145 * This routine handles an ELS work-queue completion event and constructs
13146 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13147 * discovery engine to handle.
13148 *
13149 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13150 **/
13151static struct lpfc_iocbq *
13152lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13153 struct lpfc_iocbq *irspiocbq)
13154{
895427bd 13155 struct lpfc_sli_ring *pring;
45ed1190
JS
13156 struct lpfc_iocbq *cmdiocbq;
13157 struct lpfc_wcqe_complete *wcqe;
13158 unsigned long iflags;
13159
895427bd 13160 pring = lpfc_phba_elsring(phba);
1234a6d5
DK
13161 if (unlikely(!pring))
13162 return NULL;
895427bd 13163
45ed1190 13164 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
45ed1190
JS
13165 pring->stats.iocb_event++;
13166 /* Look up the ELS command IOCB and create pseudo response IOCB */
13167 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13168 bf_get(lpfc_wcqe_c_request_tag, wcqe));
45ed1190
JS
13169 if (unlikely(!cmdiocbq)) {
13170 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13171 "0386 ELS complete with no corresponding "
401bb416
DK
13172 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13173 wcqe->word0, wcqe->total_data_placed,
13174 wcqe->parameter, wcqe->word3);
45ed1190
JS
13175 lpfc_sli_release_iocbq(phba, irspiocbq);
13176 return NULL;
13177 }
13178
e2a8be56 13179 spin_lock_irqsave(&pring->ring_lock, iflags);
401bb416
DK
13180 /* Put the iocb back on the txcmplq */
13181 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13182 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13183
45ed1190 13184 /* Fake the irspiocbq and copy necessary response information */
341af102 13185 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
45ed1190
JS
13186
13187 return irspiocbq;
13188}
13189
8a5ca109
JS
13190inline struct lpfc_cq_event *
13191lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13192{
13193 struct lpfc_cq_event *cq_event;
13194
13195 /* Allocate a new internal CQ_EVENT entry */
13196 cq_event = lpfc_sli4_cq_event_alloc(phba);
13197 if (!cq_event) {
13198 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13199 "0602 Failed to alloc CQ_EVENT entry\n");
13200 return NULL;
13201 }
13202
13203 /* Move the CQE into the event */
13204 memcpy(&cq_event->cqe, entry, size);
13205 return cq_event;
13206}
13207
04c68496 13208/**
291c2548 13209 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
04c68496
JS
13210 * @phba: Pointer to HBA context object.
13211 * @cqe: Pointer to mailbox completion queue entry.
13212 *
291c2548 13213 * This routine processes a mailbox completion queue entry with asynchronous
04c68496
JS
13214 * event.
13215 *
13216 * Return: true if work posted to worker thread, otherwise false.
13217 **/
13218static bool
13219lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13220{
13221 struct lpfc_cq_event *cq_event;
13222 unsigned long iflags;
13223
13224 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13225 "0392 Async Event: word0:x%x, word1:x%x, "
13226 "word2:x%x, word3:x%x\n", mcqe->word0,
13227 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13228
8a5ca109
JS
13229 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13230 if (!cq_event)
04c68496 13231 return false;
04c68496
JS
13232 spin_lock_irqsave(&phba->hbalock, iflags);
13233 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13234 /* Set the async event flag */
13235 phba->hba_flag |= ASYNC_EVENT;
13236 spin_unlock_irqrestore(&phba->hbalock, iflags);
13237
13238 return true;
13239}
13240
13241/**
13242 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13243 * @phba: Pointer to HBA context object.
13244 * @cqe: Pointer to mailbox completion queue entry.
13245 *
13246 * This routine processes a mailbox completion queue entry with mailbox
13247 * completion event.
13248 *
13249 * Return: true if work posted to worker thread, otherwise false.
13250 **/
13251static bool
13252lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13253{
13254 uint32_t mcqe_status;
13255 MAILBOX_t *mbox, *pmbox;
13256 struct lpfc_mqe *mqe;
13257 struct lpfc_vport *vport;
13258 struct lpfc_nodelist *ndlp;
13259 struct lpfc_dmabuf *mp;
13260 unsigned long iflags;
13261 LPFC_MBOXQ_t *pmb;
13262 bool workposted = false;
13263 int rc;
13264
13265 /* If not a mailbox complete MCQE, out by checking mailbox consume */
13266 if (!bf_get(lpfc_trailer_completed, mcqe))
13267 goto out_no_mqe_complete;
13268
13269 /* Get the reference to the active mbox command */
13270 spin_lock_irqsave(&phba->hbalock, iflags);
13271 pmb = phba->sli.mbox_active;
13272 if (unlikely(!pmb)) {
13273 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13274 "1832 No pending MBOX command to handle\n");
13275 spin_unlock_irqrestore(&phba->hbalock, iflags);
13276 goto out_no_mqe_complete;
13277 }
13278 spin_unlock_irqrestore(&phba->hbalock, iflags);
13279 mqe = &pmb->u.mqe;
13280 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13281 mbox = phba->mbox;
13282 vport = pmb->vport;
13283
13284 /* Reset heartbeat timer */
13285 phba->last_completion_time = jiffies;
13286 del_timer(&phba->sli.mbox_tmo);
13287
13288 /* Move mbox data to caller's mailbox region, do endian swapping */
13289 if (pmb->mbox_cmpl && mbox)
48f8fdb4 13290 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
04c68496 13291
73d91e50
JS
13292 /*
13293 * For mcqe errors, conditionally move a modified error code to
13294 * the mbox so that the error will not be missed.
13295 */
13296 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13297 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13298 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13299 bf_set(lpfc_mqe_status, mqe,
13300 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13301 }
04c68496
JS
13302 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13303 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13304 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13305 "MBOX dflt rpi: status:x%x rpi:x%x",
13306 mcqe_status,
13307 pmbox->un.varWords[0], 0);
13308 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
3e1f0718
JS
13309 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13310 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
04c68496
JS
13311 /* Reg_LOGIN of dflt RPI was successful. Now let's get
13312 * rid of the RPI using the same mbox buffer.
13313 */
13314 lpfc_unreg_login(phba, vport->vpi,
13315 pmbox->un.varWords[0], pmb);
13316 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3e1f0718
JS
13317 pmb->ctx_buf = mp;
13318 pmb->ctx_ndlp = ndlp;
04c68496
JS
13319 pmb->vport = vport;
13320 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13321 if (rc != MBX_BUSY)
13322 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
13323 LOG_SLI, "0385 rc should "
13324 "have been MBX_BUSY\n");
13325 if (rc != MBX_NOT_FINISHED)
13326 goto send_current_mbox;
13327 }
13328 }
13329 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13330 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13331 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13332
13333 /* There is mailbox completion work to do */
13334 spin_lock_irqsave(&phba->hbalock, iflags);
13335 __lpfc_mbox_cmpl_put(phba, pmb);
13336 phba->work_ha |= HA_MBATT;
13337 spin_unlock_irqrestore(&phba->hbalock, iflags);
13338 workposted = true;
13339
13340send_current_mbox:
13341 spin_lock_irqsave(&phba->hbalock, iflags);
13342 /* Release the mailbox command posting token */
13343 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13344 /* Setting active mailbox pointer need to be in sync to flag clear */
13345 phba->sli.mbox_active = NULL;
07b85824
JS
13346 if (bf_get(lpfc_trailer_consumed, mcqe))
13347 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
04c68496
JS
13348 spin_unlock_irqrestore(&phba->hbalock, iflags);
13349 /* Wake up worker thread to post the next pending mailbox command */
13350 lpfc_worker_wake_up(phba);
07b85824
JS
13351 return workposted;
13352
04c68496 13353out_no_mqe_complete:
07b85824 13354 spin_lock_irqsave(&phba->hbalock, iflags);
04c68496
JS
13355 if (bf_get(lpfc_trailer_consumed, mcqe))
13356 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
07b85824
JS
13357 spin_unlock_irqrestore(&phba->hbalock, iflags);
13358 return false;
04c68496
JS
13359}
13360
13361/**
13362 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13363 * @phba: Pointer to HBA context object.
13364 * @cqe: Pointer to mailbox completion queue entry.
13365 *
13366 * This routine processes a mailbox completion queue entry; it invokes the
291c2548 13367 * proper mailbox complete handling or asynchronous event handling routine
04c68496
JS
13368 * according to the MCQE's async bit.
13369 *
13370 * Return: true if work posted to worker thread, otherwise false.
13371 **/
13372static bool
32517fc0
JS
13373lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13374 struct lpfc_cqe *cqe)
04c68496
JS
13375{
13376 struct lpfc_mcqe mcqe;
13377 bool workposted;
13378
32517fc0
JS
13379 cq->CQ_mbox++;
13380
04c68496 13381 /* Copy the mailbox MCQE and convert endian order as needed */
48f8fdb4 13382 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
04c68496
JS
13383
13384 /* Invoke the proper event handling routine */
13385 if (!bf_get(lpfc_trailer_async, &mcqe))
13386 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13387 else
13388 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13389 return workposted;
13390}
13391
4f774513
JS
13392/**
13393 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13394 * @phba: Pointer to HBA context object.
2a76a283 13395 * @cq: Pointer to associated CQ
4f774513
JS
13396 * @wcqe: Pointer to work-queue completion queue entry.
13397 *
13398 * This routine handles an ELS work-queue completion event.
13399 *
13400 * Return: true if work posted to worker thread, otherwise false.
13401 **/
13402static bool
2a76a283 13403lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
13404 struct lpfc_wcqe_complete *wcqe)
13405{
4f774513
JS
13406 struct lpfc_iocbq *irspiocbq;
13407 unsigned long iflags;
2a76a283 13408 struct lpfc_sli_ring *pring = cq->pring;
0e9bb8d7
JS
13409 int txq_cnt = 0;
13410 int txcmplq_cnt = 0;
4f774513 13411
11f0e34f
JS
13412 /* Check for response status */
13413 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13414 /* Log the error status */
13415 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13416 "0357 ELS CQE error: status=x%x: "
13417 "CQE: %08x %08x %08x %08x\n",
13418 bf_get(lpfc_wcqe_c_status, wcqe),
13419 wcqe->word0, wcqe->total_data_placed,
13420 wcqe->parameter, wcqe->word3);
13421 }
13422
45ed1190 13423 /* Get an irspiocbq for later ELS response processing use */
4f774513
JS
13424 irspiocbq = lpfc_sli_get_iocbq(phba);
13425 if (!irspiocbq) {
0e9bb8d7
JS
13426 if (!list_empty(&pring->txq))
13427 txq_cnt++;
13428 if (!list_empty(&pring->txcmplq))
13429 txcmplq_cnt++;
4f774513 13430 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2a9bf3d0 13431 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
ff349bca 13432 "els_txcmplq_cnt=%d\n",
0e9bb8d7 13433 txq_cnt, phba->iocb_cnt,
0e9bb8d7 13434 txcmplq_cnt);
45ed1190 13435 return false;
4f774513 13436 }
4f774513 13437
45ed1190
JS
13438 /* Save off the slow-path queue event for work thread to process */
13439 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
4f774513 13440 spin_lock_irqsave(&phba->hbalock, iflags);
4d9ab994 13441 list_add_tail(&irspiocbq->cq_event.list,
45ed1190
JS
13442 &phba->sli4_hba.sp_queue_event);
13443 phba->hba_flag |= HBA_SP_QUEUE_EVT;
4f774513 13444 spin_unlock_irqrestore(&phba->hbalock, iflags);
4f774513 13445
45ed1190 13446 return true;
4f774513
JS
13447}
13448
13449/**
13450 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13451 * @phba: Pointer to HBA context object.
13452 * @wcqe: Pointer to work-queue completion queue entry.
13453 *
3f8b6fb7 13454 * This routine handles slow-path WQ entry consumed event by invoking the
4f774513
JS
13455 * proper WQ release routine to the slow-path WQ.
13456 **/
13457static void
13458lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13459 struct lpfc_wcqe_release *wcqe)
13460{
2e90f4b5
JS
13461 /* sanity check on queue memory */
13462 if (unlikely(!phba->sli4_hba.els_wq))
13463 return;
4f774513
JS
13464 /* Check for the slow-path ELS work queue */
13465 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13466 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13467 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13468 else
13469 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13470 "2579 Slow-path wqe consume event carries "
13471 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13472 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13473 phba->sli4_hba.els_wq->queue_id);
13474}
13475
13476/**
13477 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13478 * @phba: Pointer to HBA context object.
13479 * @cq: Pointer to a WQ completion queue.
13480 * @wcqe: Pointer to work-queue completion queue entry.
13481 *
13482 * This routine handles an XRI abort event.
13483 *
13484 * Return: true if work posted to worker thread, otherwise false.
13485 **/
13486static bool
13487lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13488 struct lpfc_queue *cq,
13489 struct sli4_wcqe_xri_aborted *wcqe)
13490{
13491 bool workposted = false;
13492 struct lpfc_cq_event *cq_event;
13493 unsigned long iflags;
13494
4f774513 13495 switch (cq->subtype) {
c00f62e6
JS
13496 case LPFC_IO:
13497 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13498 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13499 /* Notify aborted XRI for NVME work queue */
13500 if (phba->nvmet_support)
13501 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13502 }
5e5b511d 13503 workposted = false;
4f774513 13504 break;
422c4cb7 13505 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
4f774513 13506 case LPFC_ELS:
8a5ca109
JS
13507 cq_event = lpfc_cq_event_setup(
13508 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13509 if (!cq_event)
13510 return false;
5e5b511d 13511 cq_event->hdwq = cq->hdwq;
4f774513
JS
13512 spin_lock_irqsave(&phba->hbalock, iflags);
13513 list_add_tail(&cq_event->list,
13514 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13515 /* Set the els xri abort event flag */
13516 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13517 spin_unlock_irqrestore(&phba->hbalock, iflags);
13518 workposted = true;
13519 break;
13520 default:
13521 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
318083ad
JS
13522 "0603 Invalid CQ subtype %d: "
13523 "%08x %08x %08x %08x\n",
13524 cq->subtype, wcqe->word0, wcqe->parameter,
13525 wcqe->word2, wcqe->word3);
4f774513
JS
13526 workposted = false;
13527 break;
13528 }
13529 return workposted;
13530}
13531
e817e5d7
JS
13532#define FC_RCTL_MDS_DIAGS 0xF4
13533
4f774513
JS
13534/**
13535 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13536 * @phba: Pointer to HBA context object.
13537 * @rcqe: Pointer to receive-queue completion queue entry.
13538 *
13539 * This routine processes a receive-queue completion queue entry.
13540 *
13541 * Return: true if work posted to worker thread, otherwise false.
13542 **/
13543static bool
4d9ab994 13544lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
4f774513 13545{
4f774513 13546 bool workposted = false;
e817e5d7 13547 struct fc_frame_header *fc_hdr;
4f774513
JS
13548 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13549 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
547077a4 13550 struct lpfc_nvmet_tgtport *tgtp;
4f774513 13551 struct hbq_dmabuf *dma_buf;
7851fe2c 13552 uint32_t status, rq_id;
4f774513
JS
13553 unsigned long iflags;
13554
2e90f4b5
JS
13555 /* sanity check on queue memory */
13556 if (unlikely(!hrq) || unlikely(!drq))
13557 return workposted;
13558
7851fe2c
JS
13559 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13560 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13561 else
13562 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13563 if (rq_id != hrq->queue_id)
4f774513
JS
13564 goto out;
13565
4d9ab994 13566 status = bf_get(lpfc_rcqe_status, rcqe);
4f774513
JS
13567 switch (status) {
13568 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13569 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13570 "2537 Receive Frame Truncated!!\n");
5bd5f66c 13571 /* fall through */
4f774513
JS
13572 case FC_STATUS_RQ_SUCCESS:
13573 spin_lock_irqsave(&phba->hbalock, iflags);
cbc5de1b 13574 lpfc_sli4_rq_release(hrq, drq);
4f774513
JS
13575 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13576 if (!dma_buf) {
b84daac9 13577 hrq->RQ_no_buf_found++;
4f774513
JS
13578 spin_unlock_irqrestore(&phba->hbalock, iflags);
13579 goto out;
13580 }
b84daac9 13581 hrq->RQ_rcv_buf++;
547077a4 13582 hrq->RQ_buf_posted--;
4d9ab994 13583 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
895427bd 13584
e817e5d7
JS
13585 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13586
13587 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13588 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13589 spin_unlock_irqrestore(&phba->hbalock, iflags);
13590 /* Handle MDS Loopback frames */
13591 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
13592 break;
13593 }
13594
13595 /* save off the frame for the work thread to process */
4d9ab994 13596 list_add_tail(&dma_buf->cq_event.list,
45ed1190 13597 &phba->sli4_hba.sp_queue_event);
4f774513 13598 /* Frame received */
45ed1190 13599 phba->hba_flag |= HBA_SP_QUEUE_EVT;
4f774513
JS
13600 spin_unlock_irqrestore(&phba->hbalock, iflags);
13601 workposted = true;
13602 break;
4f774513 13603 case FC_STATUS_INSUFF_BUF_FRM_DISC:
547077a4
JS
13604 if (phba->nvmet_support) {
13605 tgtp = phba->targetport->private;
13606 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13607 "6402 RQE Error x%x, posted %d err_cnt "
13608 "%d: %x %x %x\n",
13609 status, hrq->RQ_buf_posted,
13610 hrq->RQ_no_posted_buf,
13611 atomic_read(&tgtp->rcv_fcp_cmd_in),
13612 atomic_read(&tgtp->rcv_fcp_cmd_out),
13613 atomic_read(&tgtp->xmt_fcp_release));
13614 }
13615 /* fallthrough */
13616
13617 case FC_STATUS_INSUFF_BUF_NEED_BUF:
b84daac9 13618 hrq->RQ_no_posted_buf++;
4f774513
JS
13619 /* Post more buffers if possible */
13620 spin_lock_irqsave(&phba->hbalock, iflags);
13621 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13622 spin_unlock_irqrestore(&phba->hbalock, iflags);
13623 workposted = true;
13624 break;
13625 }
13626out:
13627 return workposted;
4f774513
JS
13628}
13629
4d9ab994
JS
13630/**
13631 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13632 * @phba: Pointer to HBA context object.
13633 * @cq: Pointer to the completion queue.
32517fc0 13634 * @cqe: Pointer to a completion queue entry.
4d9ab994 13635 *
25985edc 13636 * This routine processes a slow-path work-queue or receive-queue completion queue
4d9ab994
JS
13637 * entry.
13638 *
13639 * Return: true if work posted to worker thread, otherwise false.
13640 **/
13641static bool
13642lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13643 struct lpfc_cqe *cqe)
13644{
45ed1190 13645 struct lpfc_cqe cqevt;
4d9ab994
JS
13646 bool workposted = false;
13647
13648 /* Copy the work queue CQE and convert endian order if needed */
48f8fdb4 13649 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
4d9ab994
JS
13650
13651 /* Check and process for different type of WCQE and dispatch */
45ed1190 13652 switch (bf_get(lpfc_cqe_code, &cqevt)) {
4d9ab994 13653 case CQE_CODE_COMPL_WQE:
45ed1190 13654 /* Process the WQ/RQ complete event */
bc73905a 13655 phba->last_completion_time = jiffies;
2a76a283 13656 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
45ed1190 13657 (struct lpfc_wcqe_complete *)&cqevt);
4d9ab994
JS
13658 break;
13659 case CQE_CODE_RELEASE_WQE:
13660 /* Process the WQ release event */
13661 lpfc_sli4_sp_handle_rel_wcqe(phba,
45ed1190 13662 (struct lpfc_wcqe_release *)&cqevt);
4d9ab994
JS
13663 break;
13664 case CQE_CODE_XRI_ABORTED:
13665 /* Process the WQ XRI abort event */
bc73905a 13666 phba->last_completion_time = jiffies;
4d9ab994 13667 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
45ed1190 13668 (struct sli4_wcqe_xri_aborted *)&cqevt);
4d9ab994
JS
13669 break;
13670 case CQE_CODE_RECEIVE:
7851fe2c 13671 case CQE_CODE_RECEIVE_V1:
4d9ab994 13672 /* Process the RQ event */
bc73905a 13673 phba->last_completion_time = jiffies;
4d9ab994 13674 workposted = lpfc_sli4_sp_handle_rcqe(phba,
45ed1190 13675 (struct lpfc_rcqe *)&cqevt);
4d9ab994
JS
13676 break;
13677 default:
13678 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13679 "0388 Not a valid WCQE code: x%x\n",
45ed1190 13680 bf_get(lpfc_cqe_code, &cqevt));
4d9ab994
JS
13681 break;
13682 }
13683 return workposted;
13684}
13685
4f774513
JS
13686/**
13687 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13688 * @phba: Pointer to HBA context object.
13689 * @eqe: Pointer to fast-path event queue entry.
13690 *
13691 * This routine processes an event queue entry from the slow-path event queue.
13692 * It will check the MajorCode and MinorCode to determine whether this is for a
13693 * completion event on a completion queue; if not, an error shall be logged and
13694 * the routine will just return. Otherwise, it will get to the corresponding
13695 * completion queue, process all the entries on that completion queue, rearm the
13696 * completion queue, and then return.
13697 *
13698 **/
f485c18d 13699static void
67d12733
JS
13700lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13701 struct lpfc_queue *speq)
4f774513 13702{
67d12733 13703 struct lpfc_queue *cq = NULL, *childq;
4f774513
JS
13704 uint16_t cqid;
13705
4f774513 13706 /* Get the reference to the corresponding CQ */
cb5172ea 13707 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
4f774513 13708
4f774513
JS
13709 list_for_each_entry(childq, &speq->child_list, list) {
13710 if (childq->queue_id == cqid) {
13711 cq = childq;
13712 break;
13713 }
13714 }
13715 if (unlikely(!cq)) {
75baf696
JS
13716 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13717 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13718 "0365 Slow-path CQ identifier "
13719 "(%d) does not exist\n", cqid);
f485c18d 13720 return;
4f774513
JS
13721 }
13722
895427bd
JS
13723 /* Save EQ associated with this CQ */
13724 cq->assoc_qp = speq;
13725
6a828b0f 13726 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
f485c18d
DK
13727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13728 "0390 Cannot schedule soft IRQ "
13729 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
d6d189ce 13730 cqid, cq->queue_id, raw_smp_processor_id());
f485c18d
DK
13731}
13732
13733/**
32517fc0 13734 * __lpfc_sli4_process_cq - Process elements of a CQ
f485c18d 13735 * @phba: Pointer to HBA context object.
32517fc0
JS
13736 * @cq: Pointer to CQ to be processed
13737 * @handler: Routine to process each cqe
13738 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
f485c18d 13739 *
32517fc0
JS
13740 * This routine processes completion queue entries in a CQ. While a valid
13741 * queue element is found, the handler is called. During processing checks
13742 * are made for periodic doorbell writes to let the hardware know of
13743 * element consumption.
13744 *
13745 * If the max limit on cqes to process is hit, or there are no more valid
13746 * entries, the loop stops. If we processed a sufficient number of elements,
13747 * meaning there is sufficient load, rather than rearming and generating
13748 * another interrupt, a cq rescheduling delay will be set. A delay of 0
13749 * indicates no rescheduling.
f485c18d 13750 *
32517fc0 13751 * Returns True if work scheduled, False otherwise.
f485c18d 13752 **/
32517fc0
JS
13753static bool
13754__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13755 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13756 struct lpfc_cqe *), unsigned long *delay)
f485c18d 13757{
f485c18d
DK
13758 struct lpfc_cqe *cqe;
13759 bool workposted = false;
32517fc0
JS
13760 int count = 0, consumed = 0;
13761 bool arm = true;
13762
13763 /* default - no reschedule */
13764 *delay = 0;
13765
13766 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13767 goto rearm_and_exit;
f485c18d 13768
4f774513 13769 /* Process all the entries to the CQ */
d74a89aa 13770 cq->q_flag = 0;
32517fc0
JS
13771 cqe = lpfc_sli4_cq_get(cq);
13772 while (cqe) {
32517fc0
JS
13773 workposted |= handler(phba, cq, cqe);
13774 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13775
13776 consumed++;
13777 if (!(++count % cq->max_proc_limit))
13778 break;
13779
13780 if (!(count % cq->notify_interval)) {
13781 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13782 LPFC_QUEUE_NOARM);
13783 consumed = 0;
8156d378 13784 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
32517fc0
JS
13785 }
13786
d74a89aa
JS
13787 if (count == LPFC_NVMET_CQ_NOTIFY)
13788 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13789
32517fc0
JS
13790 cqe = lpfc_sli4_cq_get(cq);
13791 }
13792 if (count >= phba->cfg_cq_poll_threshold) {
13793 *delay = 1;
13794 arm = false;
13795 }
13796
13797 /* Track the max number of CQEs processed in 1 EQ */
13798 if (count > cq->CQ_max_cqe)
13799 cq->CQ_max_cqe = count;
13800
13801 cq->assoc_qp->EQ_cqe_cnt += count;
13802
13803 /* Catch the no cq entry condition */
13804 if (unlikely(count == 0))
13805 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13806 "0369 No entry from completion queue "
13807 "qid=%d\n", cq->queue_id);
13808
164ba8d2 13809 xchg(&cq->queue_claimed, 0);
32517fc0
JS
13810
13811rearm_and_exit:
13812 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13813 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
13814
13815 return workposted;
13816}
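
/*
 * Illustrative sketch (all example names are hypothetical): a per-CQE
 * callback with the signature expected by __lpfc_sli4_process_cq() above,
 * and a caller that honors its two outputs -- the return value (work was
 * posted for the worker thread) and the rescheduling delay.
 */
#if 0	/* example only, never compiled */
static bool lpfc_example_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe)
{
	/* Consume the entry without posting work to the worker thread */
	return false;
}

static void lpfc_example_drain_cq(struct lpfc_hba *phba,
				  struct lpfc_queue *cq)
{
	unsigned long delay;

	if (__lpfc_sli4_process_cq(phba, cq, lpfc_example_handle_cqe, &delay))
		lpfc_worker_wake_up(phba);
	if (delay)
		queue_delayed_work_on(cq->chann, phba->wq,
				      &cq->sched_spwork, delay);
}
#endif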
13817
13818/**
13819 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
13820 * @cq: pointer to CQ to process
13821 *
13822 * This routine calls the cq processing routine with a handler specific
13823 * to the type of queue bound to it.
13824 *
13825 * The CQ routine returns two values: the first is the calling status,
13826 * which indicates whether work was queued to the background discovery
13827 * thread. If true, the routine should wakeup the discovery thread;
13828 * the second is the delay parameter. If non-zero, rather than rearming
13829 * the CQ and yet another interrupt, the CQ handler should be queued so
13830 * that it is processed in a subsequent polling action. The value of
13831 * the delay indicates when to reschedule it.
13832 **/
13833static void
13834__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13835{
13836 struct lpfc_hba *phba = cq->phba;
13837 unsigned long delay;
13838 bool workposted = false;
13839
13840 /* Process and rearm the CQ */
4f774513
JS
13841 switch (cq->type) {
13842 case LPFC_MCQ:
32517fc0
JS
13843 workposted |= __lpfc_sli4_process_cq(phba, cq,
13844 lpfc_sli4_sp_handle_mcqe,
13845 &delay);
4f774513
JS
13846 break;
13847 case LPFC_WCQ:
c00f62e6 13848 if (cq->subtype == LPFC_IO)
32517fc0
JS
13849 workposted |= __lpfc_sli4_process_cq(phba, cq,
13850 lpfc_sli4_fp_handle_cqe,
13851 &delay);
13852 else
13853 workposted |= __lpfc_sli4_process_cq(phba, cq,
13854 lpfc_sli4_sp_handle_cqe,
13855 &delay);
4f774513
JS
13856 break;
13857 default:
13858 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13859 "0370 Invalid completion queue type (%d)\n",
13860 cq->type);
f485c18d 13861 return;
4f774513
JS
13862 }
13863
32517fc0
JS
13864 if (delay) {
13865 if (!queue_delayed_work_on(cq->chann, phba->wq,
13866 &cq->sched_spwork, delay))
13867 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13868 "0394 Cannot schedule soft IRQ "
13869 "for cqid=%d on CPU %d\n",
13870 cq->queue_id, cq->chann);
13871 }
4f774513
JS
13872
13873 /* wake up worker thread if there are works to be done */
13874 if (workposted)
13875 lpfc_worker_wake_up(phba);
13876}
13877
32517fc0
JS
13878/**
13879 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
13880 * interrupt
13881 * @work: pointer to work element
13882 *
13883 * translates from the work handler and calls the slow-path handler.
13884 **/
13885static void
13886lpfc_sli4_sp_process_cq(struct work_struct *work)
13887{
13888 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13889
13890 __lpfc_sli4_sp_process_cq(cq);
13891}
13892
13893/**
13894 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
13895 * @work: pointer to work element
13896 *
13897 * translates from the work handler and calls the slow-path handler.
13898 **/
13899static void
13900lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13901{
13902 struct lpfc_queue *cq = container_of(to_delayed_work(work),
13903 struct lpfc_queue, sched_spwork);
13904
13905 __lpfc_sli4_sp_process_cq(cq);
13906}
13907
4f774513
JS
13908/**
13909 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
2a76a283
JS
13910 * @phba: Pointer to HBA context object.
13911 * @cq: Pointer to associated CQ
13912 * @wcqe: Pointer to work-queue completion queue entry.
4f774513
JS
13913 *
13914 * This routine processes a fast-path work queue completion entry from the fast-path
13915 * event queue for FCP command response completion.
13916 **/
13917static void
2a76a283 13918lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
13919 struct lpfc_wcqe_complete *wcqe)
13920{
2a76a283 13921 struct lpfc_sli_ring *pring = cq->pring;
4f774513
JS
13922 struct lpfc_iocbq *cmdiocbq;
13923 struct lpfc_iocbq irspiocbq;
13924 unsigned long iflags;
13925
4f774513
JS
13926 /* Check for response status */
13927 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13928 /* If resource errors reported from HBA, reduce queue
13929 * depth of the SCSI device.
13930 */
e3d2b802
JS
13931 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13932 IOSTAT_LOCAL_REJECT)) &&
13933 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13934 IOERR_NO_RESOURCES))
4f774513 13935 phba->lpfc_rampdown_queue_depth(phba);
e3d2b802 13936
4f774513 13937 /* Log the error status */
11f0e34f
JS
13938 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13939 "0373 FCP CQE error: status=x%x: "
13940 "CQE: %08x %08x %08x %08x\n",
4f774513 13941 bf_get(lpfc_wcqe_c_status, wcqe),
11f0e34f
JS
13942 wcqe->word0, wcqe->total_data_placed,
13943 wcqe->parameter, wcqe->word3);
4f774513
JS
13944 }
13945
13946 /* Look up the FCP command IOCB and create pseudo response IOCB */
7e56aa25
JS
13947 spin_lock_irqsave(&pring->ring_lock, iflags);
13948 pring->stats.iocb_event++;
e2a8be56 13949 spin_unlock_irqrestore(&pring->ring_lock, iflags);
4f774513
JS
13950 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13951 bf_get(lpfc_wcqe_c_request_tag, wcqe));
4f774513
JS
13952 if (unlikely(!cmdiocbq)) {
13953 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13954 "0374 FCP complete with no corresponding "
13955 "cmdiocb: iotag (%d)\n",
13956 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13957 return;
13958 }
c8a4ce0b
DK
13959#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13960 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13961#endif
895427bd
JS
13962 if (cmdiocbq->iocb_cmpl == NULL) {
13963 if (cmdiocbq->wqe_cmpl) {
13964 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13965 spin_lock_irqsave(&phba->hbalock, iflags);
13966 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13967 spin_unlock_irqrestore(&phba->hbalock, iflags);
13968 }
13969
13970 /* Pass the cmd_iocb and the wcqe to the upper layer */
13971 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13972 return;
13973 }
4f774513
JS
13974 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13975 "0375 FCP cmdiocb not callback function "
13976 "iotag: (%d)\n",
13977 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13978 return;
13979 }
13980
13981 /* Fake the irspiocb and copy necessary response information */
341af102 13982 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
4f774513 13983
0f65ff68
JS
13984 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13985 spin_lock_irqsave(&phba->hbalock, iflags);
13986 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13987 spin_unlock_irqrestore(&phba->hbalock, iflags);
13988 }
13989
4f774513
JS
13990 /* Pass the cmd_iocb and the rsp state to the upper layer */
13991 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13992}
13993
13994/**
13995 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13996 * @phba: Pointer to HBA context object.
13997 * @cq: Pointer to completion queue.
13998 * @wcqe: Pointer to work-queue completion queue entry.
13999 *
3f8b6fb7 14000 * This routine handles a fast-path WQ entry consumed event by invoking the
4f774513
JS
14001 * proper WQ release routine to the slow-path WQ.
14002 **/
14003static void
14004lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14005 struct lpfc_wcqe_release *wcqe)
14006{
14007 struct lpfc_queue *childwq;
14008 bool wqid_matched = false;
895427bd 14009 uint16_t hba_wqid;
4f774513
JS
14010
14011 /* Check for fast-path FCP work queue release */
895427bd 14012 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
4f774513 14013 list_for_each_entry(childwq, &cq->child_list, list) {
895427bd 14014 if (childwq->queue_id == hba_wqid) {
4f774513
JS
14015 lpfc_sli4_wq_release(childwq,
14016 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
6e8e1c14
JS
14017 if (childwq->q_flag & HBA_NVMET_WQFULL)
14018 lpfc_nvmet_wqfull_process(phba, childwq);
4f774513
JS
14019 wqid_matched = true;
14020 break;
14021 }
14022 }
14023 /* Report warning log message if no match found */
14024 if (wqid_matched != true)
14025 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14026 "2580 Fast-path wqe consume event carries "
895427bd 14027 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
4f774513
JS
14028}
14029
14030/**
2d7dbc4c
JS
14031 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
14032 * @phba: Pointer to HBA context object.
14033 * @rcqe: Pointer to receive-queue completion queue entry.
4f774513 14034 *
2d7dbc4c
JS
14035 * This routine processes a receive-queue completion queue entry.
14036 *
14037 * Return: true if work posted to worker thread, otherwise false.
14038 **/
14039static bool
14040lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14041 struct lpfc_rcqe *rcqe)
14042{
14043 bool workposted = false;
14044 struct lpfc_queue *hrq;
14045 struct lpfc_queue *drq;
14046 struct rqb_dmabuf *dma_buf;
14047 struct fc_frame_header *fc_hdr;
547077a4 14048 struct lpfc_nvmet_tgtport *tgtp;
2d7dbc4c
JS
14049 uint32_t status, rq_id;
14050 unsigned long iflags;
14051 uint32_t fctl, idx;
14052
14053 if ((phba->nvmet_support == 0) ||
14054 (phba->sli4_hba.nvmet_cqset == NULL))
14055 return workposted;
14056
14057 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14058 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14059 drq = phba->sli4_hba.nvmet_mrq_data[idx];
14060
14061 /* sanity check on queue memory */
14062 if (unlikely(!hrq) || unlikely(!drq))
14063 return workposted;
14064
14065 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14066 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14067 else
14068 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14069
14070 if ((phba->nvmet_support == 0) ||
14071 (rq_id != hrq->queue_id))
14072 return workposted;
14073
14074 status = bf_get(lpfc_rcqe_status, rcqe);
14075 switch (status) {
14076 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14077 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14078 "6126 Receive Frame Truncated!!\n");
5bd5f66c 14079 /* fall through */
2d7dbc4c 14080 case FC_STATUS_RQ_SUCCESS:
2d7dbc4c 14081 spin_lock_irqsave(&phba->hbalock, iflags);
cbc5de1b 14082 lpfc_sli4_rq_release(hrq, drq);
2d7dbc4c
JS
14083 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14084 if (!dma_buf) {
14085 hrq->RQ_no_buf_found++;
14086 spin_unlock_irqrestore(&phba->hbalock, iflags);
14087 goto out;
14088 }
14089 spin_unlock_irqrestore(&phba->hbalock, iflags);
14090 hrq->RQ_rcv_buf++;
547077a4 14091 hrq->RQ_buf_posted--;
2d7dbc4c
JS
14092 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14093
14094 /* Just some basic sanity checks on FCP Command frame */
14095 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3a8070c5
JS
14096 fc_hdr->fh_f_ctl[1] << 8 |
14097 fc_hdr->fh_f_ctl[2]);
2d7dbc4c
JS
14098 if (((fctl &
14099 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14100 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14101 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
14102 goto drop;
14103
14104 if (fc_hdr->fh_type == FC_TYPE_FCP) {
d74a89aa 14105 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
d613b6a7 14106 lpfc_nvmet_unsol_fcp_event(
d74a89aa
JS
14107 phba, idx, dma_buf, cq->isr_timestamp,
14108 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
2d7dbc4c
JS
14109 return false;
14110 }
14111drop:
22b738ac 14112 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
2d7dbc4c 14113 break;
2d7dbc4c 14114 case FC_STATUS_INSUFF_BUF_FRM_DISC:
547077a4
JS
14115 if (phba->nvmet_support) {
14116 tgtp = phba->targetport->private;
14117 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
14118 "6401 RQE Error x%x, posted %d err_cnt "
14119 "%d: %x %x %x\n",
14120 status, hrq->RQ_buf_posted,
14121 hrq->RQ_no_posted_buf,
14122 atomic_read(&tgtp->rcv_fcp_cmd_in),
14123 atomic_read(&tgtp->rcv_fcp_cmd_out),
14124 atomic_read(&tgtp->xmt_fcp_release));
14125 }
14126 /* fallthrough */
14127
14128 case FC_STATUS_INSUFF_BUF_NEED_BUF:
2d7dbc4c
JS
14129 hrq->RQ_no_posted_buf++;
14130 /* Post more buffers if possible */
2d7dbc4c
JS
14131 break;
14132 }
14133out:
14134 return workposted;
14135}
14136
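/*
 * Editor's sketch (illustrative only): the receive path above rebuilds the
 * 24-bit F_CTL field from the three header bytes and drops any frame that is
 * not a single-frame sequence with SEQ_CNT zero.  A standalone restatement of
 * that check; the helper name and the required_bits parameter are assumptions,
 * the real code tests the FC_FC_FIRST_SEQ, FC_FC_END_SEQ and FC_FC_SEQ_INIT
 * masks from fc_fs.h.
 */
#include <stdbool.h>
#include <stdint.h>

static bool single_frame_sequence(const uint8_t f_ctl[3], uint16_t seq_cnt,
				  uint32_t required_bits)
{
	/* F_CTL is a 24-bit field spread over fh_f_ctl[0..2]. */
	uint32_t fctl = ((uint32_t)f_ctl[0] << 16) |
			((uint32_t)f_ctl[1] << 8) |
			 (uint32_t)f_ctl[2];

	/* All required frame-control bits must be set and SEQ_CNT must be 0. */
	return ((fctl & required_bits) == required_bits) && seq_cnt == 0;
}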
4f774513 14137/**
895427bd 14138 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
32517fc0 14139 * @phba: adapter with cq
4f774513
JS
14140 * @cq: Pointer to the completion queue.
14141 * @cqe: Pointer to fast-path completion queue entry.
14142 *
14143 * This routine processes a fast-path work queue completion entry from the fast-path
14144 * event queue for FCP command response completion.
32517fc0
JS
14145 *
14146 * Return: true if work posted to worker thread, otherwise false.
4f774513 14147 **/
32517fc0 14148static bool
895427bd 14149lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
14150 struct lpfc_cqe *cqe)
14151{
14152 struct lpfc_wcqe_release wcqe;
14153 bool workposted = false;
14154
14155 /* Copy the work queue CQE and convert endian order if needed */
48f8fdb4 14156 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
4f774513
JS
14157
14158 /* Check and process for different type of WCQE and dispatch */
14159 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14160 case CQE_CODE_COMPL_WQE:
895427bd 14161 case CQE_CODE_NVME_ERSP:
b84daac9 14162 cq->CQ_wq++;
4f774513 14163 /* Process the WQ complete event */
98fc5dd9 14164 phba->last_completion_time = jiffies;
c00f62e6 14165 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
895427bd 14166 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
4f774513
JS
14167 (struct lpfc_wcqe_complete *)&wcqe);
14168 break;
14169 case CQE_CODE_RELEASE_WQE:
b84daac9 14170 cq->CQ_release_wqe++;
4f774513
JS
14171 /* Process the WQ release event */
14172 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14173 (struct lpfc_wcqe_release *)&wcqe);
14174 break;
14175 case CQE_CODE_XRI_ABORTED:
b84daac9 14176 cq->CQ_xri_aborted++;
4f774513 14177 /* Process the WQ XRI abort event */
bc73905a 14178 phba->last_completion_time = jiffies;
4f774513
JS
14179 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14180 (struct sli4_wcqe_xri_aborted *)&wcqe);
14181 break;
895427bd
JS
14182 case CQE_CODE_RECEIVE_V1:
14183 case CQE_CODE_RECEIVE:
14184 phba->last_completion_time = jiffies;
2d7dbc4c
JS
14185 if (cq->subtype == LPFC_NVMET) {
14186 workposted = lpfc_sli4_nvmet_handle_rcqe(
14187 phba, cq, (struct lpfc_rcqe *)&wcqe);
14188 }
895427bd 14189 break;
4f774513
JS
14190 default:
14191 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd 14192 "0144 Not a valid CQE code: x%x\n",
4f774513
JS
14193 bf_get(lpfc_wcqe_c_code, &wcqe));
14194 break;
14195 }
14196 return workposted;
14197}
14198
14199/**
67d12733 14200 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
4f774513
JS
14201 * @phba: Pointer to HBA context object.
14202 * @eqe: Pointer to fast-path event queue entry.
14203 *
14204 * This routine processes an event queue entry from the fast-path event queue.
14205 * It will check the MajorCode and MinorCode to determine whether this is for a
14206 * completion event on a completion queue; if not, an error is logged and the
14207 * routine simply returns. Otherwise, it will get to the corresponding completion
14208 * queue and process all the entries on the completion queue, rearm the
14209 * completion queue, and then return.
14210 **/
f485c18d 14211static void
32517fc0
JS
14212lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14213 struct lpfc_eqe *eqe)
4f774513 14214{
895427bd 14215 struct lpfc_queue *cq = NULL;
32517fc0 14216 uint32_t qidx = eq->hdwq;
2d7dbc4c 14217 uint16_t cqid, id;
4f774513 14218
cb5172ea 14219 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
4f774513 14220 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
67d12733 14221 "0366 Not a valid completion "
4f774513 14222 "event: majorcode=x%x, minorcode=x%x\n",
cb5172ea
JS
14223 bf_get_le32(lpfc_eqe_major_code, eqe),
14224 bf_get_le32(lpfc_eqe_minor_code, eqe));
f485c18d 14225 return;
4f774513
JS
14226 }
14227
67d12733
JS
14228 /* Get the reference to the corresponding CQ */
14229 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14230
6a828b0f
JS
14231 /* Use the fast lookup method first */
14232 if (cqid <= phba->sli4_hba.cq_max) {
14233 cq = phba->sli4_hba.cq_lookup[cqid];
14234 if (cq)
14235 goto work_cq;
cdb42bec
JS
14236 }
14237
14238 /* Next check for NVMET completion */
2d7dbc4c
JS
14239 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14240 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14241 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14242 /* Process NVMET unsol rcv */
14243 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14244 goto process_cq;
14245 }
67d12733
JS
14246 }
14247
895427bd
JS
14248 if (phba->sli4_hba.nvmels_cq &&
14249 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14250 /* Process NVME unsol rcv */
14251 cq = phba->sli4_hba.nvmels_cq;
14252 }
14253
14254 /* Otherwise this is a Slow path event */
14255 if (cq == NULL) {
cdb42bec
JS
14256 lpfc_sli4_sp_handle_eqe(phba, eqe,
14257 phba->sli4_hba.hdwq[qidx].hba_eq);
f485c18d 14258 return;
4f774513
JS
14259 }
14260
895427bd 14261process_cq:
4f774513
JS
14262 if (unlikely(cqid != cq->queue_id)) {
14263 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14264 "0368 Miss-matched fast-path completion "
14265 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14266 cqid, cq->queue_id);
f485c18d 14267 return;
4f774513
JS
14268 }
14269
6a828b0f 14270work_cq:
d74a89aa
JS
14271#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14272 if (phba->ktime_on)
14273 cq->isr_timestamp = ktime_get_ns();
14274 else
14275 cq->isr_timestamp = 0;
14276#endif
45aa312e 14277 if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
f485c18d
DK
14278 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14279 "0363 Cannot schedule soft IRQ "
14280 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
d6d189ce 14281 cqid, cq->queue_id, raw_smp_processor_id());
f485c18d
DK
14282}
14283
14284/**
32517fc0
JS
14285 * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
14286 * @cq: Pointer to CQ to be processed
f485c18d 14287 *
32517fc0
JS
14288 * This routine calls the cq processing routine with the handler for
14289 * fast path CQEs.
14290 *
14291 * The CQ routine returns two values: the first is the calling status,
14292 * which indicates whether work was queued to the background discovery
14293 * thread. If true, the routine should wake up the discovery thread;
14294 * the second is the delay parameter. If non-zero, rather than rearming
14295 * the CQ and taking yet another interrupt, the CQ handler should be queued so
14296 * that it is processed in a subsequent polling action. The value of
14297 * the delay indicates when to reschedule it.
f485c18d
DK
14298 **/
14299static void
32517fc0 14300__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
f485c18d 14301{
f485c18d 14302 struct lpfc_hba *phba = cq->phba;
32517fc0 14303 unsigned long delay;
f485c18d 14304 bool workposted = false;
f485c18d 14305
32517fc0
JS
14306 /* process and rearm the CQ */
14307 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14308 &delay);
4f774513 14309
32517fc0
JS
14310 if (delay) {
14311 if (!queue_delayed_work_on(cq->chann, phba->wq,
14312 &cq->sched_irqwork, delay))
14313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14314 "0367 Cannot schedule soft IRQ "
14315 "for cqid=%d on CPU %d\n",
14316 cq->queue_id, cq->chann);
14317 }
4f774513
JS
14318
14319 /* wake up worker thread if there are works to be done */
14320 if (workposted)
14321 lpfc_worker_wake_up(phba);
14322}
14323
1ba981fd 14324/**
32517fc0
JS
14325 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14326 * interrupt
14327 * @work: pointer to work element
1ba981fd 14328 *
32517fc0 14329 * translates from the work handler and calls the fast-path handler.
1ba981fd
JS
14330 **/
14331static void
32517fc0 14332lpfc_sli4_hba_process_cq(struct work_struct *work)
1ba981fd 14333{
32517fc0 14334 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
1ba981fd 14335
32517fc0 14336 __lpfc_sli4_hba_process_cq(cq);
1ba981fd
JS
14337}
14338
14339/**
32517fc0
JS
14340 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14341 * @work: pointer to work element
1ba981fd 14342 *
32517fc0 14343 * translates from the work handler and calls the fast-path handler.
1ba981fd 14344 **/
32517fc0
JS
14345static void
14346lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
1ba981fd 14347{
32517fc0
JS
14348 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14349 struct lpfc_queue, sched_irqwork);
1ba981fd 14350
32517fc0 14351 __lpfc_sli4_hba_process_cq(cq);
1ba981fd
JS
14352}
14353
4f774513 14354/**
67d12733 14355 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
4f774513
JS
14356 * @irq: Interrupt number.
14357 * @dev_id: The device context pointer.
14358 *
14359 * This function is directly called from the PCI layer as an interrupt
14360 * service routine when device with SLI-4 interface spec is enabled with
14361 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14362 * ring event in the HBA. However, when the device is enabled with either
14363 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14364 * device-level interrupt handler. When the PCI slot is in error recovery
14365 * or the HBA is undergoing initialization, the interrupt handler will not
14366 * process the interrupt. The SCSI FCP fast-path ring events are handled in
14367 * the interrupt context. This function is called without any lock held.
14368 * It gets the hbalock to access and update SLI data structures. Note that,
14369 * the FCP EQs map one-to-one to the FCP CQs such that the FCP EQ index is
14370 * equal to the FCP CQ index.
14371 *
67d12733
JS
14372 * The link attention and ELS ring attention events are handled
14373 * by the worker thread. The interrupt handler signals the worker thread
14374 * and returns for these events. This function is called without any lock
14375 * held. It gets the hbalock to access and update SLI data structures.
14376 *
4f774513
JS
14377 * This function returns IRQ_HANDLED when interrupt is handled else it
14378 * returns IRQ_NONE.
14379 **/
14380irqreturn_t
67d12733 14381lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
4f774513
JS
14382{
14383 struct lpfc_hba *phba;
895427bd 14384 struct lpfc_hba_eq_hdl *hba_eq_hdl;
4f774513 14385 struct lpfc_queue *fpeq;
4f774513
JS
14386 unsigned long iflag;
14387 int ecount = 0;
895427bd 14388 int hba_eqidx;
32517fc0 14389 struct lpfc_eq_intr_info *eqi;
4f774513
JS
14390
14391 /* Get the driver's phba structure from the dev_id */
895427bd
JS
14392 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14393 phba = hba_eq_hdl->phba;
14394 hba_eqidx = hba_eq_hdl->idx;
4f774513
JS
14395
14396 if (unlikely(!phba))
14397 return IRQ_NONE;
cdb42bec 14398 if (unlikely(!phba->sli4_hba.hdwq))
5350d872 14399 return IRQ_NONE;
4f774513
JS
14400
14401 /* Get to the EQ struct associated with this vector */
657add4e 14402 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
2e90f4b5
JS
14403 if (unlikely(!fpeq))
14404 return IRQ_NONE;
4f774513
JS
14405
14406 /* Check device state for handling interrupt */
14407 if (unlikely(lpfc_intr_state_check(phba))) {
14408 /* Check again for link_state with lock held */
14409 spin_lock_irqsave(&phba->hbalock, iflag);
14410 if (phba->link_state < LPFC_LINK_DOWN)
14411 /* Flush, clear interrupt, and rearm the EQ */
24c7c0a6 14412 lpfc_sli4_eqcq_flush(phba, fpeq);
4f774513
JS
14413 spin_unlock_irqrestore(&phba->hbalock, iflag);
14414 return IRQ_NONE;
14415 }
14416
a7fc071a
DK
14417 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
14418 eqi->icnt++;
14419
d6d189ce 14420 fpeq->last_cpu = raw_smp_processor_id();
4f774513 14421
a7fc071a 14422 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
8156d378 14423 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
32517fc0
JS
14424 phba->cfg_auto_imax &&
14425 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14426 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14427 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
b84daac9 14428
32517fc0 14429 /* process and rearm the EQ */
93a4d6f4 14430 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
4f774513
JS
14431
14432 if (unlikely(ecount == 0)) {
b84daac9 14433 fpeq->EQ_no_entry++;
4f774513
JS
14434 if (phba->intr_type == MSIX)
14435 /* MSI-X treated interrupt served as no EQ share INT */
14436 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14437 "0358 MSI-X interrupt with no EQE\n");
14438 else
14439 /* Non MSI-X treated on interrupt as EQ share INT */
14440 return IRQ_NONE;
14441 }
14442
14443 return IRQ_HANDLED;
14444} /* lpfc_sli4_fp_intr_handler */
14445
14446/**
14447 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14448 * @irq: Interrupt number.
14449 * @dev_id: The device context pointer.
14450 *
14451 * This function is the device-level interrupt handler to device with SLI-4
14452 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14453 * interrupt mode is enabled and there is an event in the HBA which requires
14454 * driver attention. This function invokes the slow-path interrupt attention
14455 * handling function and fast-path interrupt attention handling function in
14456 * turn to process the relevant HBA attention events. This function is called
14457 * without any lock held. It gets the hbalock to access and update SLI data
14458 * structures.
14459 *
14460 * This function returns IRQ_HANDLED when interrupt is handled, else it
14461 * returns IRQ_NONE.
14462 **/
14463irqreturn_t
14464lpfc_sli4_intr_handler(int irq, void *dev_id)
14465{
14466 struct lpfc_hba *phba;
67d12733
JS
14467 irqreturn_t hba_irq_rc;
14468 bool hba_handled = false;
895427bd 14469 int qidx;
4f774513
JS
14470
14471 /* Get the driver's phba structure from the dev_id */
14472 phba = (struct lpfc_hba *)dev_id;
14473
14474 if (unlikely(!phba))
14475 return IRQ_NONE;
14476
4f774513
JS
14477 /*
14478 * Invoke fast-path host attention interrupt handling as appropriate.
14479 */
6a828b0f 14480 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
67d12733 14481 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
895427bd 14482 &phba->sli4_hba.hba_eq_hdl[qidx]);
67d12733
JS
14483 if (hba_irq_rc == IRQ_HANDLED)
14484 hba_handled |= true;
4f774513
JS
14485 }
14486
67d12733 14487 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
4f774513
JS
14488} /* lpfc_sli4_intr_handler */
14489
93a4d6f4
JS
14490void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14491{
14492 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14493 struct lpfc_queue *eq;
14494 int i = 0;
14495
14496 rcu_read_lock();
14497
14498 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14499 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14500 if (!list_empty(&phba->poll_list))
14501 mod_timer(&phba->cpuhp_poll_timer,
14502 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14503
14504 rcu_read_unlock();
14505}
14506
14507inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14508{
14509 struct lpfc_hba *phba = eq->phba;
14510 int i = 0;
14511
14512 /*
14513 * Unlocking an irq is one of the entry points to check
14514 * for re-schedule, but we are good for io submission
14515 * path as midlayer does a get_cpu to glue us in. Flush
14516 * out the invalidate queue so we can see the updated
14517 * value for flag.
14518 */
14519 smp_rmb();
14520
14521 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14522 /* We will not likely get the completion for the caller
14523 * during this iteration but I guess that's fine.
14524 * Future io's coming on this eq should be able to
14525 * pick it up. As for the case of single io's, they
14526 * will be handled through a sched from polling timer
14527 * function which is currently triggered every 1msec.
14528 */
14529 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14530
14531 return i;
14532}
14533
14534static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14535{
14536 struct lpfc_hba *phba = eq->phba;
14537
f861f596
JS
14538 /* kickstart slowpath processing if needed */
14539 if (list_empty(&phba->poll_list))
93a4d6f4
JS
14540 mod_timer(&phba->cpuhp_poll_timer,
14541 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
93a4d6f4
JS
14542
14543 list_add_rcu(&eq->_poll_list, &phba->poll_list);
14544 synchronize_rcu();
14545}
14546
14547static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
14548{
14549 struct lpfc_hba *phba = eq->phba;
14550
14551 /* Disable slowpath processing for this eq. Kick start the eq
14552 * by RE-ARMING the eq's ASAP
14553 */
14554 list_del_rcu(&eq->_poll_list);
14555 synchronize_rcu();
14556
14557 if (list_empty(&phba->poll_list))
14558 del_timer_sync(&phba->cpuhp_poll_timer);
14559}
14560
d480e578 14561void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
93a4d6f4
JS
14562{
14563 struct lpfc_queue *eq, *next;
14564
14565 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
14566 list_del(&eq->_poll_list);
14567
14568 INIT_LIST_HEAD(&phba->poll_list);
14569 synchronize_rcu();
14570}
14571
14572static inline void
14573__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
14574{
14575 if (mode == eq->mode)
14576 return;
14577 /*
14578 * currently this function is only called during a hotplug
14579 * event and the cpu on which this function is executing
14580 * is going offline. By now the hotplug has instructed
14581 * the scheduler to remove this cpu from cpu active mask.
14582 * So we don't need to worry about being put aside by the
14583 * scheduler for a high priority process. Yes, interrupts
14584 * could come but they are known to retire ASAP.
14585 */
14586
14587 /* Disable polling in the fastpath */
14588 WRITE_ONCE(eq->mode, mode);
14589 /* flush out the store buffer */
14590 smp_wmb();
14591
14592 /*
14593 * Add this eq to the polling list and start polling. For
14594 * a grace period both interrupt handler and poller will
14595 * try to process the eq _but_ that's fine. We have a
14596 * synchronization mechanism in place (queue_claimed) to
14597 * deal with it. This is just a draining phase for the
14598 * interrupt handler (not eq's) as we have guaranteed through
14599 * the barrier that all the CPUs have seen the new CQ_POLLED
14600 * state, which will effectively disable the REARMING of
14601 * the EQ. The whole idea is eq's die off eventually as
14602 * we are not rearming EQ's anymore.
14603 */
14604 mode ? lpfc_sli4_add_to_poll_list(eq) :
14605 lpfc_sli4_remove_from_poll_list(eq);
14606}
14607
14608void lpfc_sli4_start_polling(struct lpfc_queue *eq)
14609{
14610 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
14611}
14612
14613void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
14614{
14615 struct lpfc_hba *phba = eq->phba;
14616
14617 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
14618
14619 /* Kick start for the pending io's in h/w.
14620 * Once we switch back to interrupt processing on an eq
14621 * the io path completion will only arm eq's when it
14622 * receives a completion. But since eq's are in a
14623 * disarmed state it doesn't receive a completion. This
14624 * creates a deadlock scenario.
14625 */
14626 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
14627}
14628
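/*
 * Editor's sketch (hypothetical callers, not driver code): the pair above is
 * meant to bracket a window in which interrupts cannot be relied upon, such
 * as a CPU going offline during hotplug.  Switching to LPFC_EQ_POLL puts the
 * EQ on the poll list so the ~1 ms poll timer drains it; switching back to
 * LPFC_EQ_INTERRUPT removes it and rings the doorbell once to rearm the EQ.
 */
void example_quiesce_eq(struct lpfc_queue *eq)
{
	lpfc_sli4_start_polling(eq);	/* EQ now serviced from the poll list */
}

void example_resume_eq(struct lpfc_queue *eq)
{
	lpfc_sli4_stop_polling(eq);	/* back to interrupt mode, EQ rearmed */
}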
4f774513
JS
14629/**
14630 * lpfc_sli4_queue_free - free a queue structure and associated memory
14631 * @queue: The queue structure to free.
14632 *
b595076a 14633 * This function frees a queue structure and the DMAable memory used for
4f774513
JS
14634 * the host resident queue. This function must be called after destroying the
14635 * queue on the HBA.
14636 **/
14637void
14638lpfc_sli4_queue_free(struct lpfc_queue *queue)
14639{
14640 struct lpfc_dmabuf *dmabuf;
14641
14642 if (!queue)
14643 return;
14644
4645f7b5
JS
14645 if (!list_empty(&queue->wq_list))
14646 list_del(&queue->wq_list);
14647
4f774513
JS
14648 while (!list_empty(&queue->page_list)) {
14649 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14650 list);
81b96eda 14651 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
4f774513
JS
14652 dmabuf->virt, dmabuf->phys);
14653 kfree(dmabuf);
14654 }
895427bd
JS
14655 if (queue->rqbp) {
14656 lpfc_free_rq_buffer(queue->phba, queue);
14657 kfree(queue->rqbp);
14658 }
d1f525aa 14659
32517fc0
JS
14660 if (!list_empty(&queue->cpu_list))
14661 list_del(&queue->cpu_list);
14662
4f774513
JS
14663 kfree(queue);
14664 return;
14665}
14666
14667/**
14668 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14669 * @phba: The HBA that this queue is being created on.
81b96eda 14670 * @page_size: The size of a queue page
4f774513
JS
14671 * @entry_size: The size of each queue entry for this queue.
14672 * @entry_count: The number of entries that this queue will handle.
c1a21ebc 14673 * @cpu: The cpu that will primarily utilize this queue.
4f774513
JS
14674 *
14675 * This function allocates a queue structure and the DMAable memory used for
14676 * the host resident queue. This function must be called before creating the
14677 * queue on the HBA.
14678 **/
14679struct lpfc_queue *
81b96eda 14680lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
c1a21ebc 14681 uint32_t entry_size, uint32_t entry_count, int cpu)
4f774513
JS
14682{
14683 struct lpfc_queue *queue;
14684 struct lpfc_dmabuf *dmabuf;
cb5172ea 14685 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
9afbee3d 14686 uint16_t x, pgcnt;
4f774513 14687
cb5172ea 14688 if (!phba->sli4_hba.pc_sli4_params.supported)
81b96eda 14689 hw_page_size = page_size;
cb5172ea 14690
9afbee3d
JS
14691 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
14692
14693 /* If needed, adjust page count to match the max the adapter supports */
14694 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14695 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14696
c1a21ebc
JS
14697 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14698 GFP_KERNEL, cpu_to_node(cpu));
4f774513
JS
14699 if (!queue)
14700 return NULL;
895427bd 14701
4f774513 14702 INIT_LIST_HEAD(&queue->list);
93a4d6f4 14703 INIT_LIST_HEAD(&queue->_poll_list);
895427bd 14704 INIT_LIST_HEAD(&queue->wq_list);
6e8e1c14 14705 INIT_LIST_HEAD(&queue->wqfull_list);
4f774513
JS
14706 INIT_LIST_HEAD(&queue->page_list);
14707 INIT_LIST_HEAD(&queue->child_list);
32517fc0 14708 INIT_LIST_HEAD(&queue->cpu_list);
81b96eda
JS
14709
14710 /* Set queue parameters now. If the system cannot provide memory
14711 * resources, the free routine needs to know what was allocated.
14712 */
9afbee3d
JS
14713 queue->page_count = pgcnt;
14714 queue->q_pgs = (void **)&queue[1];
14715 queue->entry_cnt_per_pg = hw_page_size / entry_size;
81b96eda
JS
14716 queue->entry_size = entry_size;
14717 queue->entry_count = entry_count;
14718 queue->page_size = hw_page_size;
14719 queue->phba = phba;
14720
9afbee3d 14721 for (x = 0; x < queue->page_count; x++) {
c1a21ebc
JS
14722 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14723 dev_to_node(&phba->pcidev->dev));
4f774513
JS
14724 if (!dmabuf)
14725 goto out_fail;
750afb08
LC
14726 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14727 hw_page_size, &dmabuf->phys,
14728 GFP_KERNEL);
4f774513
JS
14729 if (!dmabuf->virt) {
14730 kfree(dmabuf);
14731 goto out_fail;
14732 }
14733 dmabuf->buffer_tag = x;
14734 list_add_tail(&dmabuf->list, &queue->page_list);
9afbee3d
JS
14735 /* use lpfc_sli4_qe to index a particular entry in this page */
14736 queue->q_pgs[x] = dmabuf->virt;
4f774513 14737 }
f485c18d
DK
14738 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14739 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
32517fc0
JS
14740 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14741 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
4f774513 14742
32517fc0 14743 /* notify_interval will be set during q creation */
64eb4dcb 14744
4f774513
JS
14745 return queue;
14746out_fail:
14747 lpfc_sli4_queue_free(queue);
14748 return NULL;
14749}
14750
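/*
 * Editor's sketch (not part of the driver): the allocation above sizes the
 * queue as ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size pages,
 * with hw_page_size / entry_size entries per page.  For example, assuming a
 * 4 KB hardware page, 64-byte entries and 1024 entries, that is
 * 65536 / 4096 = 16 pages of 64 entries each.  Hypothetical restatement of
 * the page-count math:
 */
#include <stdint.h>

static inline uint32_t queue_pages_needed(uint32_t entry_size,
					  uint32_t entry_count,
					  uint32_t hw_page_size)
{
	uint64_t bytes = (uint64_t)entry_size * entry_count;

	/* Round up to a whole number of hardware pages. */
	return (uint32_t)((bytes + hw_page_size - 1) / hw_page_size);
}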
962bc51b
JS
14751/**
14752 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14753 * @phba: HBA structure that indicates port to create a queue on.
14754 * @pci_barset: PCI BAR set flag.
14755 *
14756 * This function shall perform iomap of the specified PCI BAR address to host
14757 * memory address if not already done so and return it. The returned host
14758 * memory address can be NULL.
14759 */
14760static void __iomem *
14761lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14762{
962bc51b
JS
14763 if (!phba->pcidev)
14764 return NULL;
962bc51b
JS
14765
14766 switch (pci_barset) {
14767 case WQ_PCI_BAR_0_AND_1:
962bc51b
JS
14768 return phba->pci_bar0_memmap_p;
14769 case WQ_PCI_BAR_2_AND_3:
962bc51b
JS
14770 return phba->pci_bar2_memmap_p;
14771 case WQ_PCI_BAR_4_AND_5:
962bc51b
JS
14772 return phba->pci_bar4_memmap_p;
14773 default:
14774 break;
14775 }
14776 return NULL;
14777}
14778
173edbb2 14779/**
cb733e35
JS
14780 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14781 * @phba: HBA structure that EQs are on.
14782 * @startq: The starting EQ index to modify
14783 * @numq: The number of EQs (consecutive indexes) to modify
14784 * @usdelay: amount of delay
173edbb2 14785 *
cb733e35
JS
14786 * This function revises the EQ delay on 1 or more EQs. The EQ delay
14787 * is set either by writing to a register (if supported by the SLI Port)
14788 * or by mailbox command. The mailbox command allows several EQs to be
14789 * updated at once.
173edbb2 14790 *
cb733e35
JS
14791 * The @phba struct is used to send a mailbox command to HBA. The @startq
14792 * is used to get the starting EQ index to change. The @numq value is
14793 * used to specify how many consecutive EQ indexes, starting at EQ index,
14794 * are to be changed. This function is asynchronous and will wait for any
14795 * mailbox commands to finish before returning.
173edbb2 14796 *
cb733e35
JS
14797 * On success this function will return a zero. If unable to allocate
14798 * enough memory this function will return -ENOMEM. If a mailbox command
14799 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
14800 * have had their delay multiplier changed.
173edbb2 14801 **/
cb733e35 14802void
0cf07f84 14803lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
cb733e35 14804 uint32_t numq, uint32_t usdelay)
173edbb2
JS
14805{
14806 struct lpfc_mbx_modify_eq_delay *eq_delay;
14807 LPFC_MBOXQ_t *mbox;
14808 struct lpfc_queue *eq;
cb733e35 14809 int cnt = 0, rc, length;
173edbb2 14810 uint32_t shdr_status, shdr_add_status;
cb733e35 14811 uint32_t dmult;
895427bd 14812 int qidx;
173edbb2 14813 union lpfc_sli4_cfg_shdr *shdr;
173edbb2 14814
6a828b0f 14815 if (startq >= phba->cfg_irq_chann)
cb733e35
JS
14816 return;
14817
14818 if (usdelay > 0xFFFF) {
14819 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
14820 "6429 usdelay %d too large. Scaled down to "
14821 "0xFFFF.\n", usdelay);
14822 usdelay = 0xFFFF;
14823 }
14824
14825 /* set values by EQ_DELAY register if supported */
14826 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14827 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
657add4e 14828 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
cb733e35
JS
14829 if (!eq)
14830 continue;
14831
32517fc0 14832 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
cb733e35
JS
14833
14834 if (++cnt >= numq)
14835 break;
14836 }
cb733e35
JS
14837 return;
14838 }
14839
14840 /* Otherwise, set values by mailbox cmd */
173edbb2
JS
14841
14842 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
cb733e35
JS
14843 if (!mbox) {
14844 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
14845 "6428 Failed allocating mailbox cmd buffer."
14846 " EQ delay was not set.\n");
14847 return;
14848 }
173edbb2
JS
14849 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14850 sizeof(struct lpfc_sli4_cfg_mhdr));
14851 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14852 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14853 length, LPFC_SLI4_MBX_EMBED);
14854 eq_delay = &mbox->u.mqe.un.eq_delay;
14855
14856 /* Calculate the delay multiplier from the requested delay in microseconds */
cb733e35
JS
14857 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
14858 if (dmult)
14859 dmult--;
0cf07f84
JS
14860 if (dmult > LPFC_DMULT_MAX)
14861 dmult = LPFC_DMULT_MAX;
173edbb2 14862
6a828b0f 14863 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
657add4e 14864 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
173edbb2
JS
14865 if (!eq)
14866 continue;
cb733e35 14867 eq->q_mode = usdelay;
173edbb2
JS
14868 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14869 eq_delay->u.request.eq[cnt].phase = 0;
14870 eq_delay->u.request.eq[cnt].delay_multi = dmult;
0cf07f84 14871
cb733e35 14872 if (++cnt >= numq)
173edbb2
JS
14873 break;
14874 }
14875 eq_delay->u.request.num_eq = cnt;
14876
14877 mbox->vport = phba->pport;
14878 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3e1f0718
JS
14879 mbox->ctx_buf = NULL;
14880 mbox->ctx_ndlp = NULL;
173edbb2
JS
14881 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14882 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14883 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14884 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14885 if (shdr_status || shdr_add_status || rc) {
14886 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14887 "2512 MODIFY_EQ_DELAY mailbox failed with "
14888 "status x%x add_status x%x, mbx status x%x\n",
14889 shdr_status, shdr_add_status, rc);
173edbb2
JS
14890 }
14891 mempool_free(mbox, phba->mbox_mem_pool);
cb733e35 14892 return;
173edbb2
JS
14893}
14894
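/*
 * Editor's sketch (illustrative only): when the EQ_DELAY register is not
 * available, the routine above converts the requested delay in microseconds
 * into the mailbox delay multiplier as
 * (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC, decrements it when
 * non-zero and clamps it to LPFC_DMULT_MAX.  Hypothetical standalone helper
 * mirroring only that arithmetic; the driver constants are passed in rather
 * than restated here.
 */
#include <stdint.h>

static inline uint32_t usdelay_to_dmult(uint32_t usdelay, uint32_t dmult_const,
					uint32_t sec_to_usec, uint32_t dmult_max)
{
	uint32_t dmult = (usdelay * dmult_const) / sec_to_usec;

	if (dmult)
		dmult--;		/* multiplier field is zero-based */
	if (dmult > dmult_max)
		dmult = dmult_max;	/* clamp to what the port accepts */

	return dmult;
}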
4f774513
JS
14895/**
14896 * lpfc_eq_create - Create an Event Queue on the HBA
14897 * @phba: HBA structure that indicates port to create a queue on.
14898 * @eq: The queue structure to use to create the event queue.
14899 * @imax: The maximum interrupt per second limit.
14900 *
14901 * This function creates an event queue, as detailed in @eq, on a port,
14902 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14903 *
14904 * The @phba struct is used to send mailbox command to HBA. The @eq struct
14905 * is used to get the entry count and entry size that are necessary to
14906 * determine the number of pages to allocate and use for this queue. This
14907 * function will send the EQ_CREATE mailbox command to the HBA to setup the
14908 * event queue. This function is asynchronous and will wait for the mailbox
14909 * command to finish before continuing.
14910 *
14911 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
14912 * memory this function will return -ENOMEM. If the queue create mailbox command
14913 * fails this function will return -ENXIO.
4f774513 14914 **/
a2fc4aef 14915int
ee02006b 14916lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
4f774513
JS
14917{
14918 struct lpfc_mbx_eq_create *eq_create;
14919 LPFC_MBOXQ_t *mbox;
14920 int rc, length, status = 0;
14921 struct lpfc_dmabuf *dmabuf;
14922 uint32_t shdr_status, shdr_add_status;
14923 union lpfc_sli4_cfg_shdr *shdr;
14924 uint16_t dmult;
49198b37
JS
14925 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14926
2e90f4b5
JS
14927 /* sanity check on queue memory */
14928 if (!eq)
14929 return -ENODEV;
49198b37
JS
14930 if (!phba->sli4_hba.pc_sli4_params.supported)
14931 hw_page_size = SLI4_PAGE_SIZE;
4f774513
JS
14932
14933 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14934 if (!mbox)
14935 return -ENOMEM;
14936 length = (sizeof(struct lpfc_mbx_eq_create) -
14937 sizeof(struct lpfc_sli4_cfg_mhdr));
14938 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14939 LPFC_MBOX_OPCODE_EQ_CREATE,
14940 length, LPFC_SLI4_MBX_EMBED);
14941 eq_create = &mbox->u.mqe.un.eq_create;
7365f6fd 14942 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
4f774513
JS
14943 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14944 eq->page_count);
14945 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14946 LPFC_EQE_SIZE);
14947 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
7365f6fd
JS
14948
14949 /* Use version 2 of CREATE_EQ if eqav is set */
14950 if (phba->sli4_hba.pc_sli4_params.eqav) {
14951 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14952 LPFC_Q_CREATE_VERSION_2);
14953 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14954 phba->sli4_hba.pc_sli4_params.eqav);
14955 }
14956
2c9c5a00
JS
14957 /* don't setup delay multiplier using EQ_CREATE */
14958 dmult = 0;
4f774513
JS
14959 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14960 dmult);
14961 switch (eq->entry_count) {
14962 default:
14963 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14964 "0360 Unsupported EQ count. (%d)\n",
14965 eq->entry_count);
04d210c9
JS
14966 if (eq->entry_count < 256) {
14967 status = -EINVAL;
14968 goto out;
14969 }
5bd5f66c 14970 /* fall through - otherwise default to smallest count */
4f774513
JS
14971 case 256:
14972 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14973 LPFC_EQ_CNT_256);
14974 break;
14975 case 512:
14976 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14977 LPFC_EQ_CNT_512);
14978 break;
14979 case 1024:
14980 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14981 LPFC_EQ_CNT_1024);
14982 break;
14983 case 2048:
14984 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14985 LPFC_EQ_CNT_2048);
14986 break;
14987 case 4096:
14988 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14989 LPFC_EQ_CNT_4096);
14990 break;
14991 }
14992 list_for_each_entry(dmabuf, &eq->page_list, list) {
49198b37 14993 memset(dmabuf->virt, 0, hw_page_size);
4f774513
JS
14994 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14995 putPaddrLow(dmabuf->phys);
14996 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14997 putPaddrHigh(dmabuf->phys);
14998 }
14999 mbox->vport = phba->pport;
15000 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3e1f0718
JS
15001 mbox->ctx_buf = NULL;
15002 mbox->ctx_ndlp = NULL;
4f774513 15003 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4f774513
JS
15004 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15005 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15006 if (shdr_status || shdr_add_status || rc) {
15007 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15008 "2500 EQ_CREATE mailbox failed with "
15009 "status x%x add_status x%x, mbx status x%x\n",
15010 shdr_status, shdr_add_status, rc);
15011 status = -ENXIO;
15012 }
15013 eq->type = LPFC_EQ;
15014 eq->subtype = LPFC_NONE;
15015 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15016 if (eq->queue_id == 0xFFFF)
15017 status = -ENXIO;
15018 eq->host_index = 0;
32517fc0
JS
15019 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15020 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
04d210c9 15021out:
8fa38513 15022 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
15023 return status;
15024}
15025
15026/**
15027 * lpfc_cq_create - Create a Completion Queue on the HBA
15028 * @phba: HBA structure that indicates port to create a queue on.
15029 * @cq: The queue structure to use to create the completion queue.
15030 * @eq: The event queue to bind this completion queue to.
15031 *
15032 * This function creates a completion queue, as detailed in @cq, on a port,
15033 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
15034 *
15035 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15036 * is used to get the entry count and entry size that are necessary to
15037 * determine the number of pages to allocate and use for this queue. The @eq
15038 * is used to indicate which event queue to bind this completion queue to. This
15039 * function will send the CQ_CREATE mailbox command to the HBA to setup the
15040 * completion queue. This function is asynchronous and will wait for the mailbox
15041 * command to finish before continuing.
15042 *
15043 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
15044 * memory this function will return -ENOMEM. If the queue create mailbox command
15045 * fails this function will return -ENXIO.
4f774513 15046 **/
a2fc4aef 15047int
4f774513
JS
15048lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15049 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15050{
15051 struct lpfc_mbx_cq_create *cq_create;
15052 struct lpfc_dmabuf *dmabuf;
15053 LPFC_MBOXQ_t *mbox;
15054 int rc, length, status = 0;
15055 uint32_t shdr_status, shdr_add_status;
15056 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15057
2e90f4b5
JS
15058 /* sanity check on queue memory */
15059 if (!cq || !eq)
15060 return -ENODEV;
49198b37 15061
4f774513
JS
15062 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15063 if (!mbox)
15064 return -ENOMEM;
15065 length = (sizeof(struct lpfc_mbx_cq_create) -
15066 sizeof(struct lpfc_sli4_cfg_mhdr));
15067 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15068 LPFC_MBOX_OPCODE_CQ_CREATE,
15069 length, LPFC_SLI4_MBX_EMBED);
15070 cq_create = &mbox->u.mqe.un.cq_create;
5a6f133e 15071 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
4f774513
JS
15072 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15073 cq->page_count);
15074 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15075 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
5a6f133e
JS
15076 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15077 phba->sli4_hba.pc_sli4_params.cqv);
15078 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
81b96eda
JS
15079 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15080 (cq->page_size / SLI4_PAGE_SIZE));
5a6f133e
JS
15081 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15082 eq->queue_id);
7365f6fd
JS
15083 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15084 phba->sli4_hba.pc_sli4_params.cqav);
5a6f133e
JS
15085 } else {
15086 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15087 eq->queue_id);
15088 }
4f774513 15089 switch (cq->entry_count) {
81b96eda
JS
15090 case 2048:
15091 case 4096:
15092 if (phba->sli4_hba.pc_sli4_params.cqv ==
15093 LPFC_Q_CREATE_VERSION_2) {
15094 cq_create->u.request.context.lpfc_cq_context_count =
15095 cq->entry_count;
15096 bf_set(lpfc_cq_context_count,
15097 &cq_create->u.request.context,
15098 LPFC_CQ_CNT_WORD7);
15099 break;
15100 }
5bd5f66c 15101 /* fall through */
4f774513
JS
15102 default:
15103 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2ea259ee 15104 "0361 Unsupported CQ count: "
64eb4dcb 15105 "entry cnt %d sz %d pg cnt %d\n",
2ea259ee 15106 cq->entry_count, cq->entry_size,
64eb4dcb 15107 cq->page_count);
4f4c1863
JS
15108 if (cq->entry_count < 256) {
15109 status = -EINVAL;
15110 goto out;
15111 }
5bd5f66c 15112 /* fall through - otherwise default to smallest count */
4f774513
JS
15113 case 256:
15114 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15115 LPFC_CQ_CNT_256);
15116 break;
15117 case 512:
15118 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15119 LPFC_CQ_CNT_512);
15120 break;
15121 case 1024:
15122 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15123 LPFC_CQ_CNT_1024);
15124 break;
15125 }
15126 list_for_each_entry(dmabuf, &cq->page_list, list) {
81b96eda 15127 memset(dmabuf->virt, 0, cq->page_size);
4f774513
JS
15128 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15129 putPaddrLow(dmabuf->phys);
15130 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15131 putPaddrHigh(dmabuf->phys);
15132 }
15133 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15134
15135 /* The IOCTL status is embedded in the mailbox subheader. */
4f774513
JS
15136 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15137 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15138 if (shdr_status || shdr_add_status || rc) {
15139 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15140 "2501 CQ_CREATE mailbox failed with "
15141 "status x%x add_status x%x, mbx status x%x\n",
15142 shdr_status, shdr_add_status, rc);
15143 status = -ENXIO;
15144 goto out;
15145 }
15146 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15147 if (cq->queue_id == 0xFFFF) {
15148 status = -ENXIO;
15149 goto out;
15150 }
15151 /* link the cq onto the parent eq child list */
15152 list_add_tail(&cq->list, &eq->child_list);
15153 /* Set up completion queue's type and subtype */
15154 cq->type = type;
15155 cq->subtype = subtype;
15156 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
2a622bfb 15157 cq->assoc_qid = eq->queue_id;
6a828b0f 15158 cq->assoc_qp = eq;
4f774513 15159 cq->host_index = 0;
32517fc0
JS
15160 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15161 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
4f774513 15162
6a828b0f
JS
15163 if (cq->queue_id > phba->sli4_hba.cq_max)
15164 phba->sli4_hba.cq_max = cq->queue_id;
8fa38513
JS
15165out:
15166 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
15167 return status;
15168}
15169
2d7dbc4c
JS
15170/**
15171 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
15172 * @phba: HBA structure that indicates port to create a queue on.
15173 * @cqp: The queue structure array to use to create the completion queues.
cdb42bec 15174 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
2d7dbc4c
JS
15175 *
15176 * This function creates a set of completion queues to support MRQ
15177 * as detailed in @cqp, on a port,
15178 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
15179 *
15180 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15181 * is used to get the entry count and entry size that are necessary to
15182 * determine the number of pages to allocate and use for this queue. The @eq
15183 * is used to indicate which event queue to bind this completion queue to. This
15184 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
15185 * completion queue. This function is asynchronous and will wait for the mailbox
15186 * command to finish before continuing.
15187 *
15188 * On success this function will return a zero. If unable to allocate enough
15189 * memory this function will return -ENOMEM. If the queue create mailbox command
15190 * fails this function will return -ENXIO.
15191 **/
15192int
15193lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
cdb42bec
JS
15194 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15195 uint32_t subtype)
2d7dbc4c
JS
15196{
15197 struct lpfc_queue *cq;
15198 struct lpfc_queue *eq;
15199 struct lpfc_mbx_cq_create_set *cq_set;
15200 struct lpfc_dmabuf *dmabuf;
15201 LPFC_MBOXQ_t *mbox;
15202 int rc, length, alloclen, status = 0;
15203 int cnt, idx, numcq, page_idx = 0;
15204 uint32_t shdr_status, shdr_add_status;
15205 union lpfc_sli4_cfg_shdr *shdr;
15206 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15207
15208 /* sanity check on queue memory */
15209 numcq = phba->cfg_nvmet_mrq;
cdb42bec 15210 if (!cqp || !hdwq || !numcq)
2d7dbc4c 15211 return -ENODEV;
2d7dbc4c
JS
15212
15213 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15214 if (!mbox)
15215 return -ENOMEM;
15216
15217 length = sizeof(struct lpfc_mbx_cq_create_set);
15218 length += ((numcq * cqp[0]->page_count) *
15219 sizeof(struct dma_address));
15220 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15221 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15222 LPFC_SLI4_MBX_NEMBED);
15223 if (alloclen < length) {
15224 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15225 "3098 Allocated DMA memory size (%d) is "
15226 "less than the requested DMA memory size "
15227 "(%d)\n", alloclen, length);
15228 status = -ENOMEM;
15229 goto out;
15230 }
15231 cq_set = mbox->sge_array->addr[0];
15232 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15233 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15234
15235 for (idx = 0; idx < numcq; idx++) {
15236 cq = cqp[idx];
cdb42bec 15237 eq = hdwq[idx].hba_eq;
2d7dbc4c
JS
15238 if (!cq || !eq) {
15239 status = -ENOMEM;
15240 goto out;
15241 }
81b96eda
JS
15242 if (!phba->sli4_hba.pc_sli4_params.supported)
15243 hw_page_size = cq->page_size;
2d7dbc4c
JS
15244
15245 switch (idx) {
15246 case 0:
15247 bf_set(lpfc_mbx_cq_create_set_page_size,
15248 &cq_set->u.request,
15249 (hw_page_size / SLI4_PAGE_SIZE));
15250 bf_set(lpfc_mbx_cq_create_set_num_pages,
15251 &cq_set->u.request, cq->page_count);
15252 bf_set(lpfc_mbx_cq_create_set_evt,
15253 &cq_set->u.request, 1);
15254 bf_set(lpfc_mbx_cq_create_set_valid,
15255 &cq_set->u.request, 1);
15256 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15257 &cq_set->u.request, 0);
15258 bf_set(lpfc_mbx_cq_create_set_num_cq,
15259 &cq_set->u.request, numcq);
7365f6fd
JS
15260 bf_set(lpfc_mbx_cq_create_set_autovalid,
15261 &cq_set->u.request,
15262 phba->sli4_hba.pc_sli4_params.cqav);
2d7dbc4c 15263 switch (cq->entry_count) {
81b96eda
JS
15264 case 2048:
15265 case 4096:
15266 if (phba->sli4_hba.pc_sli4_params.cqv ==
15267 LPFC_Q_CREATE_VERSION_2) {
15268 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15269 &cq_set->u.request,
15270 cq->entry_count);
15271 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15272 &cq_set->u.request,
15273 LPFC_CQ_CNT_WORD7);
15274 break;
15275 }
5bd5f66c 15276 /* fall through */
2d7dbc4c
JS
15277 default:
15278 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15279 "3118 Bad CQ count. (%d)\n",
15280 cq->entry_count);
15281 if (cq->entry_count < 256) {
15282 status = -EINVAL;
15283 goto out;
15284 }
5bd5f66c 15285 /* fall through - otherwise default to smallest */
2d7dbc4c
JS
15286 case 256:
15287 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15288 &cq_set->u.request, LPFC_CQ_CNT_256);
15289 break;
15290 case 512:
15291 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15292 &cq_set->u.request, LPFC_CQ_CNT_512);
15293 break;
15294 case 1024:
15295 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15296 &cq_set->u.request, LPFC_CQ_CNT_1024);
15297 break;
15298 }
15299 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15300 &cq_set->u.request, eq->queue_id);
15301 break;
15302 case 1:
15303 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15304 &cq_set->u.request, eq->queue_id);
15305 break;
15306 case 2:
15307 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15308 &cq_set->u.request, eq->queue_id);
15309 break;
15310 case 3:
15311 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15312 &cq_set->u.request, eq->queue_id);
15313 break;
15314 case 4:
15315 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15316 &cq_set->u.request, eq->queue_id);
15317 break;
15318 case 5:
15319 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15320 &cq_set->u.request, eq->queue_id);
15321 break;
15322 case 6:
15323 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15324 &cq_set->u.request, eq->queue_id);
15325 break;
15326 case 7:
15327 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15328 &cq_set->u.request, eq->queue_id);
15329 break;
15330 case 8:
15331 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15332 &cq_set->u.request, eq->queue_id);
15333 break;
15334 case 9:
15335 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15336 &cq_set->u.request, eq->queue_id);
15337 break;
15338 case 10:
15339 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15340 &cq_set->u.request, eq->queue_id);
15341 break;
15342 case 11:
15343 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15344 &cq_set->u.request, eq->queue_id);
15345 break;
15346 case 12:
15347 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15348 &cq_set->u.request, eq->queue_id);
15349 break;
15350 case 13:
15351 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15352 &cq_set->u.request, eq->queue_id);
15353 break;
15354 case 14:
15355 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15356 &cq_set->u.request, eq->queue_id);
15357 break;
15358 case 15:
15359 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15360 &cq_set->u.request, eq->queue_id);
15361 break;
15362 }
15363
15364 /* link the cq onto the parent eq child list */
15365 list_add_tail(&cq->list, &eq->child_list);
15366 /* Set up completion queue's type and subtype */
15367 cq->type = type;
15368 cq->subtype = subtype;
15369 cq->assoc_qid = eq->queue_id;
6a828b0f 15370 cq->assoc_qp = eq;
2d7dbc4c 15371 cq->host_index = 0;
32517fc0
JS
15372 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15373 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15374 cq->entry_count);
81b96eda 15375 cq->chann = idx;
2d7dbc4c
JS
15376
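 /* All CQs in the set share one flat page array in the request;
  * page_idx keeps the running offset as each queue's pages are added.
  */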
15377 rc = 0;
15378 list_for_each_entry(dmabuf, &cq->page_list, list) {
15379 memset(dmabuf->virt, 0, hw_page_size);
15380 cnt = page_idx + dmabuf->buffer_tag;
15381 cq_set->u.request.page[cnt].addr_lo =
15382 putPaddrLow(dmabuf->phys);
15383 cq_set->u.request.page[cnt].addr_hi =
15384 putPaddrHigh(dmabuf->phys);
15385 rc++;
15386 }
15387 page_idx += rc;
15388 }
15389
15390 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15391
15392 /* The IOCTL status is embedded in the mailbox subheader. */
15393 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15394 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15395 if (shdr_status || shdr_add_status || rc) {
15396 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15397 "3119 CQ_CREATE_SET mailbox failed with "
15398 "status x%x add_status x%x, mbx status x%x\n",
15399 shdr_status, shdr_add_status, rc);
15400 status = -ENXIO;
15401 goto out;
15402 }
15403 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15404 if (rc == 0xFFFF) {
15405 status = -ENXIO;
15406 goto out;
15407 }
15408
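 /* The port returns the base CQ id; CQs in the set are numbered
  * consecutively from it.
  */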
15409 for (idx = 0; idx < numcq; idx++) {
15410 cq = cqp[idx];
15411 cq->queue_id = rc + idx;
6a828b0f
JS
15412 if (cq->queue_id > phba->sli4_hba.cq_max)
15413 phba->sli4_hba.cq_max = cq->queue_id;
2d7dbc4c
JS
15414 }
15415
15416out:
15417 lpfc_sli4_mbox_cmd_free(phba, mbox);
15418 return status;
15419}
15420
b19a061a
JS
15421/**
15422 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15423 * @phba: HBA structure that indicates port to create a queue on.
15424 * @mq: The queue structure to use to create the mailbox queue.
15425 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15427 * @cq: The completion queue to associate with this mailbox queue.
15427 *
15428 * This function provides failback (fb) functionality when
15429 * mq_create_ext fails on older FW generations. Its purpose is otherwise
15430 * identical to mq_create_ext.
15431 *
15432 * This routine cannot fail as all attributes were previously accessed and
15433 * initialized in mq_create_ext.
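 *
 * Note: this routine is reached only from lpfc_mq_create() below, after
 * the port rejects MQ_CREATE_EXT; it reinitializes the same mailbox for
 * the legacy MQ_CREATE opcode.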
15434 **/
15435static void
15436lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15437 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15438{
15439 struct lpfc_mbx_mq_create *mq_create;
15440 struct lpfc_dmabuf *dmabuf;
15441 int length;
15442
15443 length = (sizeof(struct lpfc_mbx_mq_create) -
15444 sizeof(struct lpfc_sli4_cfg_mhdr));
15445 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15446 LPFC_MBOX_OPCODE_MQ_CREATE,
15447 length, LPFC_SLI4_MBX_EMBED);
15448 mq_create = &mbox->u.mqe.un.mq_create;
15449 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15450 mq->page_count);
15451 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15452 cq->queue_id);
15453 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15454 switch (mq->entry_count) {
15455 case 16:
5a6f133e
JS
15456 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15457 LPFC_MQ_RING_SIZE_16);
b19a061a
JS
15458 break;
15459 case 32:
5a6f133e
JS
15460 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15461 LPFC_MQ_RING_SIZE_32);
b19a061a
JS
15462 break;
15463 case 64:
5a6f133e
JS
15464 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15465 LPFC_MQ_RING_SIZE_64);
b19a061a
JS
15466 break;
15467 case 128:
5a6f133e
JS
15468 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15469 LPFC_MQ_RING_SIZE_128);
b19a061a
JS
15470 break;
15471 }
15472 list_for_each_entry(dmabuf, &mq->page_list, list) {
15473 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15474 putPaddrLow(dmabuf->phys);
15475 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15476 putPaddrHigh(dmabuf->phys);
15477 }
15478}
15479
04c68496
JS
15480/**
15481 * lpfc_mq_create - Create a mailbox Queue on the HBA
15482 * @phba: HBA structure that indicates port to create a queue on.
15483 * @mq: The queue structure to use to create the mailbox queue.
b19a061a
JS
15484 * @cq: The completion queue to associate with this mailbox queue.
15485 * @subtype: The queue's subtype.
04c68496
JS
15486 *
15487 * This function creates a mailbox queue, as detailed in @mq, on a port,
15488 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15489 *
15490 * The @phba struct is used to send the mailbox command to the HBA. The @mq
15491 * struct is used to get the entry count and entry size that are necessary to
15492 * determine the number of pages to allocate and use for this queue. The @cq
15493 * indicates which completion queue to associate this mailbox queue with. This
15494 * function will send the MQ_CREATE mailbox command to the HBA to set up the
15495 * mailbox queue. It is synchronous and waits for the mailbox command to finish.
15496 *
15497 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
15498 * memory this function will return -ENOMEM. If the queue create mailbox command
15499 * fails this function will return -ENXIO.
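 *
 * Illustrative call pattern (a sketch only, not lifted from the driver's
 * init path; the mq/cq pointers and the LPFC_MBOX subtype are assumed to
 * have been set up by the caller):
 *
 *	rc = lpfc_mq_create(phba, mq, cq, LPFC_MBOX);
 *	if (rc)
 *		return rc;	/* -ENOMEM, -ENODEV or -ENXIO */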
04c68496 15500 **/
b19a061a 15501int32_t
04c68496
JS
15502lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15503 struct lpfc_queue *cq, uint32_t subtype)
15504{
15505 struct lpfc_mbx_mq_create *mq_create;
b19a061a 15506 struct lpfc_mbx_mq_create_ext *mq_create_ext;
04c68496
JS
15507 struct lpfc_dmabuf *dmabuf;
15508 LPFC_MBOXQ_t *mbox;
15509 int rc, length, status = 0;
15510 uint32_t shdr_status, shdr_add_status;
15511 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15512 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
04c68496 15513
2e90f4b5
JS
15514 /* sanity check on queue memory */
15515 if (!mq || !cq)
15516 return -ENODEV;
49198b37
JS
15517 if (!phba->sli4_hba.pc_sli4_params.supported)
15518 hw_page_size = SLI4_PAGE_SIZE;
b19a061a 15519
04c68496
JS
15520 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15521 if (!mbox)
15522 return -ENOMEM;
b19a061a 15523 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
04c68496
JS
15524 sizeof(struct lpfc_sli4_cfg_mhdr));
15525 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
b19a061a 15526 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
04c68496 15527 length, LPFC_SLI4_MBX_EMBED);
b19a061a
JS
15528
15529 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
5a6f133e 15530 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
70f3c073
JS
15531 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15532 &mq_create_ext->u.request, mq->page_count);
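 /* Register this MQ to receive all async event categories the port
  * reports (link, FIP, group5, FC, SLI).
  */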
15533 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15534 &mq_create_ext->u.request, 1);
15535 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
b19a061a
JS
15536 &mq_create_ext->u.request, 1);
15537 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15538 &mq_create_ext->u.request, 1);
70f3c073
JS
15539 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15540 &mq_create_ext->u.request, 1);
15541 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15542 &mq_create_ext->u.request, 1);
b19a061a 15543 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
5a6f133e
JS
15544 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15545 phba->sli4_hba.pc_sli4_params.mqv);
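 /* For MQ create version 1 the CQ id is carried in the request itself;
  * for version 0 it lives in the MQ context.
  */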
15546 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15547 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15548 cq->queue_id);
15549 else
15550 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15551 cq->queue_id);
04c68496
JS
15552 switch (mq->entry_count) {
15553 default:
15554 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15555 "0362 Unsupported MQ count. (%d)\n",
15556 mq->entry_count);
4f4c1863
JS
15557 if (mq->entry_count < 16) {
15558 status = -EINVAL;
15559 goto out;
15560 }
5bd5f66c 15561 /* fall through - otherwise default to smallest count */
04c68496 15562 case 16:
5a6f133e
JS
15563 bf_set(lpfc_mq_context_ring_size,
15564 &mq_create_ext->u.request.context,
15565 LPFC_MQ_RING_SIZE_16);
04c68496
JS
15566 break;
15567 case 32:
5a6f133e
JS
15568 bf_set(lpfc_mq_context_ring_size,
15569 &mq_create_ext->u.request.context,
15570 LPFC_MQ_RING_SIZE_32);
04c68496
JS
15571 break;
15572 case 64:
5a6f133e
JS
15573 bf_set(lpfc_mq_context_ring_size,
15574 &mq_create_ext->u.request.context,
15575 LPFC_MQ_RING_SIZE_64);
04c68496
JS
15576 break;
15577 case 128:
5a6f133e
JS
15578 bf_set(lpfc_mq_context_ring_size,
15579 &mq_create_ext->u.request.context,
15580 LPFC_MQ_RING_SIZE_128);
04c68496
JS
15581 break;
15582 }
15583 list_for_each_entry(dmabuf, &mq->page_list, list) {
49198b37 15584 memset(dmabuf->virt, 0, hw_page_size);
b19a061a 15585 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
04c68496 15586 putPaddrLow(dmabuf->phys);
b19a061a 15587 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
04c68496
JS
15588 putPaddrHigh(dmabuf->phys);
15589 }
15590 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
b19a061a
JS
15591 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15592 &mq_create_ext->u.response);
15593 if (rc != MBX_SUCCESS) {
15594 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15595 "2795 MQ_CREATE_EXT failed with "
15596 "status x%x. Failback to MQ_CREATE.\n",
15597 rc);
15598 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15599 mq_create = &mbox->u.mqe.un.mq_create;
15600 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15601 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15602 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15603 &mq_create->u.response);
15604 }
15605
04c68496 15606 /* The IOCTL status is embedded in the mailbox subheader. */
04c68496
JS
15607 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15608 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15609 if (shdr_status || shdr_add_status || rc) {
15610 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15611 "2502 MQ_CREATE mailbox failed with "
15612 "status x%x add_status x%x, mbx status x%x\n",
15613 shdr_status, shdr_add_status, rc);
15614 status = -ENXIO;
15615 goto out;
15616 }
04c68496
JS
15617 if (mq->queue_id == 0xFFFF) {
15618 status = -ENXIO;
15619 goto out;
15620 }
15621 mq->type = LPFC_MQ;
2a622bfb 15622 mq->assoc_qid = cq->queue_id;
04c68496
JS
15623 mq->subtype = subtype;
15624 mq->host_index = 0;
15625 mq->hba_index = 0;
15626
15627 /* link the mq onto the parent cq child list */
15628 list_add_tail(&mq->list, &cq->child_list);
15629out:
8fa38513 15630 mempool_free(mbox, phba->mbox_mem_pool);
04c68496
JS
15631 return status;
15632}
15633
4f774513
JS
15634/**
15635 * lpfc_wq_create - Create a Work Queue on the HBA
15636 * @phba: HBA structure that indicates port to create a queue on.
15637 * @wq: The queue structure to use to create the work queue.
15638 * @cq: The completion queue to bind this work queue to.
15639 * @subtype: The subtype of the work queue indicating its functionality.
15640 *
15641 * This function creates a work queue, as detailed in @wq, on a port, described
15642 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15643 *
15644 * The @phba struct is used to send the mailbox command to the HBA. The @wq
15645 * struct is used to get the entry count and entry size that are necessary to
15646 * determine the number of pages to allocate and use for this queue. The @cq
15647 * is used to indicate which completion queue to bind this work queue to. This
15648 * function will send the WQ_CREATE mailbox command to the HBA to set up the
15649 * work queue. This function is synchronous and will wait for the mailbox
15650 * command to finish before continuing.
15651 *
15652 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
15653 * memory this function will return -ENOMEM. If the queue create mailbox command
15654 * fails this function will return -ENXIO.
4f774513 15655 **/
a2fc4aef 15656int
4f774513
JS
15657lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15658 struct lpfc_queue *cq, uint32_t subtype)
15659{
15660 struct lpfc_mbx_wq_create *wq_create;
15661 struct lpfc_dmabuf *dmabuf;
15662 LPFC_MBOXQ_t *mbox;
15663 int rc, length, status = 0;
15664 uint32_t shdr_status, shdr_add_status;
15665 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15666 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
5a6f133e 15667 struct dma_address *page;
962bc51b
JS
15668 void __iomem *bar_memmap_p;
15669 uint32_t db_offset;
15670 uint16_t pci_barset;
1351e69f
JS
15671 uint8_t dpp_barset;
15672 uint32_t dpp_offset;
15673 unsigned long pg_addr;
81b96eda 15674 uint8_t wq_create_version;
49198b37 15675
2e90f4b5
JS
15676 /* sanity check on queue memory */
15677 if (!wq || !cq)
15678 return -ENODEV;
49198b37 15679 if (!phba->sli4_hba.pc_sli4_params.supported)
81b96eda 15680 hw_page_size = wq->page_size;
4f774513
JS
15681
15682 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15683 if (!mbox)
15684 return -ENOMEM;
15685 length = (sizeof(struct lpfc_mbx_wq_create) -
15686 sizeof(struct lpfc_sli4_cfg_mhdr));
15687 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15688 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15689 length, LPFC_SLI4_MBX_EMBED);
15690 wq_create = &mbox->u.mqe.un.wq_create;
5a6f133e 15691 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
4f774513
JS
15692 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15693 wq->page_count);
15694 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15695 cq->queue_id);
0c651878
JS
15696
15697 /* wqv is the earliest version supported, NOT the latest */
5a6f133e
JS
15698 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15699 phba->sli4_hba.pc_sli4_params.wqv);
962bc51b 15700
c176ffa0
JS
15701 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15702 (wq->page_size > SLI4_PAGE_SIZE))
15703 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15704 else
15705 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15706
15712
15713 switch (wq_create_version) {
0c651878 15714 case LPFC_Q_CREATE_VERSION_1:
5a6f133e
JS
15715 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15716 wq->entry_count);
3f247de7
JS
15717 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15718 LPFC_Q_CREATE_VERSION_1);
15719
5a6f133e
JS
15720 switch (wq->entry_size) {
15721 default:
15722 case 64:
15723 bf_set(lpfc_mbx_wq_create_wqe_size,
15724 &wq_create->u.request_1,
15725 LPFC_WQ_WQE_SIZE_64);
15726 break;
15727 case 128:
15728 bf_set(lpfc_mbx_wq_create_wqe_size,
15729 &wq_create->u.request_1,
15730 LPFC_WQ_WQE_SIZE_128);
15731 break;
15732 }
1351e69f
JS
15733 /* Request DPP by default */
15734 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
8ea73db4
JS
15735 bf_set(lpfc_mbx_wq_create_page_size,
15736 &wq_create->u.request_1,
81b96eda 15737 (wq->page_size / SLI4_PAGE_SIZE));
5a6f133e 15738 page = wq_create->u.request_1.page;
0c651878
JS
15739 break;
15740 default:
1351e69f
JS
15741 page = wq_create->u.request.page;
15742 break;
5a6f133e 15743 }
0c651878 15744
4f774513 15745 list_for_each_entry(dmabuf, &wq->page_list, list) {
49198b37 15746 memset(dmabuf->virt, 0, hw_page_size);
5a6f133e
JS
15747 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15748 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
4f774513 15749 }
962bc51b
JS
15750
15751 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15752 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15753
4f774513
JS
15754 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15755 /* The IOCTL status is embedded in the mailbox subheader. */
4f774513
JS
15756 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15757 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15758 if (shdr_status || shdr_add_status || rc) {
15759 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15760 "2503 WQ_CREATE mailbox failed with "
15761 "status x%x add_status x%x, mbx status x%x\n",
15762 shdr_status, shdr_add_status, rc);
15763 status = -ENXIO;
15764 goto out;
15765 }
1351e69f
JS
15766
15767 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15768 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15769 &wq_create->u.response);
15770 else
15771 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15772 &wq_create->u.response_1);
15773
4f774513
JS
15774 if (wq->queue_id == 0xFFFF) {
15775 status = -ENXIO;
15776 goto out;
15777 }
1351e69f
JS
15778
15779 wq->db_format = LPFC_DB_LIST_FORMAT;
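 /* For a version 0 response on a dual-chute (DUA) port, the doorbell
  * format, PCI BAR and offset are returned in the mailbox response;
  * otherwise the default WQ doorbell register is used.
  */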
15780 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15781 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15782 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15783 &wq_create->u.response);
15784 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15785 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15787 "3265 WQ[%d] doorbell format "
15788 "not supported: x%x\n",
15789 wq->queue_id, wq->db_format);
15790 status = -EINVAL;
15791 goto out;
15792 }
15793 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15794 &wq_create->u.response);
15795 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15796 pci_barset);
15797 if (!bar_memmap_p) {
15798 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15799 "3263 WQ[%d] failed to memmap "
15800 "pci barset:x%x\n",
15801 wq->queue_id, pci_barset);
15802 status = -ENOMEM;
15803 goto out;
15804 }
15805 db_offset = wq_create->u.response.doorbell_offset;
15806 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15807 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15808 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15809 "3252 WQ[%d] doorbell offset "
15810 "not supported: x%x\n",
15811 wq->queue_id, db_offset);
15812 status = -EINVAL;
15813 goto out;
15814 }
15815 wq->db_regaddr = bar_memmap_p + db_offset;
15816 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15817 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15818 "format:x%x\n", wq->queue_id,
15819 pci_barset, db_offset, wq->db_format);
15820 } else
15821 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
962bc51b 15822 } else {
1351e69f
JS
15823 /* Check if DPP was honored by the firmware */
15824 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15825 &wq_create->u.response_1);
15826 if (wq->dpp_enable) {
15827 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15828 &wq_create->u.response_1);
15829 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15830 pci_barset);
15831 if (!bar_memmap_p) {
15832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15833 "3267 WQ[%d] failed to memmap "
15834 "pci barset:x%x\n",
15835 wq->queue_id, pci_barset);
15836 status = -ENOMEM;
15837 goto out;
15838 }
15839 db_offset = wq_create->u.response_1.doorbell_offset;
15840 wq->db_regaddr = bar_memmap_p + db_offset;
15841 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15842 &wq_create->u.response_1);
15843 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15844 &wq_create->u.response_1);
15845 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15846 dpp_barset);
15847 if (!bar_memmap_p) {
15848 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15849 "3268 WQ[%d] failed to memmap "
15850 "pci barset:x%x\n",
15851 wq->queue_id, dpp_barset);
15852 status = -ENOMEM;
15853 goto out;
15854 }
15855 dpp_offset = wq_create->u.response_1.dpp_offset;
15856 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15857 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15858 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15859 "dpp_id:x%x dpp_barset:x%x "
15860 "dpp_offset:x%x\n",
15861 wq->queue_id, pci_barset, db_offset,
15862 wq->dpp_id, dpp_barset, dpp_offset);
15863
15864 /* Enable combined writes for DPP aperture */
15865 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
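 /* set_memory_wc() is only available on x86; on other architectures the
  * driver clears cfg_enable_dpp instead of enabling combined writes.
  */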
15866#ifdef CONFIG_X86
15867 rc = set_memory_wc(pg_addr, 1);
15868 if (rc) {
15869 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15870 "3272 Cannot setup Combined "
15871 "Write on WQ[%d] - disable DPP\n",
15872 wq->queue_id);
15873 phba->cfg_enable_dpp = 0;
15874 }
15875#else
15876 phba->cfg_enable_dpp = 0;
15877#endif
15878 } else
15879 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
962bc51b 15880 }
895427bd
JS
15881 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15882 if (wq->pring == NULL) {
15883 status = -ENOMEM;
15884 goto out;
15885 }
4f774513 15886 wq->type = LPFC_WQ;
2a622bfb 15887 wq->assoc_qid = cq->queue_id;
4f774513
JS
15888 wq->subtype = subtype;
15889 wq->host_index = 0;
15890 wq->hba_index = 0;
32517fc0 15891 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
4f774513
JS
15892
15893 /* link the wq onto the parent cq child list */
15894 list_add_tail(&wq->list, &cq->child_list);
15895out:
8fa38513 15896 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
15897 return status;
15898}
15899
15900/**
15901 * lpfc_rq_create - Create a Receive Queue on the HBA
15902 * @phba: HBA structure that indicates port to create a queue on.
15903 * @hrq: The queue structure to use to create the header receive queue.
15904 * @drq: The queue structure to use to create the data receive queue.
15905 * @cq: The completion queue to bind this receive queue pair to.
 * @subtype: The subtype of the receive queues.
15906 *
15907 * This function creates a receive buffer queue pair, as detailed in @hrq and
15908 * @drq, on a port, described by @phba, by sending a RQ_CREATE mailbox command
15909 * to the HBA.
15910 *
15911 * The @phba struct is used to send the mailbox command to the HBA. The @drq and
15912 * @hrq structs are used to get the entry count that is necessary to determine
15913 * the number of pages to use for this queue. The @cq is used to indicate which
15914 * completion queue to bind received buffers that are posted to these queues to.
15915 * This function will send the RQ_CREATE mailbox command to the HBA to set up the
15916 * receive queue pair. This function is synchronous and will wait for the
15917 * mailbox command to finish before continuing.
15918 *
15919 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
15920 * memory this function will return -ENOMEM. If the queue create mailbox command
15921 * fails this function will return -ENXIO.
4f774513 15922 **/
a2fc4aef 15923int
4f774513
JS
15924lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15925 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15926{
15927 struct lpfc_mbx_rq_create *rq_create;
15928 struct lpfc_dmabuf *dmabuf;
15929 LPFC_MBOXQ_t *mbox;
15930 int rc, length, status = 0;
15931 uint32_t shdr_status, shdr_add_status;
15932 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15933 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
962bc51b
JS
15934 void __iomem *bar_memmap_p;
15935 uint32_t db_offset;
15936 uint16_t pci_barset;
49198b37 15937
2e90f4b5
JS
15938 /* sanity check on queue memory */
15939 if (!hrq || !drq || !cq)
15940 return -ENODEV;
49198b37
JS
15941 if (!phba->sli4_hba.pc_sli4_params.supported)
15942 hw_page_size = SLI4_PAGE_SIZE;
4f774513
JS
15943
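 /* Header and data RQEs are consumed as pairs, so the two queues must
  * be sized identically.
  */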
15944 if (hrq->entry_count != drq->entry_count)
15945 return -EINVAL;
15946 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15947 if (!mbox)
15948 return -ENOMEM;
15949 length = (sizeof(struct lpfc_mbx_rq_create) -
15950 sizeof(struct lpfc_sli4_cfg_mhdr));
15951 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15952 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15953 length, LPFC_SLI4_MBX_EMBED);
15954 rq_create = &mbox->u.mqe.un.rq_create;
5a6f133e
JS
15955 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15956 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15957 phba->sli4_hba.pc_sli4_params.rqv);
15958 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15959 bf_set(lpfc_rq_context_rqe_count_1,
15960 &rq_create->u.request.context,
15961 hrq->entry_count);
15962 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
c31098ce
JS
15963 bf_set(lpfc_rq_context_rqe_size,
15964 &rq_create->u.request.context,
15965 LPFC_RQE_SIZE_8);
15966 bf_set(lpfc_rq_context_page_size,
15967 &rq_create->u.request.context,
8ea73db4 15968 LPFC_RQ_PAGE_SIZE_4096);
5a6f133e
JS
15969 } else {
15970 switch (hrq->entry_count) {
15971 default:
15972 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15973 "2535 Unsupported RQ count. (%d)\n",
15974 hrq->entry_count);
4f4c1863
JS
15975 if (hrq->entry_count < 512) {
15976 status = -EINVAL;
15977 goto out;
15978 }
5bd5f66c 15979 /* fall through - otherwise default to smallest count */
5a6f133e
JS
15980 case 512:
15981 bf_set(lpfc_rq_context_rqe_count,
15982 &rq_create->u.request.context,
15983 LPFC_RQ_RING_SIZE_512);
15984 break;
15985 case 1024:
15986 bf_set(lpfc_rq_context_rqe_count,
15987 &rq_create->u.request.context,
15988 LPFC_RQ_RING_SIZE_1024);
15989 break;
15990 case 2048:
15991 bf_set(lpfc_rq_context_rqe_count,
15992 &rq_create->u.request.context,
15993 LPFC_RQ_RING_SIZE_2048);
15994 break;
15995 case 4096:
15996 bf_set(lpfc_rq_context_rqe_count,
15997 &rq_create->u.request.context,
15998 LPFC_RQ_RING_SIZE_4096);
15999 break;
16000 }
16001 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16002 LPFC_HDR_BUF_SIZE);
4f774513
JS
16003 }
16004 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16005 cq->queue_id);
16006 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16007 hrq->page_count);
4f774513 16008 list_for_each_entry(dmabuf, &hrq->page_list, list) {
49198b37 16009 memset(dmabuf->virt, 0, hw_page_size);
4f774513
JS
16010 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16011 putPaddrLow(dmabuf->phys);
16012 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16013 putPaddrHigh(dmabuf->phys);
16014 }
962bc51b
JS
16015 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16016 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16017
4f774513
JS
16018 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16019 /* The IOCTL status is embedded in the mailbox subheader. */
4f774513
JS
16020 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16021 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16022 if (shdr_status || shdr_add_status || rc) {
16023 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16024 "2504 RQ_CREATE mailbox failed with "
16025 "status x%x add_status x%x, mbx status x%x\n",
16026 shdr_status, shdr_add_status, rc);
16027 status = -ENXIO;
16028 goto out;
16029 }
16030 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16031 if (hrq->queue_id == 0xFFFF) {
16032 status = -ENXIO;
16033 goto out;
16034 }
962bc51b
JS
16035
16036 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16037 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16038 &rq_create->u.response);
16039 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16040 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16042 "3262 RQ [%d] doorbell format not "
16043 "supported: x%x\n", hrq->queue_id,
16044 hrq->db_format);
16045 status = -EINVAL;
16046 goto out;
16047 }
16048
16049 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16050 &rq_create->u.response);
16051 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16052 if (!bar_memmap_p) {
16053 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16054 "3269 RQ[%d] failed to memmap pci "
16055 "barset:x%x\n", hrq->queue_id,
16056 pci_barset);
16057 status = -ENOMEM;
16058 goto out;
16059 }
16060
16061 db_offset = rq_create->u.response.doorbell_offset;
16062 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16063 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16065 "3270 RQ[%d] doorbell offset not "
16066 "supported: x%x\n", hrq->queue_id,
16067 db_offset);
16068 status = -EINVAL;
16069 goto out;
16070 }
16071 hrq->db_regaddr = bar_memmap_p + db_offset;
16072 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
a22e7db3
JS
16073 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16074 "format:x%x\n", hrq->queue_id, pci_barset,
16075 db_offset, hrq->db_format);
962bc51b
JS
16076 } else {
16077 hrq->db_format = LPFC_DB_RING_FORMAT;
16078 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16079 }
4f774513 16080 hrq->type = LPFC_HRQ;
2a622bfb 16081 hrq->assoc_qid = cq->queue_id;
4f774513
JS
16082 hrq->subtype = subtype;
16083 hrq->host_index = 0;
16084 hrq->hba_index = 0;
32517fc0 16085 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
4f774513
JS
16086
16087 /* now create the data queue */
16088 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16089 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16090 length, LPFC_SLI4_MBX_EMBED);
5a6f133e
JS
16091 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16092 phba->sli4_hba.pc_sli4_params.rqv);
16093 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16094 bf_set(lpfc_rq_context_rqe_count_1,
c31098ce 16095 &rq_create->u.request.context, hrq->entry_count);
3c603be9
JS
16096 if (subtype == LPFC_NVMET)
16097 rq_create->u.request.context.buffer_size =
16098 LPFC_NVMET_DATA_BUF_SIZE;
16099 else
16100 rq_create->u.request.context.buffer_size =
16101 LPFC_DATA_BUF_SIZE;
c31098ce
JS
16102 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16103 LPFC_RQE_SIZE_8);
16104 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16105 (PAGE_SIZE/SLI4_PAGE_SIZE));
5a6f133e
JS
16106 } else {
16107 switch (drq->entry_count) {
16108 default:
16109 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16110 "2536 Unsupported RQ count. (%d)\n",
16111 drq->entry_count);
4f4c1863
JS
16112 if (drq->entry_count < 512) {
16113 status = -EINVAL;
16114 goto out;
16115 }
5bd5f66c 16116 /* fall through - otherwise default to smallest count */
5a6f133e
JS
16117 case 512:
16118 bf_set(lpfc_rq_context_rqe_count,
16119 &rq_create->u.request.context,
16120 LPFC_RQ_RING_SIZE_512);
16121 break;
16122 case 1024:
16123 bf_set(lpfc_rq_context_rqe_count,
16124 &rq_create->u.request.context,
16125 LPFC_RQ_RING_SIZE_1024);
16126 break;
16127 case 2048:
16128 bf_set(lpfc_rq_context_rqe_count,
16129 &rq_create->u.request.context,
16130 LPFC_RQ_RING_SIZE_2048);
16131 break;
16132 case 4096:
16133 bf_set(lpfc_rq_context_rqe_count,
16134 &rq_create->u.request.context,
16135 LPFC_RQ_RING_SIZE_4096);
16136 break;
16137 }
3c603be9
JS
16138 if (subtype == LPFC_NVMET)
16139 bf_set(lpfc_rq_context_buf_size,
16140 &rq_create->u.request.context,
16141 LPFC_NVMET_DATA_BUF_SIZE);
16142 else
16143 bf_set(lpfc_rq_context_buf_size,
16144 &rq_create->u.request.context,
16145 LPFC_DATA_BUF_SIZE);
4f774513
JS
16146 }
16147 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16148 cq->queue_id);
16149 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16150 drq->page_count);
4f774513
JS
16151 list_for_each_entry(dmabuf, &drq->page_list, list) {
16152 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16153 putPaddrLow(dmabuf->phys);
16154 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16155 putPaddrHigh(dmabuf->phys);
16156 }
962bc51b
JS
16157 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16158 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
4f774513
JS
16159 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16160 /* The IOCTL status is embedded in the mailbox subheader. */
16161 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16162 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16163 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16164 if (shdr_status || shdr_add_status || rc) {
16165 status = -ENXIO;
16166 goto out;
16167 }
16168 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16169 if (drq->queue_id == 0xFFFF) {
16170 status = -ENXIO;
16171 goto out;
16172 }
16173 drq->type = LPFC_DRQ;
2a622bfb 16174 drq->assoc_qid = cq->queue_id;
4f774513
JS
16175 drq->subtype = subtype;
16176 drq->host_index = 0;
16177 drq->hba_index = 0;
32517fc0 16178 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
4f774513
JS
16179
16180 /* link the header and data RQs onto the parent cq child list */
16181 list_add_tail(&hrq->list, &cq->child_list);
16182 list_add_tail(&drq->list, &cq->child_list);
16183
16184out:
8fa38513 16185 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
16186 return status;
16187}
16188
2d7dbc4c
JS
16189/**
16190 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
16191 * @phba: HBA structure that indicates port to create a queue on.
16192 * @hrqp: The queue structure array to use to create the header receive queues.
16193 * @drqp: The queue structure array to use to create the data receive queues.
16194 * @cqp: The completion queue array to bind these receive queues to.
16195 *
16196 * This function creates receive buffer queue pairs, as detailed in @hrqp and
16197 * @drqp, on a port, described by @phba, by sending a RQ_CREATE mailbox command
16198 * to the HBA.
16199 *
16200 * The @phba struct is used to send the mailbox command to the HBA. The @drqp and
16201 * @hrqp arrays are used to get the entry counts that are necessary to determine
16202 * the number of pages to use for each queue. The @cqp array indicates which
16203 * completion queues to bind received buffers that are posted to these queues to.
16204 * This function will send the RQ_CREATE mailbox command to the HBA to set up
16205 * the receive queue pairs. This function is synchronous and will wait for the
16206 * mailbox command to finish before continuing.
16207 *
16208 * On success this function will return a zero. If unable to allocate enough
16209 * memory this function will return -ENOMEM. If the queue create mailbox command
16210 * fails this function will return -ENXIO.
16211 **/
16212int
16213lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16214 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16215 uint32_t subtype)
16216{
16217 struct lpfc_queue *hrq, *drq, *cq;
16218 struct lpfc_mbx_rq_create_v2 *rq_create;
16219 struct lpfc_dmabuf *dmabuf;
16220 LPFC_MBOXQ_t *mbox;
16221 int rc, length, alloclen, status = 0;
16222 int cnt, idx, numrq, page_idx = 0;
16223 uint32_t shdr_status, shdr_add_status;
16224 union lpfc_sli4_cfg_shdr *shdr;
16225 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16226
16227 numrq = phba->cfg_nvmet_mrq;
16228 /* sanity check on array memory */
16229 if (!hrqp || !drqp || !cqp || !numrq)
16230 return -ENODEV;
16231 if (!phba->sli4_hba.pc_sli4_params.supported)
16232 hw_page_size = SLI4_PAGE_SIZE;
16233
16234 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16235 if (!mbox)
16236 return -ENOMEM;
16237
16238 length = sizeof(struct lpfc_mbx_rq_create_v2);
16239 length += ((2 * numrq * hrqp[0]->page_count) *
16240 sizeof(struct dma_address));
16241
16242 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16243 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16244 LPFC_SLI4_MBX_NEMBED);
16245 if (alloclen < length) {
16246 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16247 "3099 Allocated DMA memory size (%d) is "
16248 "less than the requested DMA memory size "
16249 "(%d)\n", alloclen, length);
16250 status = -ENOMEM;
16251 goto out;
16252 }
16253
16254
16255
16256 rq_create = mbox->sge_array->addr[0];
16257 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16258
16259 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16260 cnt = 0;
16261
16262 for (idx = 0; idx < numrq; idx++) {
16263 hrq = hrqp[idx];
16264 drq = drqp[idx];
16265 cq = cqp[idx];
16266
2d7dbc4c
JS
16267 /* sanity check on queue memory */
16268 if (!hrq || !drq || !cq) {
16269 status = -ENODEV;
16270 goto out;
16271 }
16272
7aabe84b
JS
16273 if (hrq->entry_count != drq->entry_count) {
16274 status = -EINVAL;
16275 goto out;
16276 }
16277
2d7dbc4c
JS
16278 if (idx == 0) {
16279 bf_set(lpfc_mbx_rq_create_num_pages,
16280 &rq_create->u.request,
16281 hrq->page_count);
16282 bf_set(lpfc_mbx_rq_create_rq_cnt,
16283 &rq_create->u.request, (numrq * 2));
16284 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16285 1);
16286 bf_set(lpfc_rq_context_base_cq,
16287 &rq_create->u.request.context,
16288 cq->queue_id);
16289 bf_set(lpfc_rq_context_data_size,
16290 &rq_create->u.request.context,
3c603be9 16291 LPFC_NVMET_DATA_BUF_SIZE);
2d7dbc4c
JS
16292 bf_set(lpfc_rq_context_hdr_size,
16293 &rq_create->u.request.context,
16294 LPFC_HDR_BUF_SIZE);
16295 bf_set(lpfc_rq_context_rqe_count_1,
16296 &rq_create->u.request.context,
16297 hrq->entry_count);
16298 bf_set(lpfc_rq_context_rqe_size,
16299 &rq_create->u.request.context,
16300 LPFC_RQE_SIZE_8);
16301 bf_set(lpfc_rq_context_page_size,
16302 &rq_create->u.request.context,
16303 (PAGE_SIZE/SLI4_PAGE_SIZE));
16304 }
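 /* Header RQ pages followed by data RQ pages for this pair are appended
  * to one flat page array shared by all RQs in the set; cnt tracks the
  * running offset.
  */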
16305 rc = 0;
16306 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16307 memset(dmabuf->virt, 0, hw_page_size);
16308 cnt = page_idx + dmabuf->buffer_tag;
16309 rq_create->u.request.page[cnt].addr_lo =
16310 putPaddrLow(dmabuf->phys);
16311 rq_create->u.request.page[cnt].addr_hi =
16312 putPaddrHigh(dmabuf->phys);
16313 rc++;
16314 }
16315 page_idx += rc;
16316
16317 rc = 0;
16318 list_for_each_entry(dmabuf, &drq->page_list, list) {
16319 memset(dmabuf->virt, 0, hw_page_size);
16320 cnt = page_idx + dmabuf->buffer_tag;
16321 rq_create->u.request.page[cnt].addr_lo =
16322 putPaddrLow(dmabuf->phys);
16323 rq_create->u.request.page[cnt].addr_hi =
16324 putPaddrHigh(dmabuf->phys);
16325 rc++;
16326 }
16327 page_idx += rc;
16328
16329 hrq->db_format = LPFC_DB_RING_FORMAT;
16330 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16331 hrq->type = LPFC_HRQ;
16332 hrq->assoc_qid = cq->queue_id;
16333 hrq->subtype = subtype;
16334 hrq->host_index = 0;
16335 hrq->hba_index = 0;
32517fc0 16336 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
2d7dbc4c
JS
16337
16338 drq->db_format = LPFC_DB_RING_FORMAT;
16339 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16340 drq->type = LPFC_DRQ;
16341 drq->assoc_qid = cq->queue_id;
16342 drq->subtype = subtype;
16343 drq->host_index = 0;
16344 drq->hba_index = 0;
32517fc0 16345 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
2d7dbc4c
JS
16346
16347 list_add_tail(&hrq->list, &cq->child_list);
16348 list_add_tail(&drq->list, &cq->child_list);
16349 }
16350
16351 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16352 /* The IOCTL status is embedded in the mailbox subheader. */
16353 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16354 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16355 if (shdr_status || shdr_add_status || rc) {
16356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16357 "3120 RQ_CREATE mailbox failed with "
16358 "status x%x add_status x%x, mbx status x%x\n",
16359 shdr_status, shdr_add_status, rc);
16360 status = -ENXIO;
16361 goto out;
16362 }
16363 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16364 if (rc == 0xFFFF) {
16365 status = -ENXIO;
16366 goto out;
16367 }
16368
16369 /* Initialize all RQs with the associated queue id: the port returns the
 * base id and each hrq/drq pair takes two consecutive ids (header, data) */
16370 for (idx = 0; idx < numrq; idx++) {
16371 hrq = hrqp[idx];
16372 hrq->queue_id = rc + (2 * idx);
16373 drq = drqp[idx];
16374 drq->queue_id = rc + (2 * idx) + 1;
16375 }
16376
16377out:
16378 lpfc_sli4_mbox_cmd_free(phba, mbox);
16379 return status;
16380}
16381
4f774513
JS
16382/**
16383 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16384 * @eq: The queue structure associated with the queue to destroy.
16385 *
16386 * This function destroys a queue, as detailed in @eq, by sending a mailbox
16387 * command, specific to the type of queue, to the HBA.
16388 *
16389 * The @eq struct is used to get the queue ID of the queue to destroy.
16390 *
16391 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16392 * command fails this function will return -ENXIO.
4f774513 16393 **/
a2fc4aef 16394int
4f774513
JS
16395lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16396{
16397 LPFC_MBOXQ_t *mbox;
16398 int rc, length, status = 0;
16399 uint32_t shdr_status, shdr_add_status;
16400 union lpfc_sli4_cfg_shdr *shdr;
16401
2e90f4b5 16402 /* sanity check on queue memory */
4f774513
JS
16403 if (!eq)
16404 return -ENODEV;
32517fc0 16405
4f774513
JS
16406 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16407 if (!mbox)
16408 return -ENOMEM;
16409 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16410 sizeof(struct lpfc_sli4_cfg_mhdr));
16411 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16412 LPFC_MBOX_OPCODE_EQ_DESTROY,
16413 length, LPFC_SLI4_MBX_EMBED);
16414 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16415 eq->queue_id);
16416 mbox->vport = eq->phba->pport;
16417 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16418
16419 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16420 /* The IOCTL status is embedded in the mailbox subheader. */
16421 shdr = (union lpfc_sli4_cfg_shdr *)
16422 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16423 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16424 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16425 if (shdr_status || shdr_add_status || rc) {
16426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16427 "2505 EQ_DESTROY mailbox failed with "
16428 "status x%x add_status x%x, mbx status x%x\n",
16429 shdr_status, shdr_add_status, rc);
16430 status = -ENXIO;
16431 }
16432
16433 /* Remove eq from any list */
16434 list_del_init(&eq->list);
8fa38513 16435 mempool_free(mbox, eq->phba->mbox_mem_pool);
4f774513
JS
16436 return status;
16437}
16438
16439/**
16440 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16441 * @cq: The queue structure associated with the queue to destroy.
16442 *
16443 * This function destroys a queue, as detailed in @cq, by sending a mailbox
16444 * command, specific to the type of queue, to the HBA.
16445 *
16446 * The @cq struct is used to get the queue ID of the queue to destroy.
16447 *
16448 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16449 * command fails this function will return -ENXIO.
4f774513 16450 **/
a2fc4aef 16451int
4f774513
JS
16452lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16453{
16454 LPFC_MBOXQ_t *mbox;
16455 int rc, length, status = 0;
16456 uint32_t shdr_status, shdr_add_status;
16457 union lpfc_sli4_cfg_shdr *shdr;
16458
2e90f4b5 16459 /* sanity check on queue memory */
4f774513
JS
16460 if (!cq)
16461 return -ENODEV;
16462 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16463 if (!mbox)
16464 return -ENOMEM;
16465 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16466 sizeof(struct lpfc_sli4_cfg_mhdr));
16467 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16468 LPFC_MBOX_OPCODE_CQ_DESTROY,
16469 length, LPFC_SLI4_MBX_EMBED);
16470 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16471 cq->queue_id);
16472 mbox->vport = cq->phba->pport;
16473 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16474 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16475 /* The IOCTL status is embedded in the mailbox subheader. */
16476 shdr = (union lpfc_sli4_cfg_shdr *)
16477 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16478 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16479 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16480 if (shdr_status || shdr_add_status || rc) {
16481 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16482 "2506 CQ_DESTROY mailbox failed with "
16483 "status x%x add_status x%x, mbx status x%x\n",
16484 shdr_status, shdr_add_status, rc);
16485 status = -ENXIO;
16486 }
16487 /* Remove cq from any list */
16488 list_del_init(&cq->list);
8fa38513 16489 mempool_free(mbox, cq->phba->mbox_mem_pool);
4f774513
JS
16490 return status;
16491}
16492
04c68496
JS
16493/**
16494 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16495 * @mq: The queue structure associated with the queue to destroy.
16496 *
16497 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16498 * command, specific to the type of queue, to the HBA.
16499 *
16500 * The @mq struct is used to get the queue ID of the queue to destroy.
16501 *
16502 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16503 * command fails this function will return -ENXIO.
04c68496 16504 **/
a2fc4aef 16505int
04c68496
JS
16506lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16507{
16508 LPFC_MBOXQ_t *mbox;
16509 int rc, length, status = 0;
16510 uint32_t shdr_status, shdr_add_status;
16511 union lpfc_sli4_cfg_shdr *shdr;
16512
2e90f4b5 16513 /* sanity check on queue memory */
04c68496
JS
16514 if (!mq)
16515 return -ENODEV;
16516 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16517 if (!mbox)
16518 return -ENOMEM;
16519 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16520 sizeof(struct lpfc_sli4_cfg_mhdr));
16521 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16522 LPFC_MBOX_OPCODE_MQ_DESTROY,
16523 length, LPFC_SLI4_MBX_EMBED);
16524 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16525 mq->queue_id);
16526 mbox->vport = mq->phba->pport;
16527 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16528 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16529 /* The IOCTL status is embedded in the mailbox subheader. */
16530 shdr = (union lpfc_sli4_cfg_shdr *)
16531 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16532 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16533 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16534 if (shdr_status || shdr_add_status || rc) {
16535 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16536 "2507 MQ_DESTROY mailbox failed with "
16537 "status x%x add_status x%x, mbx status x%x\n",
16538 shdr_status, shdr_add_status, rc);
16539 status = -ENXIO;
16540 }
16541 /* Remove mq from any list */
16542 list_del_init(&mq->list);
8fa38513 16543 mempool_free(mbox, mq->phba->mbox_mem_pool);
04c68496
JS
16544 return status;
16545}
16546
4f774513
JS
16547/**
16548 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16549 * @wq: The queue structure associated with the queue to destroy.
16550 *
16551 * This function destroys a queue, as detailed in @wq, by sending a mailbox
16552 * command, specific to the type of queue, to the HBA.
16553 *
16554 * The @wq struct is used to get the queue ID of the queue to destroy.
16555 *
16556 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16557 * command fails this function will return -ENXIO.
4f774513 16558 **/
a2fc4aef 16559int
4f774513
JS
16560lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16561{
16562 LPFC_MBOXQ_t *mbox;
16563 int rc, length, status = 0;
16564 uint32_t shdr_status, shdr_add_status;
16565 union lpfc_sli4_cfg_shdr *shdr;
16566
2e90f4b5 16567 /* sanity check on queue memory */
4f774513
JS
16568 if (!wq)
16569 return -ENODEV;
16570 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16571 if (!mbox)
16572 return -ENOMEM;
16573 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16574 sizeof(struct lpfc_sli4_cfg_mhdr));
16575 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16576 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16577 length, LPFC_SLI4_MBX_EMBED);
16578 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16579 wq->queue_id);
16580 mbox->vport = wq->phba->pport;
16581 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16582 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16583 shdr = (union lpfc_sli4_cfg_shdr *)
16584 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16585 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16586 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16587 if (shdr_status || shdr_add_status || rc) {
16588 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16589 "2508 WQ_DESTROY mailbox failed with "
16590 "status x%x add_status x%x, mbx status x%x\n",
16591 shdr_status, shdr_add_status, rc);
16592 status = -ENXIO;
16593 }
16594 /* Remove wq from any list */
16595 list_del_init(&wq->list);
d1f525aa
JS
16596 kfree(wq->pring);
16597 wq->pring = NULL;
8fa38513 16598 mempool_free(mbox, wq->phba->mbox_mem_pool);
4f774513
JS
16599 return status;
16600}
16601
16602/**
16603 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16604 * @rq: The queue structure associated with the queue to destroy.
16605 *
16606 * This function destroys a queue, as detailed in @rq, by sending a mailbox
16607 * command, specific to the type of queue, to the HBA.
16608 *
16609 * The @rq struct is used to get the queue ID of the queue to destroy.
16610 *
16611 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16612 * command fails this function will return -ENXIO.
4f774513 16613 **/
a2fc4aef 16614int
4f774513
JS
16615lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16616 struct lpfc_queue *drq)
16617{
16618 LPFC_MBOXQ_t *mbox;
16619 int rc, length, status = 0;
16620 uint32_t shdr_status, shdr_add_status;
16621 union lpfc_sli4_cfg_shdr *shdr;
16622
2e90f4b5 16623 /* sanity check on queue memory */
4f774513
JS
16624 if (!hrq || !drq)
16625 return -ENODEV;
16626 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16627 if (!mbox)
16628 return -ENOMEM;
16629 length = (sizeof(struct lpfc_mbx_rq_destroy) -
fedd3b7b 16630 sizeof(struct lpfc_sli4_cfg_mhdr));
4f774513
JS
16631 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16632 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16633 length, LPFC_SLI4_MBX_EMBED);
16634 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16635 hrq->queue_id);
16636 mbox->vport = hrq->phba->pport;
16637 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16638 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16639 /* The IOCTL status is embedded in the mailbox subheader. */
16640 shdr = (union lpfc_sli4_cfg_shdr *)
16641 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16642 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16643 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16644 if (shdr_status || shdr_add_status || rc) {
16645 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16646 "2509 RQ_DESTROY mailbox failed with "
16647 "status x%x add_status x%x, mbx status x%x\n",
16648 shdr_status, shdr_add_status, rc);
16649 if (rc != MBX_TIMEOUT)
16650 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16651 return -ENXIO;
16652 }
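 /* Reuse the same mailbox to destroy the companion data RQ. */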
16653 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16654 drq->queue_id);
16655 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16656 shdr = (union lpfc_sli4_cfg_shdr *)
16657 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16658 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16659 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16660 if (shdr_status || shdr_add_status || rc) {
16661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16662 "2510 RQ_DESTROY mailbox failed with "
16663 "status x%x add_status x%x, mbx status x%x\n",
16664 shdr_status, shdr_add_status, rc);
16665 status = -ENXIO;
16666 }
16667 list_del_init(&hrq->list);
16668 list_del_init(&drq->list);
8fa38513 16669 mempool_free(mbox, hrq->phba->mbox_mem_pool);
4f774513
JS
16670 return status;
16671}
16672
16673/**
16674 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16675 * @phba: pointer to lpfc hba data structure.
16676 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16677 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16678 * @xritag: the xritag that ties this io to the SGL pages.
16679 *
16680 * This routine will post the sgl pages for the IO that has the xritag
16681 * that is in the iocbq structure. The xritag is assigned during iocbq
16682 * creation and persists for as long as the driver is loaded.
16683 * If the caller has fewer than 256 scatter gather segments to map then
16684 * pdma_phys_addr1 should be 0.
16685 * If the caller needs to map more than 256 scatter gather segments then
16686 * pdma_phys_addr1 should be a valid physical address.
16687 * Physical addresses for SGLs must be 64-byte aligned.
16688 * If you are going to map 2 SGLs then the first one must have 256 entries and
16689 * the second sgl can have between 1 and 256 entries.
16690 *
16691 * Return codes:
16692 * 0 - Success
16693 * -ENXIO, -ENOMEM - Failure
16694 **/
16695int
16696lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16697 dma_addr_t pdma_phys_addr0,
16698 dma_addr_t pdma_phys_addr1,
16699 uint16_t xritag)
16700{
16701 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16702 LPFC_MBOXQ_t *mbox;
16703 int rc;
16704 uint32_t shdr_status, shdr_add_status;
6d368e53 16705 uint32_t mbox_tmo;
4f774513
JS
16706 union lpfc_sli4_cfg_shdr *shdr;
16707
16708 if (xritag == NO_XRI) {
16709 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16710 "0364 Invalid param:\n");
16711 return -EINVAL;
16712 }
16713
16714 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16715 if (!mbox)
16716 return -ENOMEM;
16717
16718 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16719 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16720 sizeof(struct lpfc_mbx_post_sgl_pages) -
fedd3b7b 16721 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
4f774513
JS
16722
16723 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16724 &mbox->u.mqe.un.post_sgl_pages;
16725 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16726 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16727
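 /* A single sgl page pair carries both SGL page addresses for this xri
  * (the second address may be zero).
  */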
16728 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16729 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16730 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16731 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16732
16733 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16734 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16735 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16736 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16737 if (!phba->sli4_hba.intr_enable)
16738 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6d368e53 16739 else {
a183a15f 16740 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
16741 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16742 }
4f774513
JS
16743 /* The IOCTL status is embedded in the mailbox subheader. */
16744 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16745 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16746 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16747 if (rc != MBX_TIMEOUT)
16748 mempool_free(mbox, phba->mbox_mem_pool);
16749 if (shdr_status || shdr_add_status || rc) {
16750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16751 "2511 POST_SGL mailbox failed with "
16752 "status x%x add_status x%x, mbx status x%x\n",
16753 shdr_status, shdr_add_status, rc);
4f774513
JS
16754 }
16755 return 0;
16756}
4f774513 16757
6d368e53 16758/**
88a2cfbb 16759 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
6d368e53
JS
16760 * @phba: pointer to lpfc hba data structure.
16761 *
16762 * This routine is invoked to allocate the next available xri from the
16763 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
16764 * xri is a logical index, so the search starts at bit 0 on each call and
16765 * the first free bit found is claimed.
16766 *
16767 * Returns
16768 * The allocated logical xri (0 <= xri < max_xri) if successful
16769 * NO_XRI if no xris are available.
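 *
 * Typical pairing (an illustrative sketch only; error handling elided):
 *
 *	xri = lpfc_sli4_alloc_xri(phba);
 *	if (xri == NO_XRI)
 *		return NO_XRI;
 *	...
 *	lpfc_sli4_free_xri(phba, xri);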
16770 **/
5d8b8167 16771static uint16_t
6d368e53
JS
16772lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16773{
16774 unsigned long xri;
16775
16776 /*
16777 * Fetch the next logical xri. Because this index is logical,
16778 * the driver starts at 0 each time.
16779 */
16780 spin_lock_irq(&phba->hbalock);
16781 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16782 phba->sli4_hba.max_cfg_param.max_xri, 0);
16783 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16784 spin_unlock_irq(&phba->hbalock);
16785 return NO_XRI;
16786 } else {
16787 set_bit(xri, phba->sli4_hba.xri_bmask);
16788 phba->sli4_hba.max_cfg_param.xri_used++;
6d368e53 16789 }
6d368e53
JS
16790 spin_unlock_irq(&phba->hbalock);
16791 return xri;
16792}
16793
16794/**
 16795 * __lpfc_sli4_free_xri - Release an xri for reuse.
16796 * @phba: pointer to lpfc hba data structure.
16797 *
16798 * This routine is invoked to release an xri to the pool of
 16799 * available xris maintained by the driver (caller holds the hbalock).
16800 **/
5d8b8167 16801static void
6d368e53
JS
16802__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16803{
16804 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
6d368e53
JS
16805 phba->sli4_hba.max_cfg_param.xri_used--;
16806 }
16807}
16808
16809/**
16810 * lpfc_sli4_free_xri - Release an xri for reuse.
16811 * @phba: pointer to lpfc hba data structure.
16812 *
16813 * This routine is invoked to release an xri to the pool of
 16814 * available xris maintained by the driver.
16815 **/
16816void
16817lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16818{
16819 spin_lock_irq(&phba->hbalock);
16820 __lpfc_sli4_free_xri(phba, xri);
16821 spin_unlock_irq(&phba->hbalock);
16822}
16823
4f774513
JS
16824/**
16825 * lpfc_sli4_next_xritag - Get an xritag for the io
16826 * @phba: Pointer to HBA context object.
16827 *
16828 * This function gets an xritag for the iocb. If there is no unused xritag
 16829 * it will return NO_XRI (0xffff).
 16830 * The function returns the allocated xritag if successful, else returns
 16831 * NO_XRI; NO_XRI is not a valid xritag.
16832 * The caller is not required to hold any lock.
16833 **/
16834uint16_t
16835lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16836{
6d368e53 16837 uint16_t xri_index;
4f774513 16838
6d368e53 16839 xri_index = lpfc_sli4_alloc_xri(phba);
81378052
JS
16840 if (xri_index == NO_XRI)
16841 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16842 "2004 Failed to allocate XRI.last XRITAG is %d"
16843 " Max XRI is %d, Used XRI is %d\n",
16844 xri_index,
16845 phba->sli4_hba.max_cfg_param.max_xri,
16846 phba->sli4_hba.max_cfg_param.xri_used);
16847 return xri_index;
4f774513
JS
16848}
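
/*
 * Editorial sketch (assumed usage, not from the source): the allocation and
 * free helpers above are meant to be paired so the xri_bmask and xri_used
 * counter stay balanced, for example:
 *
 *	uint16_t xritag = lpfc_sli4_next_xritag(phba);
 *
 *	if (xritag == NO_XRI)
 *		return -ENOMEM;			// pool exhausted, already logged
 *	...
 *	lpfc_sli4_free_xri(phba, xritag);	// error/teardown path
 */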
16849
16850/**
895427bd 16851 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
4f774513 16852 * @phba: pointer to lpfc hba data structure.
8a9d2e80
JS
16853 * @post_sgl_list: pointer to els sgl entry list.
16854 * @count: number of els sgl entries on the list.
4f774513
JS
16855 *
16856 * This routine is invoked to post a block of driver's sgl pages to the
16857 * HBA using non-embedded mailbox command. No Lock is held. This routine
16858 * is only called when the driver is loading and after all IO has been
16859 * stopped.
16860 **/
8a9d2e80 16861static int
895427bd 16862lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
8a9d2e80
JS
16863 struct list_head *post_sgl_list,
16864 int post_cnt)
4f774513 16865{
8a9d2e80 16866 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4f774513
JS
16867 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16868 struct sgl_page_pairs *sgl_pg_pairs;
16869 void *viraddr;
16870 LPFC_MBOXQ_t *mbox;
16871 uint32_t reqlen, alloclen, pg_pairs;
16872 uint32_t mbox_tmo;
8a9d2e80
JS
16873 uint16_t xritag_start = 0;
16874 int rc = 0;
4f774513
JS
16875 uint32_t shdr_status, shdr_add_status;
16876 union lpfc_sli4_cfg_shdr *shdr;
16877
895427bd 16878 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
4f774513 16879 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 16880 if (reqlen > SLI4_PAGE_SIZE) {
895427bd 16881 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4f774513
JS
16882 "2559 Block sgl registration required DMA "
16883 "size (%d) great than a page\n", reqlen);
16884 return -ENOMEM;
16885 }
895427bd 16886
4f774513 16887 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6d368e53 16888 if (!mbox)
4f774513 16889 return -ENOMEM;
4f774513
JS
16890
16891 /* Allocate DMA memory and set up the non-embedded mailbox command */
16892 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16893 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16894 LPFC_SLI4_MBX_NEMBED);
16895
16896 if (alloclen < reqlen) {
16897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16898 "0285 Allocated DMA memory size (%d) is "
16899 "less than the requested DMA memory "
16900 "size (%d)\n", alloclen, reqlen);
16901 lpfc_sli4_mbox_cmd_free(phba, mbox);
16902 return -ENOMEM;
16903 }
4f774513 16904 /* Set up the SGL pages in the non-embedded DMA pages */
6d368e53 16905 viraddr = mbox->sge_array->addr[0];
4f774513
JS
16906 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16907 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16908
8a9d2e80
JS
16909 pg_pairs = 0;
16910 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
4f774513
JS
16911 /* Set up the sge entry */
16912 sgl_pg_pairs->sgl_pg0_addr_lo =
16913 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16914 sgl_pg_pairs->sgl_pg0_addr_hi =
16915 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16916 sgl_pg_pairs->sgl_pg1_addr_lo =
16917 cpu_to_le32(putPaddrLow(0));
16918 sgl_pg_pairs->sgl_pg1_addr_hi =
16919 cpu_to_le32(putPaddrHigh(0));
6d368e53 16920
4f774513
JS
16921 /* Keep the first xritag on the list */
16922 if (pg_pairs == 0)
16923 xritag_start = sglq_entry->sli4_xritag;
16924 sgl_pg_pairs++;
8a9d2e80 16925 pg_pairs++;
4f774513 16926 }
6d368e53
JS
16927
16928 /* Complete initialization and perform endian conversion. */
4f774513 16929 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
895427bd 16930 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
4f774513 16931 sgl->word0 = cpu_to_le32(sgl->word0);
895427bd 16932
4f774513
JS
16933 if (!phba->sli4_hba.intr_enable)
16934 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16935 else {
a183a15f 16936 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
4f774513
JS
16937 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16938 }
16939 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16940 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16941 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16942 if (rc != MBX_TIMEOUT)
16943 lpfc_sli4_mbox_cmd_free(phba, mbox);
16944 if (shdr_status || shdr_add_status || rc) {
16945 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16946 "2513 POST_SGL_BLOCK mailbox command failed "
16947 "status x%x add_status x%x mbx status x%x\n",
16948 shdr_status, shdr_add_status, rc);
16949 rc = -ENXIO;
16950 }
16951 return rc;
16952}
16953
16954/**
5e5b511d 16955 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
4f774513 16956 * @phba: pointer to lpfc hba data structure.
0794d601 16957 * @nblist: pointer to nvme buffer list.
4f774513
JS
 16958 * @count: number of buffers on the list.
16959 *
 16960 * This routine is invoked to post a block of @count IO buffer sgl pages from
0794d601 16961 * the buffer list @nblist to the HBA using a non-embedded mailbox command.
4f774513
JS
16962 * No Lock is held.
16963 *
16964 **/
0794d601 16965static int
5e5b511d
JS
16966lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
16967 int count)
4f774513 16968{
c490850a 16969 struct lpfc_io_buf *lpfc_ncmd;
4f774513
JS
16970 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16971 struct sgl_page_pairs *sgl_pg_pairs;
16972 void *viraddr;
16973 LPFC_MBOXQ_t *mbox;
16974 uint32_t reqlen, alloclen, pg_pairs;
16975 uint32_t mbox_tmo;
16976 uint16_t xritag_start = 0;
16977 int rc = 0;
16978 uint32_t shdr_status, shdr_add_status;
16979 dma_addr_t pdma_phys_bpl1;
16980 union lpfc_sli4_cfg_shdr *shdr;
16981
16982 /* Calculate the requested length of the dma memory */
8a9d2e80 16983 reqlen = count * sizeof(struct sgl_page_pairs) +
4f774513 16984 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 16985 if (reqlen > SLI4_PAGE_SIZE) {
4f774513 16986 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
0794d601 16987 "6118 Block sgl registration required DMA "
4f774513
JS
16988 "size (%d) great than a page\n", reqlen);
16989 return -ENOMEM;
16990 }
16991 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16992 if (!mbox) {
16993 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
0794d601 16994 "6119 Failed to allocate mbox cmd memory\n");
4f774513
JS
16995 return -ENOMEM;
16996 }
16997
16998 /* Allocate DMA memory and set up the non-embedded mailbox command */
16999 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
0794d601
JS
17000 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17001 reqlen, LPFC_SLI4_MBX_NEMBED);
4f774513
JS
17002
17003 if (alloclen < reqlen) {
17004 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
0794d601 17005 "6120 Allocated DMA memory size (%d) is "
4f774513
JS
17006 "less than the requested DMA memory "
17007 "size (%d)\n", alloclen, reqlen);
17008 lpfc_sli4_mbox_cmd_free(phba, mbox);
17009 return -ENOMEM;
17010 }
6d368e53 17011
4f774513 17012 /* Get the first SGE entry from the non-embedded DMA memory */
4f774513
JS
17013 viraddr = mbox->sge_array->addr[0];
17014
17015 /* Set up the SGL pages in the non-embedded DMA pages */
17016 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17017 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17018
17019 pg_pairs = 0;
0794d601 17020 list_for_each_entry(lpfc_ncmd, nblist, list) {
4f774513
JS
17021 /* Set up the sge entry */
17022 sgl_pg_pairs->sgl_pg0_addr_lo =
0794d601 17023 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
4f774513 17024 sgl_pg_pairs->sgl_pg0_addr_hi =
0794d601 17025 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
4f774513 17026 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
0794d601
JS
17027 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17028 SGL_PAGE_SIZE;
4f774513
JS
17029 else
17030 pdma_phys_bpl1 = 0;
17031 sgl_pg_pairs->sgl_pg1_addr_lo =
17032 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17033 sgl_pg_pairs->sgl_pg1_addr_hi =
17034 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17035 /* Keep the first xritag on the list */
17036 if (pg_pairs == 0)
0794d601 17037 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
4f774513
JS
17038 sgl_pg_pairs++;
17039 pg_pairs++;
17040 }
17041 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17042 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17043 /* Perform endian conversion if necessary */
17044 sgl->word0 = cpu_to_le32(sgl->word0);
17045
0794d601 17046 if (!phba->sli4_hba.intr_enable) {
4f774513 17047 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
0794d601 17048 } else {
a183a15f 17049 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
4f774513
JS
17050 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17051 }
0794d601 17052 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
4f774513
JS
17053 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17054 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17055 if (rc != MBX_TIMEOUT)
17056 lpfc_sli4_mbox_cmd_free(phba, mbox);
17057 if (shdr_status || shdr_add_status || rc) {
17058 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
0794d601 17059 "6125 POST_SGL_BLOCK mailbox command failed "
4f774513
JS
17060 "status x%x add_status x%x mbx status x%x\n",
17061 shdr_status, shdr_add_status, rc);
17062 rc = -ENXIO;
17063 }
17064 return rc;
17065}
17066
0794d601 17067/**
5e5b511d 17068 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
0794d601
JS
17069 * @phba: pointer to lpfc hba data structure.
17070 * @post_nblist: pointer to the nvme buffer list.
17071 *
17072 * This routine walks a list of nvme buffers that was passed in. It attempts
17073 * to construct blocks of nvme buffer sgls which contains contiguous xris and
17074 * uses the non-embedded SGL block post mailbox commands to post to the port.
17075 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use
17076 * embedded SGL post mailbox command for posting. The @post_nblist passed in
 17077 * must be a local list, thus no lock is needed when manipulating the list.
17078 *
 17079 * Returns: 0 or -EINVAL = failure, otherwise the number of successfully posted buffers.
17080 **/
17081int
5e5b511d
JS
17082lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17083 struct list_head *post_nblist, int sb_count)
0794d601 17084{
c490850a 17085 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
0794d601
JS
17086 int status, sgl_size;
17087 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17088 dma_addr_t pdma_phys_sgl1;
17089 int last_xritag = NO_XRI;
17090 int cur_xritag;
0794d601
JS
17091 LIST_HEAD(prep_nblist);
17092 LIST_HEAD(blck_nblist);
17093 LIST_HEAD(nvme_nblist);
17094
17095 /* sanity check */
17096 if (sb_count <= 0)
17097 return -EINVAL;
17098
17099 sgl_size = phba->cfg_sg_dma_buf_size;
17100 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17101 list_del_init(&lpfc_ncmd->list);
17102 block_cnt++;
17103 if ((last_xritag != NO_XRI) &&
17104 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17105 /* a hole in xri block, form a sgl posting block */
17106 list_splice_init(&prep_nblist, &blck_nblist);
17107 post_cnt = block_cnt - 1;
17108 /* prepare list for next posting block */
17109 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17110 block_cnt = 1;
17111 } else {
17112 /* prepare list for next posting block */
17113 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17114 /* enough sgls for non-embed sgl mbox command */
17115 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17116 list_splice_init(&prep_nblist, &blck_nblist);
17117 post_cnt = block_cnt;
17118 block_cnt = 0;
17119 }
17120 }
17121 num_posting++;
17122 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17123
17124 /* end of repost sgl list condition for NVME buffers */
17125 if (num_posting == sb_count) {
17126 if (post_cnt == 0) {
17127 /* last sgl posting block */
17128 list_splice_init(&prep_nblist, &blck_nblist);
17129 post_cnt = block_cnt;
17130 } else if (block_cnt == 1) {
17131 /* last single sgl with non-contiguous xri */
17132 if (sgl_size > SGL_PAGE_SIZE)
17133 pdma_phys_sgl1 =
17134 lpfc_ncmd->dma_phys_sgl +
17135 SGL_PAGE_SIZE;
17136 else
17137 pdma_phys_sgl1 = 0;
17138 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17139 status = lpfc_sli4_post_sgl(
17140 phba, lpfc_ncmd->dma_phys_sgl,
17141 pdma_phys_sgl1, cur_xritag);
17142 if (status) {
c490850a
JS
17143 /* Post error. Buffer unavailable. */
17144 lpfc_ncmd->flags |=
17145 LPFC_SBUF_NOT_POSTED;
0794d601 17146 } else {
c490850a
JS
 17147 /* Post success. Buffer available. */
17148 lpfc_ncmd->flags &=
17149 ~LPFC_SBUF_NOT_POSTED;
0794d601
JS
17150 lpfc_ncmd->status = IOSTAT_SUCCESS;
17151 num_posted++;
17152 }
17153 /* success, put on NVME buffer sgl list */
17154 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17155 }
17156 }
17157
17158 /* continue until a nembed page worth of sgls */
17159 if (post_cnt == 0)
17160 continue;
17161
17162 /* post block of NVME buffer list sgls */
5e5b511d
JS
17163 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
17164 post_cnt);
0794d601
JS
17165
 17166 /* don't reset xritag due to hole in xri block */
17167 if (block_cnt == 0)
17168 last_xritag = NO_XRI;
4f774513 17169
0794d601
JS
17170 /* reset NVME buffer post count for next round of posting */
17171 post_cnt = 0;
4f774513 17172
0794d601
JS
17173 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
17174 while (!list_empty(&blck_nblist)) {
17175 list_remove_head(&blck_nblist, lpfc_ncmd,
c490850a 17176 struct lpfc_io_buf, list);
0794d601 17177 if (status) {
c490850a
JS
17178 /* Post error. Mark buffer unavailable. */
17179 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
0794d601 17180 } else {
c490850a
JS
17181 /* Post success, Mark buffer available. */
17182 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
0794d601
JS
17183 lpfc_ncmd->status = IOSTAT_SUCCESS;
17184 num_posted++;
17185 }
17186 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17187 }
4f774513 17188 }
0794d601 17189 /* Push NVME buffers with sgl posted to the available list */
5e5b511d
JS
17190 lpfc_io_buf_replenish(phba, &nvme_nblist);
17191
0794d601 17192 return num_posted;
4f774513
JS
17193}
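
/*
 * Editorial worked example (values are illustrative, not from the source):
 * suppose @post_nblist carries buffers with xris 10, 11, 12, 14 and 15, and
 * LPFC_NEMBED_MBOX_SGL_CNT is larger than five. The loop above groups them as
 *
 *	{10, 11, 12} - hole before 14, posted via
 *	               lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, 3);
 *	{14, 15}     - end of list, posted via
 *	               lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, 2);
 *
 * A single buffer left over with a non-contiguous xri (block_cnt == 1 at the
 * end of the list) is posted individually through lpfc_sli4_post_sgl().
 */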
17194
17195/**
17196 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17197 * @phba: pointer to lpfc_hba struct that the frame was received on
17198 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17199 *
17200 * This function checks the fields in the @fc_hdr to see if the FC frame is a
17201 * valid type of frame that the LPFC driver will handle. This function will
17202 * return a zero if the frame is a valid frame or a non zero value when the
17203 * frame does not pass the check.
17204 **/
17205static int
17206lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17207{
474ffb74 17208 /* make rctl_names static to save stack space */
4f774513 17209 struct fc_vft_header *fc_vft_hdr;
546fc854 17210 uint32_t *header = (uint32_t *) fc_hdr;
4f774513 17211
e62245d9
JS
17212#define FC_RCTL_MDS_DIAGS 0xF4
17213
4f774513
JS
17214 switch (fc_hdr->fh_r_ctl) {
17215 case FC_RCTL_DD_UNCAT: /* uncategorized information */
17216 case FC_RCTL_DD_SOL_DATA: /* solicited data */
17217 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
17218 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
17219 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
17220 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
17221 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
17222 case FC_RCTL_DD_CMD_STATUS: /* command status */
17223 case FC_RCTL_ELS_REQ: /* extended link services request */
17224 case FC_RCTL_ELS_REP: /* extended link services reply */
17225 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
17226 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
17227 case FC_RCTL_BA_NOP: /* basic link service NOP */
17228 case FC_RCTL_BA_ABTS: /* basic link service abort */
17229 case FC_RCTL_BA_RMC: /* remove connection */
17230 case FC_RCTL_BA_ACC: /* basic accept */
17231 case FC_RCTL_BA_RJT: /* basic reject */
17232 case FC_RCTL_BA_PRMT:
17233 case FC_RCTL_ACK_1: /* acknowledge_1 */
17234 case FC_RCTL_ACK_0: /* acknowledge_0 */
17235 case FC_RCTL_P_RJT: /* port reject */
17236 case FC_RCTL_F_RJT: /* fabric reject */
17237 case FC_RCTL_P_BSY: /* port busy */
17238 case FC_RCTL_F_BSY: /* fabric busy to data frame */
17239 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
17240 case FC_RCTL_LCR: /* link credit reset */
ae9e28f3 17241 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
4f774513
JS
17242 case FC_RCTL_END: /* end */
17243 break;
17244 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
17245 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17246 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17247 return lpfc_fc_frame_check(phba, fc_hdr);
17248 default:
17249 goto drop;
17250 }
ae9e28f3 17251
4f774513
JS
17252 switch (fc_hdr->fh_type) {
17253 case FC_TYPE_BLS:
17254 case FC_TYPE_ELS:
17255 case FC_TYPE_FCP:
17256 case FC_TYPE_CT:
895427bd 17257 case FC_TYPE_NVME:
4f774513
JS
17258 break;
17259 case FC_TYPE_IP:
17260 case FC_TYPE_ILS:
17261 default:
17262 goto drop;
17263 }
546fc854 17264
4f774513 17265 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
78e1d200 17266 "2538 Received frame rctl:x%x, type:x%x, "
88f43a08 17267 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
78e1d200
JS
17268 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17269 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17270 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17271 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17272 be32_to_cpu(header[6]));
4f774513
JS
17273 return 0;
17274drop:
17275 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
78e1d200
JS
17276 "2539 Dropped frame rctl:x%x type:x%x\n",
17277 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
4f774513
JS
17278 return 1;
17279}
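
/*
 * Editorial sketch (hypothetical frame, not driver code): the check above
 * accepts a frame only when both R_CTL and TYPE are in the supported sets,
 * e.g. an unsolicited FCP command:
 *
 *	struct fc_frame_header hdr = { 0 };
 *
 *	hdr.fh_r_ctl = FC_RCTL_DD_UNSOL_CMD;
 *	hdr.fh_type  = FC_TYPE_FCP;
 *	// lpfc_fc_frame_check(phba, &hdr) returns 0 (frame accepted)
 *
 * A VFT-tagged frame (FC_RCTL_VFTH) is unwrapped first and the inner header
 * is re-checked, so the decision is made on the encapsulated frame.
 */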
17280
17281/**
17282 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17283 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17284 *
17285 * This function processes the FC header to retrieve the VFI from the VF
17286 * header, if one exists. This function will return the VFI if one exists
 17287 * or 0 if no VF header exists.
17288 **/
17289static uint32_t
17290lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17291{
17292 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17293
17294 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17295 return 0;
17296 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17297}
17298
17299/**
17300 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17301 * @phba: Pointer to the HBA structure to search for the vport on
17302 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17303 * @fcfi: The FC Fabric ID that the frame came from
17304 *
17305 * This function searches the @phba for a vport that matches the content of the
17306 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17307 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17308 * returns the matching vport pointer or NULL if unable to match frame to a
17309 * vport.
17310 **/
17311static struct lpfc_vport *
17312lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
895427bd 17313 uint16_t fcfi, uint32_t did)
4f774513
JS
17314{
17315 struct lpfc_vport **vports;
17316 struct lpfc_vport *vport = NULL;
17317 int i;
939723a4 17318
bf08611b
JS
17319 if (did == Fabric_DID)
17320 return phba->pport;
939723a4
JS
17321 if ((phba->pport->fc_flag & FC_PT2PT) &&
17322 !(phba->link_state == LPFC_HBA_READY))
17323 return phba->pport;
17324
4f774513 17325 vports = lpfc_create_vport_work_array(phba);
895427bd 17326 if (vports != NULL) {
4f774513
JS
17327 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17328 if (phba->fcf.fcfi == fcfi &&
17329 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17330 vports[i]->fc_myDID == did) {
17331 vport = vports[i];
17332 break;
17333 }
17334 }
895427bd 17335 }
4f774513
JS
17336 lpfc_destroy_vport_work_array(phba, vports);
17337 return vport;
17338}
17339
45ed1190
JS
17340/**
17341 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17342 * @vport: The vport to work on.
17343 *
17344 * This function updates the receive sequence time stamp for this vport. The
 17345 * receive sequence time stamp indicates the time that the last frame of
 17346 * the sequence that has been idle for the longest amount of time was received.
 17347 * The driver uses this time stamp to indicate if any received sequences have
17348 * timed out.
17349 **/
5d8b8167 17350static void
45ed1190
JS
17351lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17352{
17353 struct lpfc_dmabuf *h_buf;
17354 struct hbq_dmabuf *dmabuf = NULL;
17355
17356 /* get the oldest sequence on the rcv list */
17357 h_buf = list_get_first(&vport->rcv_buffer_list,
17358 struct lpfc_dmabuf, list);
17359 if (!h_buf)
17360 return;
17361 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17362 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17363}
17364
17365/**
17366 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17367 * @vport: The vport that the received sequences were sent to.
17368 *
17369 * This function cleans up all outstanding received sequences. This is called
17370 * by the driver when a link event or user action invalidates all the received
17371 * sequences.
17372 **/
17373void
17374lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17375{
17376 struct lpfc_dmabuf *h_buf, *hnext;
17377 struct lpfc_dmabuf *d_buf, *dnext;
17378 struct hbq_dmabuf *dmabuf = NULL;
17379
17380 /* start with the oldest sequence on the rcv list */
17381 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17382 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17383 list_del_init(&dmabuf->hbuf.list);
17384 list_for_each_entry_safe(d_buf, dnext,
17385 &dmabuf->dbuf.list, list) {
17386 list_del_init(&d_buf->list);
17387 lpfc_in_buf_free(vport->phba, d_buf);
17388 }
17389 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17390 }
17391}
17392
17393/**
17394 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17395 * @vport: The vport that the received sequences were sent to.
17396 *
17397 * This function determines whether any received sequences have timed out by
17398 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17399 * indicates that there is at least one timed out sequence this routine will
17400 * go through the received sequences one at a time from most inactive to most
17401 * active to determine which ones need to be cleaned up. Once it has determined
17402 * that a sequence needs to be cleaned up it will simply free up the resources
17403 * without sending an abort.
17404 **/
17405void
17406lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17407{
17408 struct lpfc_dmabuf *h_buf, *hnext;
17409 struct lpfc_dmabuf *d_buf, *dnext;
17410 struct hbq_dmabuf *dmabuf = NULL;
17411 unsigned long timeout;
17412 int abort_count = 0;
17413
17414 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17415 vport->rcv_buffer_time_stamp);
17416 if (list_empty(&vport->rcv_buffer_list) ||
17417 time_before(jiffies, timeout))
17418 return;
17419 /* start with the oldest sequence on the rcv list */
17420 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17421 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17422 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17423 dmabuf->time_stamp);
17424 if (time_before(jiffies, timeout))
17425 break;
17426 abort_count++;
17427 list_del_init(&dmabuf->hbuf.list);
17428 list_for_each_entry_safe(d_buf, dnext,
17429 &dmabuf->dbuf.list, list) {
17430 list_del_init(&d_buf->list);
17431 lpfc_in_buf_free(vport->phba, d_buf);
17432 }
17433 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17434 }
17435 if (abort_count)
17436 lpfc_update_rcv_time_stamp(vport);
17437}
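
/*
 * Editorial note on the timeout arithmetic above (illustrative numbers only):
 * with fc_edtov = 2000 ms and HZ = 250, a sequence stamped at jiffies J
 * expires at
 *
 *	timeout = msecs_to_jiffies(2000) + J = 500 + J
 *
 * so time_before(jiffies, timeout) stays true for roughly two seconds after
 * the last frame of that sequence arrived; only older sequences are freed.
 */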
17438
4f774513
JS
17439/**
17440 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17441 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17442 *
17443 * This function searches through the existing incomplete sequences that have
17444 * been sent to this @vport. If the frame matches one of the incomplete
17445 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17446 * make up that sequence. If no sequence is found that matches this frame then
 17447 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17448 * This function returns a pointer to the first dmabuf in the sequence list that
17449 * the frame was linked to.
17450 **/
17451static struct hbq_dmabuf *
17452lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17453{
17454 struct fc_frame_header *new_hdr;
17455 struct fc_frame_header *temp_hdr;
17456 struct lpfc_dmabuf *d_buf;
17457 struct lpfc_dmabuf *h_buf;
17458 struct hbq_dmabuf *seq_dmabuf = NULL;
17459 struct hbq_dmabuf *temp_dmabuf = NULL;
4360ca9c 17460 uint8_t found = 0;
4f774513 17461
4d9ab994 17462 INIT_LIST_HEAD(&dmabuf->dbuf.list);
45ed1190 17463 dmabuf->time_stamp = jiffies;
4f774513 17464 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
4360ca9c 17465
4f774513
JS
17466 /* Use the hdr_buf to find the sequence that this frame belongs to */
17467 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17468 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17469 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17470 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17471 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17472 continue;
17473 /* found a pending sequence that matches this frame */
17474 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17475 break;
17476 }
17477 if (!seq_dmabuf) {
17478 /*
17479 * This indicates first frame received for this sequence.
17480 * Queue the buffer on the vport's rcv_buffer_list.
17481 */
17482 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
45ed1190 17483 lpfc_update_rcv_time_stamp(vport);
4f774513
JS
17484 return dmabuf;
17485 }
17486 temp_hdr = seq_dmabuf->hbuf.virt;
eeead811
JS
17487 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17488 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
4d9ab994
JS
17489 list_del_init(&seq_dmabuf->hbuf.list);
17490 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17491 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
45ed1190 17492 lpfc_update_rcv_time_stamp(vport);
4f774513
JS
17493 return dmabuf;
17494 }
45ed1190
JS
17495 /* move this sequence to the tail to indicate a young sequence */
17496 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17497 seq_dmabuf->time_stamp = jiffies;
17498 lpfc_update_rcv_time_stamp(vport);
eeead811
JS
17499 if (list_empty(&seq_dmabuf->dbuf.list)) {
17500 temp_hdr = dmabuf->hbuf.virt;
17501 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17502 return seq_dmabuf;
17503 }
4f774513 17504 /* find the correct place in the sequence to insert this frame */
4360ca9c
JS
17505 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17506 while (!found) {
4f774513
JS
17507 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17508 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17509 /*
17510 * If the frame's sequence count is greater than the frame on
17511 * the list then insert the frame right after this frame
17512 */
eeead811
JS
17513 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17514 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
4f774513 17515 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
4360ca9c
JS
17516 found = 1;
17517 break;
4f774513 17518 }
4360ca9c
JS
17519
17520 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17521 break;
17522 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
4f774513 17523 }
4360ca9c
JS
17524
17525 if (found)
17526 return seq_dmabuf;
4f774513
JS
17527 return NULL;
17528}
17529
6669f9bb
JS
17530/**
17531 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 17532 * @vport: pointer to a virtual port
17533 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17534 *
 17535 * This function tries to abort the partially assembled sequence described
 17536 * by the information from the basic abort @dmabuf. It checks whether such a
 17537 * partially assembled sequence is held by the driver. If so, it shall free up all
17538 * the frames from the partially assembled sequence.
17539 *
17540 * Return
17541 * true -- if there is matching partially assembled sequence present and all
17542 * the frames freed with the sequence;
17543 * false -- if there is no matching partially assembled sequence present so
17544 * nothing got aborted in the lower layer driver
17545 **/
17546static bool
17547lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17548 struct hbq_dmabuf *dmabuf)
17549{
17550 struct fc_frame_header *new_hdr;
17551 struct fc_frame_header *temp_hdr;
17552 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17553 struct hbq_dmabuf *seq_dmabuf = NULL;
17554
17555 /* Use the hdr_buf to find the sequence that matches this frame */
17556 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17557 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17558 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17559 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17560 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17561 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17562 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17563 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17564 continue;
17565 /* found a pending sequence that matches this frame */
17566 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17567 break;
17568 }
17569
17570 /* Free up all the frames from the partially assembled sequence */
17571 if (seq_dmabuf) {
17572 list_for_each_entry_safe(d_buf, n_buf,
17573 &seq_dmabuf->dbuf.list, list) {
17574 list_del_init(&d_buf->list);
17575 lpfc_in_buf_free(vport->phba, d_buf);
17576 }
17577 return true;
17578 }
17579 return false;
17580}
17581
6dd9e31c
JS
17582/**
17583 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 17584 * @vport: pointer to a virtual port
17585 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17586 *
 17587 * This function tries to abort the assembled sequence from the upper level
 17588 * protocol, described by the information from the basic abort @dmabuf. It
17589 * checks to see whether such pending context exists at upper level protocol.
17590 * If so, it shall clean up the pending context.
17591 *
17592 * Return
17593 * true -- if there is matching pending context of the sequence cleaned
17594 * at ulp;
17595 * false -- if there is no matching pending context of the sequence present
17596 * at ulp.
17597 **/
17598static bool
17599lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17600{
17601 struct lpfc_hba *phba = vport->phba;
17602 int handled;
17603
17604 /* Accepting abort at ulp with SLI4 only */
17605 if (phba->sli_rev < LPFC_SLI_REV4)
17606 return false;
17607
17608 /* Register all caring upper level protocols to attend abort */
17609 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17610 if (handled)
17611 return true;
17612
17613 return false;
17614}
17615
6669f9bb 17616/**
546fc854 17617 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
6669f9bb
JS
17618 * @phba: Pointer to HBA context object.
17619 * @cmd_iocbq: pointer to the command iocbq structure.
17620 * @rsp_iocbq: pointer to the response iocbq structure.
17621 *
546fc854 17622 * This function handles the sequence abort response iocb command complete
6669f9bb
JS
17623 * event. It properly releases the memory allocated to the sequence abort
17624 * accept iocb.
17625 **/
17626static void
546fc854 17627lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
6669f9bb
JS
17628 struct lpfc_iocbq *cmd_iocbq,
17629 struct lpfc_iocbq *rsp_iocbq)
17630{
6dd9e31c
JS
17631 struct lpfc_nodelist *ndlp;
17632
17633 if (cmd_iocbq) {
17634 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17635 lpfc_nlp_put(ndlp);
17636 lpfc_nlp_not_used(ndlp);
6669f9bb 17637 lpfc_sli_release_iocbq(phba, cmd_iocbq);
6dd9e31c 17638 }
6b5151fd
JS
17639
17640 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17641 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17642 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17643 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17644 rsp_iocbq->iocb.ulpStatus,
17645 rsp_iocbq->iocb.un.ulpWord[4]);
6669f9bb
JS
17646}
17647
6d368e53
JS
17648/**
17649 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17650 * @phba: Pointer to HBA context object.
17651 * @xri: xri id in transaction.
17652 *
 17653 * This function validates that the xri maps to the known range of XRIs
 17654 * allocated and used by the driver.
17655 **/
7851fe2c 17656uint16_t
6d368e53
JS
17657lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17658 uint16_t xri)
17659{
a2fc4aef 17660 uint16_t i;
6d368e53
JS
17661
17662 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17663 if (xri == phba->sli4_hba.xri_ids[i])
17664 return i;
17665 }
17666 return NO_XRI;
17667}
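
/*
 * Editorial sketch (hypothetical table contents): xri_inrange() converts a
 * physical XRI back into the driver's logical index. If, say, xri_ids[] held
 * { 0x240, 0x241, 0x242, ... }, then
 *
 *	lpfc_sli4_xri_inrange(phba, 0x241)  returns 1 (logical index)
 *	lpfc_sli4_xri_inrange(phba, 0x9999) returns NO_XRI (not owned)
 *
 * which is how lpfc_sli4_seq_abort_rsp() below decides whether the aborted
 * exchange belongs to this port.
 */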
17668
6669f9bb 17669/**
546fc854 17670 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
6669f9bb
JS
17671 * @phba: Pointer to HBA context object.
17672 * @fc_hdr: pointer to a FC frame header.
17673 *
546fc854 17674 * This function sends a basic response to a previous unsol sequence abort
6669f9bb
JS
17675 * event after aborting the sequence handling.
17676 **/
86c67379 17677void
6dd9e31c
JS
17678lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17679 struct fc_frame_header *fc_hdr, bool aborted)
6669f9bb 17680{
6dd9e31c 17681 struct lpfc_hba *phba = vport->phba;
6669f9bb
JS
17682 struct lpfc_iocbq *ctiocb = NULL;
17683 struct lpfc_nodelist *ndlp;
ee0f4fe1 17684 uint16_t oxid, rxid, xri, lxri;
5ffc266e 17685 uint32_t sid, fctl;
6669f9bb 17686 IOCB_t *icmd;
546fc854 17687 int rc;
6669f9bb
JS
17688
17689 if (!lpfc_is_link_up(phba))
17690 return;
17691
17692 sid = sli4_sid_from_fc_hdr(fc_hdr);
17693 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
5ffc266e 17694 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
6669f9bb 17695
6dd9e31c 17696 ndlp = lpfc_findnode_did(vport, sid);
6669f9bb 17697 if (!ndlp) {
9d3d340d 17698 ndlp = lpfc_nlp_init(vport, sid);
6dd9e31c
JS
17699 if (!ndlp) {
17700 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17701 "1268 Failed to allocate ndlp for "
17702 "oxid:x%x SID:x%x\n", oxid, sid);
17703 return;
17704 }
6dd9e31c
JS
17705 /* Put ndlp onto pport node list */
17706 lpfc_enqueue_node(vport, ndlp);
17707 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17708 /* re-setup ndlp without removing from node list */
17709 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17710 if (!ndlp) {
17711 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17712 "3275 Failed to active ndlp found "
17713 "for oxid:x%x SID:x%x\n", oxid, sid);
17714 return;
17715 }
6669f9bb
JS
17716 }
17717
546fc854 17718 /* Allocate buffer for rsp iocb */
6669f9bb
JS
17719 ctiocb = lpfc_sli_get_iocbq(phba);
17720 if (!ctiocb)
17721 return;
17722
5ffc266e
JS
17723 /* Extract the F_CTL field from FC_HDR */
17724 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17725
6669f9bb 17726 icmd = &ctiocb->iocb;
6669f9bb 17727 icmd->un.xseq64.bdl.bdeSize = 0;
5ffc266e 17728 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
6669f9bb
JS
17729 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17730 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17731 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17732
17733 /* Fill in the rest of iocb fields */
17734 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17735 icmd->ulpBdeCount = 0;
17736 icmd->ulpLe = 1;
17737 icmd->ulpClass = CLASS3;
6d368e53 17738 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
6dd9e31c 17739 ctiocb->context1 = lpfc_nlp_get(ndlp);
6669f9bb 17740
6669f9bb 17741 ctiocb->vport = phba->pport;
546fc854 17742 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
6d368e53 17743 ctiocb->sli4_lxritag = NO_XRI;
546fc854
JS
17744 ctiocb->sli4_xritag = NO_XRI;
17745
ee0f4fe1
JS
17746 if (fctl & FC_FC_EX_CTX)
17747 /* Exchange responder sent the abort so we
17748 * own the oxid.
17749 */
17750 xri = oxid;
17751 else
17752 xri = rxid;
17753 lxri = lpfc_sli4_xri_inrange(phba, xri);
17754 if (lxri != NO_XRI)
17755 lpfc_set_rrq_active(phba, ndlp, lxri,
17756 (xri == oxid) ? rxid : oxid, 0);
6dd9e31c
JS
17757 /* For BA_ABTS from exchange responder, if the logical xri with
17758 * the oxid maps to the FCP XRI range, the port no longer has
17759 * that exchange context, send a BLS_RJT. Override the IOCB for
17760 * a BA_RJT.
17761 */
17762 if ((fctl & FC_FC_EX_CTX) &&
895427bd 17763 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
6dd9e31c
JS
17764 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17765 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17766 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17767 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17768 }
17769
17770 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17771 * the driver no longer has that exchange, send a BLS_RJT. Override
17772 * the IOCB for a BA_RJT.
546fc854 17773 */
6dd9e31c 17774 if (aborted == false) {
546fc854
JS
17775 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17776 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17777 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17778 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17779 }
6669f9bb 17780
5ffc266e
JS
17781 if (fctl & FC_FC_EX_CTX) {
17782 /* ABTS sent by responder to CT exchange, construction
17783 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17784 * field and RX_ID from ABTS for RX_ID field.
17785 */
546fc854 17786 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
5ffc266e
JS
17787 } else {
17788 /* ABTS sent by initiator to CT exchange, construction
17789 * of BA_ACC will need to allocate a new XRI as for the
f09c3acc 17790 * XRI_TAG field.
5ffc266e 17791 */
546fc854 17792 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
5ffc266e 17793 }
f09c3acc 17794 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
546fc854 17795 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
5ffc266e 17796
546fc854 17797 /* Xmit CT abts response on exchange <xid> */
6dd9e31c
JS
17798 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17799 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17800 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
546fc854
JS
17801
17802 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17803 if (rc == IOCB_ERROR) {
6dd9e31c
JS
17804 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17805 "2925 Failed to issue CT ABTS RSP x%x on "
17806 "xri x%x, Data x%x\n",
17807 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17808 phba->link_state);
17809 lpfc_nlp_put(ndlp);
17810 ctiocb->context1 = NULL;
546fc854
JS
17811 lpfc_sli_release_iocbq(phba, ctiocb);
17812 }
6669f9bb
JS
17813}
17814
17815/**
17816 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17817 * @vport: Pointer to the vport on which this sequence was received
17818 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17819 *
17820 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 17821 * receive sequence is only partially assembled by the driver, it shall abort
17822 * the partially assembled frames for the sequence. Otherwise, if the
17823 * unsolicited receive sequence has been completely assembled and passed to
 17824 * the Upper Layer Protocol (ULP), it then marks the per oxid status for the
 17825 * unsolicited sequence as aborted. After that, it will respond to the abort
 17826 * with a basic accept (BA_ACC) or basic reject (BA_RJT) as appropriate.
17827 **/
5d8b8167 17828static void
6669f9bb
JS
17829lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17830 struct hbq_dmabuf *dmabuf)
17831{
17832 struct lpfc_hba *phba = vport->phba;
17833 struct fc_frame_header fc_hdr;
5ffc266e 17834 uint32_t fctl;
6dd9e31c 17835 bool aborted;
6669f9bb 17836
6669f9bb
JS
17837 /* Make a copy of fc_hdr before the dmabuf being released */
17838 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
5ffc266e 17839 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
6669f9bb 17840
5ffc266e 17841 if (fctl & FC_FC_EX_CTX) {
6dd9e31c
JS
17842 /* ABTS by responder to exchange, no cleanup needed */
17843 aborted = true;
5ffc266e 17844 } else {
6dd9e31c
JS
17845 /* ABTS by initiator to exchange, need to do cleanup */
17846 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17847 if (aborted == false)
17848 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
5ffc266e 17849 }
6dd9e31c
JS
17850 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17851
86c67379
JS
17852 if (phba->nvmet_support) {
17853 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17854 return;
17855 }
17856
6dd9e31c
JS
17857 /* Respond with BA_ACC or BA_RJT accordingly */
17858 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
6669f9bb
JS
17859}
17860
4f774513
JS
17861/**
17862 * lpfc_seq_complete - Indicates if a sequence is complete
17863 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17864 *
17865 * This function checks the sequence, starting with the frame described by
17866 * @dmabuf, to see if all the frames associated with this sequence are present.
 17867 * The frames associated with this sequence are linked to the @dmabuf using the
 17868 * dbuf list. This function looks for three major things. 1) That the first frame
17869 * has a sequence count of zero. 2) There is a frame with last frame of sequence
17870 * set. 3) That there are no holes in the sequence count. The function will
17871 * return 1 when the sequence is complete, otherwise it will return 0.
17872 **/
17873static int
17874lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17875{
17876 struct fc_frame_header *hdr;
17877 struct lpfc_dmabuf *d_buf;
17878 struct hbq_dmabuf *seq_dmabuf;
17879 uint32_t fctl;
17880 int seq_count = 0;
17881
17882 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 17883 /* make sure first frame of sequence has a sequence count of zero */
17884 if (hdr->fh_seq_cnt != seq_count)
17885 return 0;
17886 fctl = (hdr->fh_f_ctl[0] << 16 |
17887 hdr->fh_f_ctl[1] << 8 |
17888 hdr->fh_f_ctl[2]);
17889 /* If last frame of sequence we can return success. */
17890 if (fctl & FC_FC_END_SEQ)
17891 return 1;
17892 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17893 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17894 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17895 /* If there is a hole in the sequence count then fail. */
eeead811 17896 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
4f774513
JS
17897 return 0;
17898 fctl = (hdr->fh_f_ctl[0] << 16 |
17899 hdr->fh_f_ctl[1] << 8 |
17900 hdr->fh_f_ctl[2]);
17901 /* If last frame of sequence we can return success. */
17902 if (fctl & FC_FC_END_SEQ)
17903 return 1;
17904 }
17905 return 0;
17906}
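
/*
 * Editorial note: fh_f_ctl is a 3-byte big-endian field, so the shifts above
 * rebuild the 24-bit F_CTL word. For a hypothetical frame whose F_CTL bytes
 * are { 0x18, 0x00, 0x00 }:
 *
 *	fctl = 0x18 << 16 | 0x00 << 8 | 0x00 = 0x180000
 *
 * which has both FC_FC_LAST_SEQ and FC_FC_END_SEQ set, so the frame ends the
 * sequence and lpfc_seq_complete() reports the sequence as complete.
 */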
17907
17908/**
17909 * lpfc_prep_seq - Prep sequence for ULP processing
17910 * @vport: Pointer to the vport on which this sequence was received
17911 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17912 *
17913 * This function takes a sequence, described by a list of frames, and creates
17914 * a list of iocbq structures to describe the sequence. This iocbq list will be
17915 * used to issue to the generic unsolicited sequence handler. This routine
17916 * returns a pointer to the first iocbq in the list. If the function is unable
 17917 * to allocate an iocbq then it throws out the received frames that were not
 17918 * able to be described and returns a pointer to the first iocbq. If unable to
17919 * allocate any iocbqs (including the first) this function will return NULL.
17920 **/
17921static struct lpfc_iocbq *
17922lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17923{
7851fe2c 17924 struct hbq_dmabuf *hbq_buf;
4f774513
JS
17925 struct lpfc_dmabuf *d_buf, *n_buf;
17926 struct lpfc_iocbq *first_iocbq, *iocbq;
17927 struct fc_frame_header *fc_hdr;
17928 uint32_t sid;
7851fe2c 17929 uint32_t len, tot_len;
eeead811 17930 struct ulp_bde64 *pbde;
4f774513
JS
17931
17932 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17933 /* remove from receive buffer list */
17934 list_del_init(&seq_dmabuf->hbuf.list);
45ed1190 17935 lpfc_update_rcv_time_stamp(vport);
4f774513 17936 /* get the Remote Port's SID */
6669f9bb 17937 sid = sli4_sid_from_fc_hdr(fc_hdr);
7851fe2c 17938 tot_len = 0;
4f774513
JS
17939 /* Get an iocbq struct to fill in. */
17940 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17941 if (first_iocbq) {
17942 /* Initialize the first IOCB. */
8fa38513 17943 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
4f774513 17944 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
895427bd 17945 first_iocbq->vport = vport;
939723a4
JS
17946
17947 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17948 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17949 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17950 first_iocbq->iocb.un.rcvels.parmRo =
17951 sli4_did_from_fc_hdr(fc_hdr);
17952 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17953 } else
17954 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
7851fe2c
JS
17955 first_iocbq->iocb.ulpContext = NO_XRI;
17956 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17957 be16_to_cpu(fc_hdr->fh_ox_id);
17958 /* iocbq is prepped for internal consumption. Physical vpi. */
17959 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17960 vport->phba->vpi_ids[vport->vpi];
4f774513 17961 /* put the first buffer into the first IOCBq */
48a5a664
JS
17962 tot_len = bf_get(lpfc_rcqe_length,
17963 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17964
4f774513
JS
17965 first_iocbq->context2 = &seq_dmabuf->dbuf;
17966 first_iocbq->context3 = NULL;
17967 first_iocbq->iocb.ulpBdeCount = 1;
48a5a664
JS
17968 if (tot_len > LPFC_DATA_BUF_SIZE)
17969 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
4f774513 17970 LPFC_DATA_BUF_SIZE;
48a5a664
JS
17971 else
17972 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17973
4f774513 17974 first_iocbq->iocb.un.rcvels.remoteID = sid;
48a5a664 17975
7851fe2c 17976 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
4f774513
JS
17977 }
17978 iocbq = first_iocbq;
17979 /*
17980 * Each IOCBq can have two Buffers assigned, so go through the list
17981 * of buffers for this sequence and save two buffers in each IOCBq
17982 */
17983 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17984 if (!iocbq) {
17985 lpfc_in_buf_free(vport->phba, d_buf);
17986 continue;
17987 }
17988 if (!iocbq->context3) {
17989 iocbq->context3 = d_buf;
17990 iocbq->iocb.ulpBdeCount++;
7851fe2c
JS
17991 /* We need to get the size out of the right CQE */
17992 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17993 len = bf_get(lpfc_rcqe_length,
17994 &hbq_buf->cq_event.cqe.rcqe_cmpl);
48a5a664
JS
17995 pbde = (struct ulp_bde64 *)
17996 &iocbq->iocb.unsli3.sli3Words[4];
17997 if (len > LPFC_DATA_BUF_SIZE)
17998 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17999 else
18000 pbde->tus.f.bdeSize = len;
18001
7851fe2c
JS
18002 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
18003 tot_len += len;
4f774513
JS
18004 } else {
18005 iocbq = lpfc_sli_get_iocbq(vport->phba);
18006 if (!iocbq) {
18007 if (first_iocbq) {
18008 first_iocbq->iocb.ulpStatus =
18009 IOSTAT_FCP_RSP_ERROR;
18010 first_iocbq->iocb.un.ulpWord[4] =
18011 IOERR_NO_RESOURCES;
18012 }
18013 lpfc_in_buf_free(vport->phba, d_buf);
18014 continue;
18015 }
48a5a664
JS
18016 /* We need to get the size out of the right CQE */
18017 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18018 len = bf_get(lpfc_rcqe_length,
18019 &hbq_buf->cq_event.cqe.rcqe_cmpl);
4f774513
JS
18020 iocbq->context2 = d_buf;
18021 iocbq->context3 = NULL;
18022 iocbq->iocb.ulpBdeCount = 1;
48a5a664
JS
18023 if (len > LPFC_DATA_BUF_SIZE)
18024 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
4f774513 18025 LPFC_DATA_BUF_SIZE;
48a5a664
JS
18026 else
18027 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
7851fe2c 18028
7851fe2c
JS
18029 tot_len += len;
18030 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18031
4f774513
JS
18032 iocbq->iocb.un.rcvels.remoteID = sid;
18033 list_add_tail(&iocbq->list, &first_iocbq->list);
18034 }
18035 }
39c4f1a9
JS
18036 /* Free the sequence's header buffer */
18037 if (!first_iocbq)
18038 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18039
4f774513
JS
18040 return first_iocbq;
18041}
18042
6669f9bb
JS
18043static void
18044lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18045 struct hbq_dmabuf *seq_dmabuf)
18046{
18047 struct fc_frame_header *fc_hdr;
18048 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18049 struct lpfc_hba *phba = vport->phba;
18050
18051 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18052 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18053 if (!iocbq) {
18054 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18055 "2707 Ring %d handler: Failed to allocate "
18056 "iocb Rctl x%x Type x%x received\n",
18057 LPFC_ELS_RING,
18058 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18059 return;
18060 }
18061 if (!lpfc_complete_unsol_iocb(phba,
895427bd 18062 phba->sli4_hba.els_wq->pring,
6669f9bb
JS
18063 iocbq, fc_hdr->fh_r_ctl,
18064 fc_hdr->fh_type))
6d368e53 18065 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6669f9bb
JS
18066 "2540 Ring %d handler: unexpected Rctl "
18067 "x%x Type x%x received\n",
18068 LPFC_ELS_RING,
18069 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18070
18071 /* Free iocb created in lpfc_prep_seq */
18072 list_for_each_entry_safe(curr_iocb, next_iocb,
18073 &iocbq->list, list) {
18074 list_del_init(&curr_iocb->list);
18075 lpfc_sli_release_iocbq(phba, curr_iocb);
18076 }
18077 lpfc_sli_release_iocbq(phba, iocbq);
18078}
18079
ae9e28f3
JS
18080static void
18081lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18082 struct lpfc_iocbq *rspiocb)
18083{
18084 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
18085
18086 if (pcmd && pcmd->virt)
771db5c0 18087 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
ae9e28f3
JS
18088 kfree(pcmd);
18089 lpfc_sli_release_iocbq(phba, cmdiocb);
e817e5d7 18090 lpfc_drain_txq(phba);
ae9e28f3
JS
18091}
18092
18093static void
18094lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18095 struct hbq_dmabuf *dmabuf)
18096{
18097 struct fc_frame_header *fc_hdr;
18098 struct lpfc_hba *phba = vport->phba;
18099 struct lpfc_iocbq *iocbq = NULL;
18100 union lpfc_wqe *wqe;
18101 struct lpfc_dmabuf *pcmd = NULL;
18102 uint32_t frame_len;
18103 int rc;
e817e5d7 18104 unsigned long iflags;
ae9e28f3
JS
18105
18106 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18107 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18108
18109 /* Send the received frame back */
18110 iocbq = lpfc_sli_get_iocbq(phba);
e817e5d7
JS
18111 if (!iocbq) {
18112 /* Queue cq event and wakeup worker thread to process it */
18113 spin_lock_irqsave(&phba->hbalock, iflags);
18114 list_add_tail(&dmabuf->cq_event.list,
18115 &phba->sli4_hba.sp_queue_event);
18116 phba->hba_flag |= HBA_SP_QUEUE_EVT;
18117 spin_unlock_irqrestore(&phba->hbalock, iflags);
18118 lpfc_worker_wake_up(phba);
18119 return;
18120 }
ae9e28f3
JS
18121
18122 /* Allocate buffer for command payload */
18123 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
18124 if (pcmd)
771db5c0 18125 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
ae9e28f3
JS
18126 &pcmd->phys);
18127 if (!pcmd || !pcmd->virt)
18128 goto exit;
18129
18130 INIT_LIST_HEAD(&pcmd->list);
18131
18132 /* copyin the payload */
18133 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
18134
18135 /* fill in BDE's for command */
18136 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
18137 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
18138 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
18139 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
18140
18141 iocbq->context2 = pcmd;
18142 iocbq->vport = vport;
18143 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
18144 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
18145
18146 /*
18147 * Setup rest of the iocb as though it were a WQE
18148 * Build the SEND_FRAME WQE
18149 */
18150 wqe = (union lpfc_wqe *)&iocbq->iocb;
18151
18152 wqe->send_frame.frame_len = frame_len;
18153 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
18154 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
18155 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
18156 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
18157 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
18158 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
18159
18160 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
18161 iocbq->iocb.ulpLe = 1;
18162 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
18163 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
18164 if (rc == IOCB_ERROR)
18165 goto exit;
18166
18167 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18168 return;
18169
18170exit:
18171 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18172 "2023 Unable to process MDS loopback frame\n");
18173 if (pcmd && pcmd->virt)
771db5c0 18174 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
ae9e28f3 18175 kfree(pcmd);
401bb416
DK
18176 if (iocbq)
18177 lpfc_sli_release_iocbq(phba, iocbq);
ae9e28f3
JS
18178 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18179}
18180
4f774513
JS
18181/**
18182 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18183 * @phba: Pointer to HBA context object.
18184 *
18185 * This function is called with no lock held. This function processes all
18186 * the received buffers and gives it to upper layers when a received buffer
18187 * indicates that it is the final frame in the sequence. The interrupt
895427bd 18188 * service routine processes received buffers in interrupt context.
4f774513
JS
18189 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
18190 * appropriate receive function when the final frame in a sequence is received.
18191 **/
4d9ab994
JS
18192void
18193lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18194 struct hbq_dmabuf *dmabuf)
4f774513 18195{
4d9ab994 18196 struct hbq_dmabuf *seq_dmabuf;
4f774513
JS
18197 struct fc_frame_header *fc_hdr;
18198 struct lpfc_vport *vport;
18199 uint32_t fcfi;
939723a4 18200 uint32_t did;
4f774513 18201
4f774513 18202 /* Process each received buffer */
4d9ab994 18203 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
2ea259ee 18204
e817e5d7
JS
18205 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18206 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18207 vport = phba->pport;
18208 /* Handle MDS Loopback frames */
18209 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18210 return;
18211 }
18212
4d9ab994
JS
18213 /* check to see if this a valid type of frame */
18214 if (lpfc_fc_frame_check(phba, fc_hdr)) {
18215 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18216 return;
18217 }
2ea259ee 18218
7851fe2c
JS
18219 if ((bf_get(lpfc_cqe_code,
18220 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18221 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18222 &dmabuf->cq_event.cqe.rcqe_cmpl);
18223 else
18224 fcfi = bf_get(lpfc_rcqe_fcf_id,
18225 &dmabuf->cq_event.cqe.rcqe_cmpl);
939723a4 18226
e62245d9
JS
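/* R_CTL 0xF4 with TYPE 0xFF identifies an MDS diagnostic frame; route it to the loopback handler */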
18227 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
18228 vport = phba->pport;
18229 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18230 "2023 MDS Loopback %d bytes\n",
18231 bf_get(lpfc_rcqe_length,
18232 &dmabuf->cq_event.cqe.rcqe_cmpl));
18233 /* Handle MDS Loopback frames */
18234 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18235 return;
18236 }
18237
895427bd
JS
18238 /* d_id this frame is directed to */
18239 did = sli4_did_from_fc_hdr(fc_hdr);
18240
18241 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
939723a4 18242 if (!vport) {
4d9ab994
JS
18243 /* throw out the frame */
18244 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18245 return;
18246 }
939723a4 18247
939723a4
JS
18248 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18249 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18250 (did != Fabric_DID)) {
18251 /*
18252 * Throw out the frame if we are not pt2pt.
18253 * The pt2pt protocol allows for discovery frames
18254 * to be received without a registered VPI.
18255 */
18256 if (!(vport->fc_flag & FC_PT2PT) ||
18257 (phba->link_state == LPFC_HBA_READY)) {
18258 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18259 return;
18260 }
18261 }
18262
6669f9bb
JS
18263 /* Handle the basic abort sequence (BA_ABTS) event */
18264 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18265 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18266 return;
18267 }
18268
4d9ab994
JS
18269 /* Link this frame */
18270 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18271 if (!seq_dmabuf) {
18272 /* unable to add frame to vport - throw it out */
18273 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18274 return;
18275 }
18276 /* If not last frame in sequence continue processing frames. */
def9c7a9 18277 if (!lpfc_seq_complete(seq_dmabuf))
4d9ab994 18278 return;
def9c7a9 18279
6669f9bb
JS
18280 /* Send the complete sequence to the upper layer protocol */
18281 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
4f774513 18282}
6fb120a7
JS
18283
18284/**
18285 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18286 * @phba: pointer to lpfc hba data structure.
18287 *
18288 * This routine is invoked to post rpi header templates to the
18289 * HBA consistent with the SLI-4 interface spec. This routine
49198b37
JS
18290 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18291 * SLI4_PAGE_SIZE / 64 rpi context headers.
6fb120a7
JS
18292 *
18293 * This routine does not require any locks. Its usage is expected
18294 * to be during driver load or reset recovery, when driver
18295 * execution is sequential.
18296 *
18297 * Return codes
af901ca1 18298 * 0 - successful
d439d286 18299 * -EIO - The mailbox failed to complete successfully.
6fb120a7
JS
18300 * When this error occurs, the driver is not guaranteed
18301 * to have any rpi regions posted to the device and
18302 * must either attempt to repost the regions or take a
18303 * fatal error.
18304 **/
18305int
18306lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18307{
18308 struct lpfc_rpi_hdr *rpi_page;
18309 uint32_t rc = 0;
6d368e53
JS
18310 uint16_t lrpi = 0;
18311
18312 /* SLI4 ports that support extents do not require RPI headers. */
18313 if (!phba->sli4_hba.rpi_hdrs_in_use)
18314 goto exit;
18315 if (phba->sli4_hba.extents_in_use)
18316 return -EIO;
6fb120a7 18317
6fb120a7 18318 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
6d368e53
JS
18319 /*
18320 * Assign the rpi headers a physical rpi only if the driver
18321 * has not initialized those resources. A port reset only
18322 * needs the headers posted.
18323 */
18324 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18325 LPFC_RPI_RSRC_RDY)
18326 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18327
6fb120a7
JS
18328 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18329 if (rc != MBX_SUCCESS) {
18330 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18331 "2008 Error %d posting all rpi "
18332 "headers\n", rc);
18333 rc = -EIO;
18334 break;
18335 }
18336 }
18337
6d368e53
JS
18338 exit:
18339 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18340 LPFC_RPI_RSRC_RDY);
6fb120a7
JS
18341 return rc;
18342}
18343
18344/**
18345 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18346 * @phba: pointer to lpfc hba data structure.
18347 * @rpi_page: pointer to the rpi memory region.
18348 *
18349 * This routine is invoked to post a single rpi header to the
18350 * HBA consistent with the SLI-4 interface spec. This memory region
18351 * maps up to 64 rpi context regions.
18352 *
18353 * Return codes
af901ca1 18354 * 0 - successful
d439d286
JS
18355 * -ENOMEM - No available memory
18356 * -EIO - The mailbox failed to complete successfully.
6fb120a7
JS
18357 **/
18358int
18359lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18360{
18361 LPFC_MBOXQ_t *mboxq;
18362 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18363 uint32_t rc = 0;
6fb120a7
JS
18364 uint32_t shdr_status, shdr_add_status;
18365 union lpfc_sli4_cfg_shdr *shdr;
18366
6d368e53
JS
18367 /* SLI4 ports that support extents do not require RPI headers. */
18368 if (!phba->sli4_hba.rpi_hdrs_in_use)
18369 return rc;
18370 if (phba->sli4_hba.extents_in_use)
18371 return -EIO;
18372
6fb120a7
JS
18373 /* The port is notified of the header region via a mailbox command. */
18374 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18375 if (!mboxq) {
18376 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18377 "2001 Unable to allocate memory for issuing "
18378 "SLI_CONFIG_SPECIAL mailbox command\n");
18379 return -ENOMEM;
18380 }
18381
18382 /* Post all rpi memory regions to the port. */
18383 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
6fb120a7
JS
18384 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18385 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18386 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
fedd3b7b
JS
18387 sizeof(struct lpfc_sli4_cfg_mhdr),
18388 LPFC_SLI4_MBX_EMBED);
6d368e53
JS
18389
18390
18391 /* Post the physical rpi to the port for this rpi header. */
6fb120a7
JS
18392 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18393 rpi_page->start_rpi);
6d368e53
JS
18394 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18395 hdr_tmpl, rpi_page->page_count);
18396
6fb120a7
JS
18397 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18398 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
f1126688 18399 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6fb120a7
JS
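/* The mailbox completion status is reported in the SLI4 config sub-header */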
18400 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18401 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18402 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18403 if (rc != MBX_TIMEOUT)
18404 mempool_free(mboxq, phba->mbox_mem_pool);
18405 if (shdr_status || shdr_add_status || rc) {
18406 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18407 "2514 POST_RPI_HDR mailbox failed with "
18408 "status x%x add_status x%x, mbx status x%x\n",
18409 shdr_status, shdr_add_status, rc);
18410 rc = -ENXIO;
845d9e8d
JS
18411 } else {
18412 /*
18413 * The next_rpi stores the next logical modulo-64 rpi value used
18414 * to post physical rpis in subsequent rpi postings.
18415 */
18416 spin_lock_irq(&phba->hbalock);
18417 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18418 spin_unlock_irq(&phba->hbalock);
6fb120a7
JS
18419 }
18420 return rc;
18421}
18422
18423/**
18424 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18425 * @phba: pointer to lpfc hba data structure.
18426 *
18427 * This routine is invoked to allocate an available rpi from the
18428 * driver's rpi bitmask consistent with the SLI-4 interface spec. If
49198b37
JS
18429 * the allocation leaves fewer than LPFC_RPI_LOW_WATER_MARK rpis
18430 * available, an additional rpi header region is created and posted.
6fb120a7
JS
18431 *
18432 * Returns
af901ca1 18433 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
6fb120a7
JS
18434 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18435 **/
18436int
18437lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18438{
6d368e53
JS
18439 unsigned long rpi;
18440 uint16_t max_rpi, rpi_limit;
18441 uint16_t rpi_remaining, lrpi = 0;
6fb120a7 18442 struct lpfc_rpi_hdr *rpi_hdr;
4902b381 18443 unsigned long iflag;
6fb120a7 18444
6fb120a7 18445 /*
6d368e53
JS
18446 * Fetch the next logical rpi. Because this index is logical,
18447 * the driver starts at 0 each time.
6fb120a7 18448 */
4902b381 18449 spin_lock_irqsave(&phba->hbalock, iflag);
be6bb941
JS
18450 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18451 rpi_limit = phba->sli4_hba.next_rpi;
18452
6d368e53
JS
18453 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18454 if (rpi >= rpi_limit)
6fb120a7
JS
18455 rpi = LPFC_RPI_ALLOC_ERROR;
18456 else {
18457 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18458 phba->sli4_hba.max_cfg_param.rpi_used++;
18459 phba->sli4_hba.rpi_count++;
18460 }
0f154226
JS
18461 lpfc_printf_log(phba, KERN_INFO,
18462 LOG_NODE | LOG_DISCOVERY,
18463 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
be6bb941 18464 (int) rpi, max_rpi, rpi_limit);
6fb120a7
JS
18465
18466 /*
18467 * Don't try to allocate more rpi header regions if the device limit
6d368e53 18468 * has been exhausted.
6fb120a7
JS
18469 */
18470 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18471 (phba->sli4_hba.rpi_count >= max_rpi)) {
4902b381 18472 spin_unlock_irqrestore(&phba->hbalock, iflag);
6fb120a7
JS
18473 return rpi;
18474 }
18475
6d368e53
JS
18476 /*
18477 * RPI header postings are not required for SLI4 ports capable of
18478 * extents.
18479 */
18480 if (!phba->sli4_hba.rpi_hdrs_in_use) {
4902b381 18481 spin_unlock_irqrestore(&phba->hbalock, iflag);
6d368e53
JS
18482 return rpi;
18483 }
18484
6fb120a7
JS
18485 /*
18486 * If the driver is running low on rpi resources, allocate another
18487 * page now. Note that the next_rpi value is used because
18488 * it represents how many are actually in use whereas max_rpi notes
18489 * the maximum number supported by the device.
18490 */
6d368e53 18491 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
4902b381 18492 spin_unlock_irqrestore(&phba->hbalock, iflag);
6fb120a7
JS
18493 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18494 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18495 if (!rpi_hdr) {
18496 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18497 "2002 Error Could not grow rpi "
18498 "count\n");
18499 } else {
6d368e53
JS
18500 lrpi = rpi_hdr->start_rpi;
18501 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
6fb120a7
JS
18502 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18503 }
18504 }
18505
18506 return rpi;
18507}
18508
d7c47992
JS
18509/**
18510 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
18511 * @phba: pointer to lpfc hba data structure.
18512 *
18513 * This routine is invoked to release an rpi to the pool of
18514 * available rpis maintained by the driver.
18515 **/
5d8b8167 18516static void
d7c47992
JS
18517__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18518{
7cfd5639
JS
18519 /*
18520 * if the rpi value indicates a prior unreg has already
18521 * been done, skip the unreg.
18522 */
18523 if (rpi == LPFC_RPI_ALLOC_ERROR)
18524 return;
18525
d7c47992
JS
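/* Only adjust the usage counters if the rpi was actually marked in use */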
18526 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18527 phba->sli4_hba.rpi_count--;
18528 phba->sli4_hba.max_cfg_param.rpi_used--;
b95b2119 18529 } else {
0f154226
JS
18530 lpfc_printf_log(phba, KERN_INFO,
18531 LOG_NODE | LOG_DISCOVERY,
b95b2119
JS
18532 "2016 rpi %x not inuse\n",
18533 rpi);
d7c47992
JS
18534 }
18535}
18536
6fb120a7
JS
18537/**
18538 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18539 * @phba: pointer to lpfc hba data structure.
18540 *
18541 * This routine is invoked to release an rpi to the pool of
18542 * available rpis maintained by the driver.
18543 **/
18544void
18545lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18546{
18547 spin_lock_irq(&phba->hbalock);
d7c47992 18548 __lpfc_sli4_free_rpi(phba, rpi);
6fb120a7
JS
18549 spin_unlock_irq(&phba->hbalock);
18550}
18551
18552/**
18553 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18554 * @phba: pointer to lpfc hba data structure.
18555 *
18556 * This routine is invoked to remove the memory regions that
18557 * provide rpis via a bitmask.
18558 **/
18559void
18560lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18561{
18562 kfree(phba->sli4_hba.rpi_bmask);
6d368e53
JS
18563 kfree(phba->sli4_hba.rpi_ids);
18564 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6fb120a7
JS
18565}
18566
18567/**
18568 * lpfc_sli4_resume_rpi - Resume an rpi with the port
18569 * @ndlp: pointer to the node whose rpi is being resumed.
18570 * @cmpl: optional completion handler for the RESUME_RPI mailbox.
18571 * @arg: completion context stored in the mailbox for @cmpl.
18572 * This routine issues a RESUME_RPI mailbox command for the rpi of @ndlp.
18573 **/
18574int
6b5151fd
JS
18575lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18576 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
6fb120a7
JS
18577{
18578 LPFC_MBOXQ_t *mboxq;
18579 struct lpfc_hba *phba = ndlp->phba;
18580 int rc;
18581
18582 /* The port is notified of the header region via a mailbox command. */
18583 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18584 if (!mboxq)
18585 return -ENOMEM;
18586
18587 /* Post all rpi memory regions to the port. */
18588 lpfc_resume_rpi(mboxq, ndlp);
6b5151fd
JS
18589 if (cmpl) {
18590 mboxq->mbox_cmpl = cmpl;
3e1f0718
JS
18591 mboxq->ctx_buf = arg;
18592 mboxq->ctx_ndlp = ndlp;
72859909
JS
18593 } else
18594 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6b5151fd 18595 mboxq->vport = ndlp->vport;
6fb120a7
JS
18596 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18597 if (rc == MBX_NOT_FINISHED) {
18598 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18599 "2010 Resume RPI Mailbox failed "
18600 "status %d, mbxStatus x%x\n", rc,
18601 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18602 mempool_free(mboxq, phba->mbox_mem_pool);
18603 return -EIO;
18604 }
18605 return 0;
18606}
18607
18608/**
18609 * lpfc_sli4_init_vpi - Initialize a vpi with the port
76a95d75 18610 * @vport: Pointer to the vport for which the vpi is being initialized
6fb120a7 18611 *
76a95d75 18612 * This routine is invoked to activate a vpi with the port.
6fb120a7
JS
18613 *
18614 * Returns:
18615 * 0 success
18616 * -Evalue otherwise
18617 **/
18618int
76a95d75 18619lpfc_sli4_init_vpi(struct lpfc_vport *vport)
6fb120a7
JS
18620{
18621 LPFC_MBOXQ_t *mboxq;
18622 int rc = 0;
6a9c52cf 18623 int retval = MBX_SUCCESS;
6fb120a7 18624 uint32_t mbox_tmo;
76a95d75 18625 struct lpfc_hba *phba = vport->phba;
6fb120a7
JS
18626 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18627 if (!mboxq)
18628 return -ENOMEM;
76a95d75 18629 lpfc_init_vpi(phba, mboxq, vport->vpi);
a183a15f 18630 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
6fb120a7 18631 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6fb120a7 18632 if (rc != MBX_SUCCESS) {
76a95d75 18633 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
6fb120a7
JS
18634 "2022 INIT VPI Mailbox failed "
18635 "status %d, mbxStatus x%x\n", rc,
18636 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6a9c52cf 18637 retval = -EIO;
6fb120a7 18638 }
6a9c52cf 18639 if (rc != MBX_TIMEOUT)
76a95d75 18640 mempool_free(mboxq, vport->phba->mbox_mem_pool);
6a9c52cf
JS
18641
18642 return retval;
6fb120a7
JS
18643}
18644
18645/**
18646 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18647 * @phba: pointer to lpfc hba data structure.
18648 * @mboxq: Pointer to mailbox object.
18649 *
18650 * This routine is invoked to manually add a single FCF record. The caller
18651 * must pass a completely initialized FCF_Record. This routine takes
18652 * care of the nonembedded mailbox operations.
18653 **/
18654static void
18655lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18656{
18657 void *virt_addr;
18658 union lpfc_sli4_cfg_shdr *shdr;
18659 uint32_t shdr_status, shdr_add_status;
18660
18661 virt_addr = mboxq->sge_array->addr[0];
18662 /* The IOCTL status is embedded in the mailbox subheader. */
18663 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18664 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18665 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18666
18667 if ((shdr_status || shdr_add_status) &&
18668 (shdr_status != STATUS_FCF_IN_USE))
18669 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18670 "2558 ADD_FCF_RECORD mailbox failed with "
18671 "status x%x add_status x%x\n",
18672 shdr_status, shdr_add_status);
18673
18674 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18675}
18676
18677/**
18678 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18679 * @phba: pointer to lpfc hba data structure.
18680 * @fcf_record: pointer to the initialized fcf record to add.
18681 *
18682 * This routine is invoked to manually add a single FCF record. The caller
18683 * must pass a completely initialized FCF_Record. This routine takes
18684 * care of the nonembedded mailbox operations.
18685 **/
18686int
18687lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18688{
18689 int rc = 0;
18690 LPFC_MBOXQ_t *mboxq;
18691 uint8_t *bytep;
18692 void *virt_addr;
6fb120a7
JS
18693 struct lpfc_mbx_sge sge;
18694 uint32_t alloc_len, req_len;
18695 uint32_t fcfindex;
18696
18697 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18698 if (!mboxq) {
18699 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18700 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18701 return -ENOMEM;
18702 }
18703
18704 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18705 sizeof(uint32_t);
18706
18707 /* Allocate DMA memory and set up the non-embedded mailbox command */
18708 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18709 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18710 req_len, LPFC_SLI4_MBX_NEMBED);
18711 if (alloc_len < req_len) {
18712 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18713 "2523 Allocated DMA memory size (x%x) is "
18714 "less than the requested DMA memory "
18715 "size (x%x)\n", alloc_len, req_len);
18716 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18717 return -ENOMEM;
18718 }
18719
18720 /*
18721 * Get the first SGE entry from the non-embedded DMA memory. This
18722 * routine only uses a single SGE.
18723 */
18724 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
6fb120a7
JS
18725 virt_addr = mboxq->sge_array->addr[0];
18726 /*
18727 * Configure the FCF record for FCFI 0. This is the driver's
18728 * hardcoded default and gets used in nonFIP mode.
18729 */
18730 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18731 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18732 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18733
18734 /*
18735 * Copy the fcf_index and the FCF Record Data. The data starts after
18736 * the FCoE header plus word10. The data copy needs to be endian
18737 * correct.
18738 */
18739 bytep += sizeof(uint32_t);
18740 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18741 mboxq->vport = phba->pport;
18742 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18743 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18744 if (rc == MBX_NOT_FINISHED) {
18745 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18746 "2515 ADD_FCF_RECORD mailbox failed with "
18747 "status 0x%x\n", rc);
18748 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18749 rc = -EIO;
18750 } else
18751 rc = 0;
18752
18753 return rc;
18754}
18755
18756/**
18757 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18758 * @phba: pointer to lpfc hba data structure.
18759 * @fcf_record: pointer to the fcf record to write the default data.
18760 * @fcf_index: FCF table entry index.
18761 *
18762 * This routine is invoked to build the driver's default FCF record. The
18763 * values used are hardcoded. This routine handles memory initialization.
18764 *
18765 **/
18766void
18767lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18768 struct fcf_record *fcf_record,
18769 uint16_t fcf_index)
18770{
18771 memset(fcf_record, 0, sizeof(struct fcf_record));
18772 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18773 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18774 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18775 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18776 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18777 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18778 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18779 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18780 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18781 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18782 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18783 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18784 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
0c287589 18785 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
6fb120a7
JS
18786 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18787 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18788 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18789 /* Set the VLAN bit map */
18790 if (phba->valid_vlan) {
18791 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18792 = 1 << (phba->vlan_id % 8);
18793 }
18794}
18795
18796/**
0c9ab6f5 18797 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
6fb120a7
JS
18798 * @phba: pointer to lpfc hba data structure.
18799 * @fcf_index: FCF table entry offset.
18800 *
0c9ab6f5
JS
18801 * This routine is invoked to scan the entire FCF table by reading FCF
18802 * record and processing it one at a time starting from the @fcf_index
18803 * for initial FCF discovery or fast FCF failover rediscovery.
18804 *
25985edc 18805 * Return 0 if the mailbox command is submitted successfully, non-zero
0c9ab6f5 18806 * otherwise.
6fb120a7
JS
18807 **/
18808int
0c9ab6f5 18809lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
6fb120a7
JS
18810{
18811 int rc = 0, error;
18812 LPFC_MBOXQ_t *mboxq;
6fb120a7 18813
32b9793f 18814 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
80c17849 18815 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
6fb120a7
JS
18816 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18817 if (!mboxq) {
18818 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18819 "2000 Failed to allocate mbox for "
18820 "READ_FCF cmd\n");
4d9ab994 18821 error = -ENOMEM;
0c9ab6f5 18822 goto fail_fcf_scan;
6fb120a7 18823 }
ecfd03c6 18824 /* Construct the read FCF record mailbox command */
0c9ab6f5 18825 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
ecfd03c6
JS
18826 if (rc) {
18827 error = -EINVAL;
0c9ab6f5 18828 goto fail_fcf_scan;
6fb120a7 18829 }
ecfd03c6 18830 /* Issue the mailbox command asynchronously */
6fb120a7 18831 mboxq->vport = phba->pport;
0c9ab6f5 18832 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
a93ff37a
JS
18833
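/* Flag that an FCF table scan is in progress before issuing the command */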
18834 spin_lock_irq(&phba->hbalock);
18835 phba->hba_flag |= FCF_TS_INPROG;
18836 spin_unlock_irq(&phba->hbalock);
18837
6fb120a7 18838 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
ecfd03c6 18839 if (rc == MBX_NOT_FINISHED)
6fb120a7 18840 error = -EIO;
ecfd03c6 18841 else {
38b92ef8
JS
18842 /* Reset eligible FCF count for new scan */
18843 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
999d813f 18844 phba->fcf.eligible_fcf_cnt = 0;
6fb120a7 18845 error = 0;
32b9793f 18846 }
0c9ab6f5 18847fail_fcf_scan:
4d9ab994
JS
18848 if (error) {
18849 if (mboxq)
18850 lpfc_sli4_mbox_cmd_free(phba, mboxq);
a93ff37a 18851 /* FCF scan failed, clear FCF_TS_INPROG flag */
4d9ab994 18852 spin_lock_irq(&phba->hbalock);
a93ff37a 18853 phba->hba_flag &= ~FCF_TS_INPROG;
4d9ab994
JS
18854 spin_unlock_irq(&phba->hbalock);
18855 }
6fb120a7
JS
18856 return error;
18857}
a0c87cbd 18858
0c9ab6f5 18859/**
a93ff37a 18860 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
0c9ab6f5
JS
18861 * @phba: pointer to lpfc hba data structure.
18862 * @fcf_index: FCF table entry offset.
18863 *
18864 * This routine is invoked to read an FCF record indicated by @fcf_index
a93ff37a 18865 * and to use it for FLOGI roundrobin FCF failover.
0c9ab6f5 18866 *
25985edc 18867 * Return 0 if the mailbox command is submitted successfully, non-zero
0c9ab6f5
JS
18868 * otherwise.
18869 **/
18870int
18871lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18872{
18873 int rc = 0, error;
18874 LPFC_MBOXQ_t *mboxq;
18875
18876 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18877 if (!mboxq) {
18878 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18879 "2763 Failed to allocate mbox for "
18880 "READ_FCF cmd\n");
18881 error = -ENOMEM;
18882 goto fail_fcf_read;
18883 }
18884 /* Construct the read FCF record mailbox command */
18885 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18886 if (rc) {
18887 error = -EINVAL;
18888 goto fail_fcf_read;
18889 }
18890 /* Issue the mailbox command asynchronously */
18891 mboxq->vport = phba->pport;
18892 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18893 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18894 if (rc == MBX_NOT_FINISHED)
18895 error = -EIO;
18896 else
18897 error = 0;
18898
18899fail_fcf_read:
18900 if (error && mboxq)
18901 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18902 return error;
18903}
18904
18905/**
18906 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18907 * @phba: pointer to lpfc hba data structure.
18908 * @fcf_index: FCF table entry offset.
18909 *
18910 * This routine is invoked to read an FCF record indicated by @fcf_index to
a93ff37a 18911 * determine whether it's eligible for the FLOGI roundrobin failover list.
0c9ab6f5 18912 *
25985edc 18913 * Return 0 if the mailbox command is submitted successfully, non-zero
0c9ab6f5
JS
18914 * otherwise.
18915 **/
18916int
18917lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18918{
18919 int rc = 0, error;
18920 LPFC_MBOXQ_t *mboxq;
18921
18922 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18923 if (!mboxq) {
18924 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18925 "2758 Failed to allocate mbox for "
18926 "READ_FCF cmd\n");
18927 error = -ENOMEM;
18928 goto fail_fcf_read;
18929 }
18930 /* Construct the read FCF record mailbox command */
18931 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18932 if (rc) {
18933 error = -EINVAL;
18934 goto fail_fcf_read;
18935 }
18936 /* Issue the mailbox command asynchronously */
18937 mboxq->vport = phba->pport;
18938 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18939 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18940 if (rc == MBX_NOT_FINISHED)
18941 error = -EIO;
18942 else
18943 error = 0;
18944
18945fail_fcf_read:
18946 if (error && mboxq)
18947 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18948 return error;
18949}
18950
7d791df7 18951/**
f5cb5304 18952 * lpfc_check_next_fcf_pri_level
7d791df7
JS
18953 * @phba: pointer to the lpfc_hba struct for this port.
18954 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18955 * routine when the rr_bmask is empty. The FCF indices are put into the
18956 * rr_bmask based on their priority level, starting from the highest
18957 * priority down to the lowest. The most likely FCF candidate will be in the
18958 * highest priority group. When this routine is called it searches the
18959 * fcf_pri list for the next lowest priority group and repopulates the
18960 * rr_bmask with only those fcf_indexes.
18961 * Returns:
18962 * 1=success 0=failure
18963 **/
5d8b8167 18964static int
7d791df7
JS
18965lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18966{
18967 uint16_t next_fcf_pri;
18968 uint16_t last_index;
18969 struct lpfc_fcf_pri *fcf_pri;
18970 int rc;
18971 int ret = 0;
18972
18973 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18974 LPFC_SLI4_FCF_TBL_INDX_MAX);
18975 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18976 "3060 Last IDX %d\n", last_index);
2562669c
JS
18977
18978 /* Verify the priority list has 2 or more entries */
18979 spin_lock_irq(&phba->hbalock);
18980 if (list_empty(&phba->fcf.fcf_pri_list) ||
18981 list_is_singular(&phba->fcf.fcf_pri_list)) {
18982 spin_unlock_irq(&phba->hbalock);
7d791df7
JS
18983 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18984 "3061 Last IDX %d\n", last_index);
18985 return 0; /* Empty rr list */
18986 }
2562669c
JS
18987 spin_unlock_irq(&phba->hbalock);
18988
7d791df7
JS
18989 next_fcf_pri = 0;
18990 /*
18991 * Clear the rr_bmask and set all of the bits that are at this
18992 * priority.
18993 */
18994 memset(phba->fcf.fcf_rr_bmask, 0,
18995 sizeof(*phba->fcf.fcf_rr_bmask));
18996 spin_lock_irq(&phba->hbalock);
18997 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18998 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18999 continue;
19000 /*
19001 * The first priority that has not had a FLOGI failure
19002 * will be the highest.
19003 */
19004 if (!next_fcf_pri)
19005 next_fcf_pri = fcf_pri->fcf_rec.priority;
19006 spin_unlock_irq(&phba->hbalock);
19007 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19008 rc = lpfc_sli4_fcf_rr_index_set(phba,
19009 fcf_pri->fcf_rec.fcf_index);
19010 if (rc)
19011 return 0;
19012 }
19013 spin_lock_irq(&phba->hbalock);
19014 }
19015 /*
19016 * If next_fcf_pri was not set above and the list is not empty, then
19017 * we have had FLOGI failures on all of them. So clear the FLOGI
4907cb7b 19018 * failed flag and start at the beginning.
7d791df7
JS
19019 */
19020 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19021 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19022 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
19023 /*
19024 * The first priority that has not had a FLOGI failure
19025 * will be the highest.
19026 */
19027 if (!next_fcf_pri)
19028 next_fcf_pri = fcf_pri->fcf_rec.priority;
19029 spin_unlock_irq(&phba->hbalock);
19030 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19031 rc = lpfc_sli4_fcf_rr_index_set(phba,
19032 fcf_pri->fcf_rec.fcf_index);
19033 if (rc)
19034 return 0;
19035 }
19036 spin_lock_irq(&phba->hbalock);
19037 }
19038 } else
19039 ret = 1;
19040 spin_unlock_irq(&phba->hbalock);
19041
19042 return ret;
19043}
0c9ab6f5
JS
19044/**
19045 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
19046 * @phba: pointer to lpfc hba data structure.
19047 *
19048 * This routine is to get the next eligible FCF record index in a round
19049 * robin fashion. If the next eligible FCF record index equals the
a93ff37a 19050 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
0c9ab6f5
JS
19051 * shall be returned, otherwise, the next eligible FCF record's index
19052 * shall be returned.
19053 **/
19054uint16_t
19055lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19056{
19057 uint16_t next_fcf_index;
19058
421c6622 19059initial_priority:
3804dc84 19060 /* Search start from next bit of currently registered FCF index */
421c6622
JS
19061 next_fcf_index = phba->fcf.current_rec.fcf_indx;
19062
7d791df7 19063next_priority:
421c6622
JS
19064 /* Determine the next fcf index to check */
19065 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
0c9ab6f5
JS
19066 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19067 LPFC_SLI4_FCF_TBL_INDX_MAX,
3804dc84
JS
19068 next_fcf_index);
19069
0c9ab6f5 19070 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
7d791df7
JS
19071 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19072 /*
19073 * If we have wrapped then we need to clear the bits that
19074 * have been tested so that we can detect when we should
19075 * change the priority level.
19076 */
0c9ab6f5
JS
19077 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19078 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
7d791df7
JS
19079 }
19080
3804dc84
JS
19081
19082 /* Check roundrobin failover list empty condition */
7d791df7
JS
19083 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
19084 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
19085 /*
19086 * If next fcf index is not found check if there are lower
19087 * Priority level fcf's in the fcf_priority list.
19088 * Set up the rr_bmask with all of the available fcf bits
19089 * at that level and continue the selection process.
19090 */
19091 if (lpfc_check_next_fcf_pri_level(phba))
421c6622 19092 goto initial_priority;
3804dc84
JS
19093 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
19094 "2844 No roundrobin failover FCF available\n");
036cad1f
JS
19095
19096 return LPFC_FCOE_FCF_NEXT_NONE;
3804dc84
JS
19097 }
19098
7d791df7
JS
19099 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
19100 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
f5cb5304
JS
19101 LPFC_FCF_FLOGI_FAILED) {
19102 if (list_is_singular(&phba->fcf.fcf_pri_list))
19103 return LPFC_FCOE_FCF_NEXT_NONE;
19104
7d791df7 19105 goto next_priority;
f5cb5304 19106 }
7d791df7 19107
3804dc84 19108 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a
JS
19109 "2845 Get next roundrobin failover FCF (x%x)\n",
19110 next_fcf_index);
19111
0c9ab6f5
JS
19112 return next_fcf_index;
19113}
19114
19115/**
19116 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
19117 * @phba: pointer to lpfc hba data structure.
19118 *
19119 * This routine sets the FCF record index in to the eligible bmask for
a93ff37a 19120 * roundrobin failover search. It checks to make sure that the index
0c9ab6f5
JS
19121 * does not go beyond the range of the driver allocated bmask dimension
19122 * before setting the bit.
19123 *
19124 * Returns 0 if the index bit successfully set, otherwise, it returns
19125 * -EINVAL.
19126 **/
19127int
19128lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
19129{
19130 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19131 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
a93ff37a
JS
19132 "2610 FCF (x%x) reached driver's book "
19133 "keeping dimension:x%x\n",
0c9ab6f5
JS
19134 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19135 return -EINVAL;
19136 }
19137 /* Set the eligible FCF record index bmask */
19138 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19139
3804dc84 19140 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 19141 "2790 Set FCF (x%x) to roundrobin FCF failover "
3804dc84
JS
19142 "bmask\n", fcf_index);
19143
0c9ab6f5
JS
19144 return 0;
19145}
19146
19147/**
3804dc84 19148 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
0c9ab6f5
JS
19149 * @phba: pointer to lpfc hba data structure.
19150 *
19151 * This routine clears the FCF record index from the eligible bmask for
a93ff37a 19152 * roundrobin failover search. It checks to make sure that the index
0c9ab6f5
JS
19153 * does not go beyond the range of the driver allocated bmask dimension
19154 * before clearing the bit.
19155 **/
19156void
19157lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
19158{
9a803a74 19159 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
0c9ab6f5
JS
19160 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19161 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
a93ff37a
JS
19162 "2762 FCF (x%x) reached driver's book "
19163 "keeping dimension:x%x\n",
0c9ab6f5
JS
19164 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19165 return;
19166 }
19167 /* Clear the eligible FCF record index bmask */
7d791df7 19168 spin_lock_irq(&phba->hbalock);
9a803a74
JS
19169 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
19170 list) {
7d791df7
JS
19171 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
19172 list_del_init(&fcf_pri->list);
19173 break;
19174 }
19175 }
19176 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 19177 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
3804dc84
JS
19178
19179 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 19180 "2791 Clear FCF (x%x) from roundrobin failover "
3804dc84 19181 "bmask\n", fcf_index);
0c9ab6f5
JS
19182}
19183
ecfd03c6
JS
19184/**
19185 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
19186 * @phba: pointer to lpfc hba data structure.
19187 *
19188 * This routine is the completion routine for the rediscover FCF table mailbox
19189 * command. If the mailbox command returned failure, it will try to stop the
19190 * FCF rediscover wait timer.
19191 **/
5d8b8167 19192static void
ecfd03c6
JS
19193lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
19194{
19195 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19196 uint32_t shdr_status, shdr_add_status;
19197
19198 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19199
19200 shdr_status = bf_get(lpfc_mbox_hdr_status,
19201 &redisc_fcf->header.cfg_shdr.response);
19202 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19203 &redisc_fcf->header.cfg_shdr.response);
19204 if (shdr_status || shdr_add_status) {
0c9ab6f5 19205 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
ecfd03c6
JS
19206 "2746 Requesting for FCF rediscovery failed "
19207 "status x%x add_status x%x\n",
19208 shdr_status, shdr_add_status);
0c9ab6f5 19209 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
fc2b989b 19210 spin_lock_irq(&phba->hbalock);
0c9ab6f5 19211 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
fc2b989b
JS
19212 spin_unlock_irq(&phba->hbalock);
19213 /*
19214 * CVL event triggered FCF rediscover request failed,
19215 * last resort to re-try current registered FCF entry.
19216 */
19217 lpfc_retry_pport_discovery(phba);
19218 } else {
19219 spin_lock_irq(&phba->hbalock);
0c9ab6f5 19220 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
fc2b989b
JS
19221 spin_unlock_irq(&phba->hbalock);
19222 /*
19223 * DEAD FCF event triggered FCF rediscover request
19224 * failed, last resort to fail over as a link down
19225 * to FCF registration.
19226 */
19227 lpfc_sli4_fcf_dead_failthrough(phba);
19228 }
0c9ab6f5
JS
19229 } else {
19230 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 19231 "2775 Start FCF rediscover quiescent timer\n");
ecfd03c6
JS
19232 /*
19233 * Start FCF rediscovery wait timer for pending FCF
19234 * before rescan FCF record table.
19235 */
19236 lpfc_fcf_redisc_wait_start_timer(phba);
0c9ab6f5 19237 }
ecfd03c6
JS
19238
19239 mempool_free(mbox, phba->mbox_mem_pool);
19240}
19241
19242/**
3804dc84 19243 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
ecfd03c6
JS
19244 * @phba: pointer to lpfc hba data structure.
19245 *
19246 * This routine is invoked to request for rediscovery of the entire FCF table
19247 * by the port.
19248 **/
19249int
19250lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19251{
19252 LPFC_MBOXQ_t *mbox;
19253 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19254 int rc, length;
19255
0c9ab6f5
JS
19256 /* Cancel retry delay timers to all vports before FCF rediscover */
19257 lpfc_cancel_all_vport_retry_delay_timer(phba);
19258
ecfd03c6
JS
19259 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19260 if (!mbox) {
19261 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19262 "2745 Failed to allocate mbox for "
19263 "requesting FCF rediscover.\n");
19264 return -ENOMEM;
19265 }
19266
19267 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19268 sizeof(struct lpfc_sli4_cfg_mhdr));
19269 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19270 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19271 length, LPFC_SLI4_MBX_EMBED);
19272
19273 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19274 /* Set count to 0 for invalidating the entire FCF database */
19275 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19276
19277 /* Issue the mailbox command asynchronously */
19278 mbox->vport = phba->pport;
19279 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19280 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19281
19282 if (rc == MBX_NOT_FINISHED) {
19283 mempool_free(mbox, phba->mbox_mem_pool);
19284 return -EIO;
19285 }
19286 return 0;
19287}
19288
fc2b989b
JS
19289/**
19290 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19291 * @phba: pointer to lpfc hba data structure.
19292 *
19293 * This function is the failover routine as a last resort to the FCF DEAD
19294 * event when driver failed to perform fast FCF failover.
19295 **/
19296void
19297lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19298{
19299 uint32_t link_state;
19300
19301 /*
19302 * Last resort as FCF DEAD event failover will treat this as
19303 * a link down, but save the link state because we don't want
19304 * it to be changed to Link Down unless it is already down.
19305 */
19306 link_state = phba->link_state;
19307 lpfc_linkdown(phba);
19308 phba->link_state = link_state;
19309
19310 /* Unregister FCF if no devices connected to it */
19311 lpfc_unregister_unused_fcf(phba);
19312}
19313
a0c87cbd 19314/**
026abb87 19315 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
a0c87cbd 19316 * @phba: pointer to lpfc hba data structure.
026abb87 19317 * @rgn23_data: pointer to configure region 23 data.
a0c87cbd 19318 *
026abb87
JS
19319 * This function gets SLI3 port configure region 23 data through memory dump
19320 * mailbox command. When it successfully retrieves data, the size of the data
19321 * will be returned, otherwise, 0 will be returned.
a0c87cbd 19322 **/
026abb87
JS
19323static uint32_t
19324lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
a0c87cbd
JS
19325{
19326 LPFC_MBOXQ_t *pmb = NULL;
19327 MAILBOX_t *mb;
026abb87 19328 uint32_t offset = 0;
a0c87cbd
JS
19329 int rc;
19330
026abb87
JS
19331 if (!rgn23_data)
19332 return 0;
19333
a0c87cbd
JS
19334 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19335 if (!pmb) {
19336 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
026abb87
JS
19337 "2600 failed to allocate mailbox memory\n");
19338 return 0;
a0c87cbd
JS
19339 }
19340 mb = &pmb->u.mb;
19341
a0c87cbd
JS
19342 do {
19343 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19344 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19345
19346 if (rc != MBX_SUCCESS) {
19347 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
026abb87
JS
19348 "2601 failed to read config "
19349 "region 23, rc 0x%x Status 0x%x\n",
19350 rc, mb->mbxStatus);
a0c87cbd
JS
19351 mb->un.varDmp.word_cnt = 0;
19352 }
19353 /*
19354 * dump mem may return a zero when finished or we got a
19355 * mailbox error, either way we are done.
19356 */
19357 if (mb->un.varDmp.word_cnt == 0)
19358 break;
19359 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19360 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19361
19362 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
026abb87
JS
19363 rgn23_data + offset,
19364 mb->un.varDmp.word_cnt);
a0c87cbd
JS
19365 offset += mb->un.varDmp.word_cnt;
19366 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19367
026abb87
JS
19368 mempool_free(pmb, phba->mbox_mem_pool);
19369 return offset;
19370}
19371
19372/**
19373 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19374 * @phba: pointer to lpfc hba data structure.
19375 * @rgn23_data: pointer to configure region 23 data.
19376 *
19377 * This function gets SLI4 port configure region 23 data through memory dump
19378 * mailbox command. When it successfully retrieves data, the size of the data
19379 * will be returned, otherwise, 0 will be returned.
19380 **/
19381static uint32_t
19382lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19383{
19384 LPFC_MBOXQ_t *mboxq = NULL;
19385 struct lpfc_dmabuf *mp = NULL;
19386 struct lpfc_mqe *mqe;
19387 uint32_t data_length = 0;
19388 int rc;
19389
19390 if (!rgn23_data)
19391 return 0;
19392
19393 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19394 if (!mboxq) {
19395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19396 "3105 failed to allocate mailbox memory\n");
19397 return 0;
19398 }
19399
19400 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19401 goto out;
19402 mqe = &mboxq->u.mqe;
3e1f0718 19403 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
026abb87
JS
19404 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19405 if (rc)
19406 goto out;
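/* Word 5 of the dump reply reports the length of the returned region 23 data */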
19407 data_length = mqe->un.mb_words[5];
19408 if (data_length == 0)
19409 goto out;
19410 if (data_length > DMP_RGN23_SIZE) {
19411 data_length = 0;
19412 goto out;
19413 }
19414 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19415out:
19416 mempool_free(mboxq, phba->mbox_mem_pool);
19417 if (mp) {
19418 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19419 kfree(mp);
19420 }
19421 return data_length;
19422}
19423
19424/**
19425 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19426 * @phba: pointer to lpfc hba data structure.
19427 *
19428 * This function reads region 23 and parses the TLVs for port status to
19429 * decide if the user disabled the port. If the TLV indicates the
19430 * port is disabled, the hba_flag is set accordingly.
19431 **/
19432void
19433lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19434{
19435 uint8_t *rgn23_data = NULL;
19436 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19437 uint32_t offset = 0;
19438
19439 /* Get adapter Region 23 data */
19440 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19441 if (!rgn23_data)
19442 goto out;
19443
19444 if (phba->sli_rev < LPFC_SLI_REV4)
19445 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19446 else {
19447 if_type = bf_get(lpfc_sli_intf_if_type,
19448 &phba->sli4_hba.sli_intf);
19449 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19450 goto out;
19451 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19452 }
a0c87cbd
JS
19453
19454 if (!data_size)
19455 goto out;
19456
19457 /* Check the region signature first */
19458 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19460 "2619 Config region 23 has bad signature\n");
19461 goto out;
19462 }
19463 offset += 4;
19464
19465 /* Check the data structure version */
19466 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19468 "2620 Config region 23 has bad version\n");
19469 goto out;
19470 }
19471 offset += 4;
19472
19473 /* Parse TLV entries in the region */
19474 while (offset < data_size) {
19475 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19476 break;
19477 /*
19478 * If the TLV is not driver specific TLV or driver id is
19479 * not linux driver id, skip the record.
19480 */
19481 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19482 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19483 (rgn23_data[offset + 3] != 0)) {
19484 offset += rgn23_data[offset + 1] * 4 + 4;
19485 continue;
19486 }
19487
19488 /* Driver found a driver specific TLV in the config region */
19489 sub_tlv_len = rgn23_data[offset + 1] * 4;
19490 offset += 4;
19491 tlv_offset = 0;
19492
19493 /*
19494 * Search for configured port state sub-TLV.
19495 */
19496 while ((offset < data_size) &&
19497 (tlv_offset < sub_tlv_len)) {
19498 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19499 offset += 4;
19500 tlv_offset += 4;
19501 break;
19502 }
19503 if (rgn23_data[offset] != PORT_STE_TYPE) {
19504 offset += rgn23_data[offset + 1] * 4 + 4;
19505 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19506 continue;
19507 }
19508
19509 /* This HBA contains PORT_STE configured */
19510 if (!rgn23_data[offset + 2])
19511 phba->hba_flag |= LINK_DISABLED;
19512
19513 goto out;
19514 }
19515 }
026abb87 19516
a0c87cbd 19517out:
a0c87cbd
JS
19518 kfree(rgn23_data);
19519 return;
19520}
695a814e 19521
52d52440
JS
19522/**
19523 * lpfc_wr_object - write an object to the firmware
19524 * @phba: HBA structure that indicates port to create a queue on.
19525 * @dmabuf_list: list of dmabufs to write to the port.
19526 * @size: the total byte value of the objects to write to the port.
19527 * @offset: the current offset to be used to start the transfer.
19528 *
19529 * This routine will create a wr_object mailbox command to send to the port.
19530 * The mailbox command will be constructed using the dma buffers described in
19531 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19532 * BDEs as the embedded mailbox can support. The @offset variable will be
19533 * used to indicate the starting offset of the transfer and will also return
19534 * the offset after the write object mailbox has completed. @size is used to
19535 * determine the end of the object and whether the eof bit should be set.
19536 *
19537 * Return 0 if successful and offset will contain the new offset to use
19538 * for the next write.
19539 * Return negative value for error cases.
19540 **/
19541int
19542lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19543 uint32_t size, uint32_t *offset)
19544{
19545 struct lpfc_mbx_wr_object *wr_object;
19546 LPFC_MBOXQ_t *mbox;
19547 int rc = 0, i = 0;
f3d0a8ac 19548 uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
52d52440 19549 uint32_t mbox_tmo;
52d52440
JS
19550 struct lpfc_dmabuf *dmabuf;
19551 uint32_t written = 0;
5021267a 19552 bool check_change_status = false;
52d52440
JS
19553
19554 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19555 if (!mbox)
19556 return -ENOMEM;
19557
19558 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19559 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19560 sizeof(struct lpfc_mbx_wr_object) -
19561 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19562
19563 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19564 wr_object->u.request.write_offset = *offset;
19565 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19566 wr_object->u.request.object_name[0] =
19567 cpu_to_le32(wr_object->u.request.object_name[0]);
19568 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
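/* Describe the object with one BDE per DMA buffer, up to the embedded mailbox BDE limit or until @size bytes are covered */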
19569 list_for_each_entry(dmabuf, dmabuf_list, list) {
19570 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19571 break;
19572 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19573 wr_object->u.request.bde[i].addrHigh =
19574 putPaddrHigh(dmabuf->phys);
19575 if (written + SLI4_PAGE_SIZE >= size) {
19576 wr_object->u.request.bde[i].tus.f.bdeSize =
19577 (size - written);
19578 written += (size - written);
19579 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
5021267a
JS
19580 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19581 check_change_status = true;
52d52440
JS
19582 } else {
19583 wr_object->u.request.bde[i].tus.f.bdeSize =
19584 SLI4_PAGE_SIZE;
19585 written += SLI4_PAGE_SIZE;
19586 }
19587 i++;
19588 }
19589 wr_object->u.request.bde_count = i;
19590 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19591 if (!phba->sli4_hba.intr_enable)
19592 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19593 else {
a183a15f 19594 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
52d52440
JS
19595 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19596 }
19597 /* The IOCTL status is embedded in the mailbox subheader. */
5021267a
JS
19598 shdr_status = bf_get(lpfc_mbox_hdr_status,
19599 &wr_object->header.cfg_shdr.response);
19600 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19601 &wr_object->header.cfg_shdr.response);
19602 if (check_change_status) {
19603 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19604 &wr_object->u.response);
f3d0a8ac
JS
19605
19606 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
19607 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
19608 shdr_csf = bf_get(lpfc_wr_object_csf,
19609 &wr_object->u.response);
19610 if (shdr_csf)
19611 shdr_change_status =
19612 LPFC_CHANGE_STATUS_PCI_RESET;
19613 }
19614
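/* Report which level of reset is required to activate the new firmware image */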
5021267a
JS
19615 switch (shdr_change_status) {
19616 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19617 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19618 "3198 Firmware write complete: System "
19619 "reboot required to instantiate\n");
19620 break;
19621 case (LPFC_CHANGE_STATUS_FW_RESET):
19622 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19623 "3199 Firmware write complete: Firmware"
19624 " reset required to instantiate\n");
19625 break;
19626 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19627 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19628 "3200 Firmware write complete: Port "
19629 "Migration or PCI Reset required to "
19630 "instantiate\n");
19631 break;
19632 case (LPFC_CHANGE_STATUS_PCI_RESET):
19633 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19634 "3201 Firmware write complete: PCI "
19635 "Reset required to instantiate\n");
19636 break;
19637 default:
19638 break;
19639 }
19640 }
52d52440
JS
19641 if (rc != MBX_TIMEOUT)
19642 mempool_free(mbox, phba->mbox_mem_pool);
19643 if (shdr_status || shdr_add_status || rc) {
19644 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19645 "3025 Write Object mailbox failed with "
19646 "status x%x add_status x%x, mbx status x%x\n",
19647 shdr_status, shdr_add_status, rc);
19648 rc = -ENXIO;
1feb8204 19649 *offset = shdr_add_status;
52d52440
JS
19650 } else
19651 *offset += wr_object->u.response.actual_write_length;
19652 return rc;
19653}
19654
695a814e
JS
19655/**
19656 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19657 * @vport: pointer to vport data structure.
19658 *
19659 * This function iterates through the mailboxq and cleans up all REG_LOGIN
19660 * and REG_VPI mailbox commands associated with the vport. This function
19661 * is called when the driver wants to restart discovery of the vport due to
19662 * a Clear Virtual Link event.
19663 **/
19664void
19665lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19666{
19667 struct lpfc_hba *phba = vport->phba;
19668 LPFC_MBOXQ_t *mb, *nextmb;
19669 struct lpfc_dmabuf *mp;
78730cfe 19670 struct lpfc_nodelist *ndlp;
d439d286 19671 struct lpfc_nodelist *act_mbx_ndlp = NULL;
589a52d6 19672 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
d439d286 19673 LIST_HEAD(mbox_cmd_list);
63e801ce 19674 uint8_t restart_loop;
695a814e 19675
d439d286 19676 /* Clean up internally queued mailbox commands with the vport */
695a814e
JS
19677 spin_lock_irq(&phba->hbalock);
19678 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19679 if (mb->vport != vport)
19680 continue;
19681
19682 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19683 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19684 continue;
19685
d439d286
JS
19686 list_del(&mb->list);
19687 list_add_tail(&mb->list, &mbox_cmd_list);
19688 }
19689 /* Clean up active mailbox command with the vport */
19690 mb = phba->sli.mbox_active;
19691 if (mb && (mb->vport == vport)) {
19692 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19693 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19694 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19695 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 19696 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
d439d286
JS
19697 /* Put reference count for delayed processing */
19698 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19699 /* Unregister the RPI when mailbox complete */
19700 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19701 }
19702 }
63e801ce
JS
19703 /* Cleanup any mailbox completions which are not yet processed */
19704 do {
19705 restart_loop = 0;
19706 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19707 /*
19708 * If this mailbox is already processed or it is
19709 * for another vport ignore it.
19710 */
19711 if ((mb->vport != vport) ||
19712 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19713 continue;
19714
19715 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19716 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19717 continue;
19718
19719 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19720 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 19721 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
63e801ce
JS
19722 /* Unregister the RPI when mailbox complete */
19723 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19724 restart_loop = 1;
19725 spin_unlock_irq(&phba->hbalock);
19726 spin_lock(shost->host_lock);
19727 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19728 spin_unlock(shost->host_lock);
19729 spin_lock_irq(&phba->hbalock);
19730 break;
19731 }
19732 }
19733 } while (restart_loop);
19734
d439d286
JS
19735 spin_unlock_irq(&phba->hbalock);
19736
19737 /* Release the cleaned-up mailbox commands */
19738 while (!list_empty(&mbox_cmd_list)) {
19739 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
695a814e 19740 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 19741 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
695a814e
JS
19742 if (mp) {
19743 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19744 kfree(mp);
19745 }
3e1f0718
JS
19746 mb->ctx_buf = NULL;
19747 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19748 mb->ctx_ndlp = NULL;
78730cfe 19749 if (ndlp) {
ec21b3b0 19750 spin_lock(shost->host_lock);
589a52d6 19751 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
ec21b3b0 19752 spin_unlock(shost->host_lock);
78730cfe 19753 lpfc_nlp_put(ndlp);
78730cfe 19754 }
695a814e 19755 }
695a814e
JS
19756 mempool_free(mb, phba->mbox_mem_pool);
19757 }
d439d286
JS
19758
19759 /* Release the ndlp with the cleaned-up active mailbox command */
19760 if (act_mbx_ndlp) {
19761 spin_lock(shost->host_lock);
19762 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19763 spin_unlock(shost->host_lock);
19764 lpfc_nlp_put(act_mbx_ndlp);
695a814e 19765 }
695a814e
JS
19766}
19767
2a9bf3d0
JS
19768/**
19769 * lpfc_drain_txq - Drain the txq
19770 * @phba: Pointer to HBA context object.
19771 *
19772 * This function attempts to submit IOCBs from the txq
19773 * to the adapter. For SLI4 adapters, the txq contains
19774 * ELS IOCBs that have been deferred because there
19775 * are no SGLs available. This congestion can occur with
19776 * large vport counts during node discovery.
19777 **/
19778
19779uint32_t
19780lpfc_drain_txq(struct lpfc_hba *phba)
19781{
19782 LIST_HEAD(completions);
895427bd 19783 struct lpfc_sli_ring *pring;
2e706377 19784 struct lpfc_iocbq *piocbq = NULL;
2a9bf3d0
JS
19785 unsigned long iflags = 0;
19786 char *fail_msg = NULL;
19787 struct lpfc_sglq *sglq;
205e8240 19788 union lpfc_wqe128 wqe;
a2fc4aef 19789 uint32_t txq_cnt = 0;
dc19e3b4 19790 struct lpfc_queue *wq;
2a9bf3d0 19791
dc19e3b4
JS
19792 if (phba->link_flag & LS_MDS_LOOPBACK) {
19793 /* MDS WQEs are posted only to the first WQ */
c00f62e6 19794 wq = phba->sli4_hba.hdwq[0].io_wq;
dc19e3b4
JS
19795 if (unlikely(!wq))
19796 return 0;
19797 pring = wq->pring;
19798 } else {
19799 wq = phba->sli4_hba.els_wq;
19800 if (unlikely(!wq))
19801 return 0;
19802 pring = lpfc_phba_elsring(phba);
19803 }
19804
19805 if (unlikely(!pring) || list_empty(&pring->txq))
1234a6d5 19806 return 0;
895427bd 19807
398d81c9 19808 spin_lock_irqsave(&pring->ring_lock, iflags);
0e9bb8d7
JS
19809 list_for_each_entry(piocbq, &pring->txq, list) {
19810 txq_cnt++;
19811 }
19812
19813 if (txq_cnt > pring->txq_max)
19814 pring->txq_max = txq_cnt;
2a9bf3d0 19815
398d81c9 19816 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2a9bf3d0 19817
0e9bb8d7 19818 while (!list_empty(&pring->txq)) {
398d81c9 19819 spin_lock_irqsave(&pring->ring_lock, iflags);
2a9bf3d0 19820
19ca7609 19821 piocbq = lpfc_sli_ringtx_get(phba, pring);
a629852a 19822 if (!piocbq) {
398d81c9 19823 spin_unlock_irqrestore(&pring->ring_lock, iflags);
a629852a
JS
19824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19825 "2823 txq empty and txq_cnt is %d\n ",
0e9bb8d7 19826 txq_cnt);
a629852a
JS
19827 break;
19828 }
895427bd 19829 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
2a9bf3d0 19830 if (!sglq) {
19ca7609 19831 __lpfc_sli_ringtx_put(phba, pring, piocbq);
398d81c9 19832 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2a9bf3d0 19833 break;
2a9bf3d0 19834 }
0e9bb8d7 19835 txq_cnt--;
2a9bf3d0
JS
19836
19837 /* The xri and iocb resources secured,
19838 * attempt to issue request
19839 */
6d368e53 19840 piocbq->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0
JS
19841 piocbq->sli4_xritag = sglq->sli4_xritag;
19842 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19843 fail_msg = "to convert bpl to sgl";
205e8240 19844 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
2a9bf3d0 19845 fail_msg = "to convert iocb to wqe";
dc19e3b4 19846 else if (lpfc_sli4_wq_put(wq, &wqe))
2a9bf3d0
JS
19847 fail_msg = " - Wq is full";
19848 else
19849 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19850
19851 if (fail_msg) {
19852 /* Failed means we can't issue and need to cancel */
19853 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19854 "2822 IOCB failed %s iotag 0x%x "
19855 "xri 0x%x\n",
19856 fail_msg,
19857 piocbq->iotag, piocbq->sli4_xritag);
19858 list_add_tail(&piocbq->list, &completions);
19859 }
398d81c9 19860 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2a9bf3d0
JS
19861 }
19862
2a9bf3d0
JS
19863 /* Cancel all the IOCBs that cannot be issued */
19864 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19865 IOERR_SLI_ABORTED);
19866
0e9bb8d7 19867 return txq_cnt;
2a9bf3d0 19868}
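
The loop above drains deferred ELS IOCBs one at a time: it stops and leaves the remainder on the txq as soon as an SGL cannot be obtained, and anything that fails conversion or posting is collected and cancelled in one pass at the end. A minimal stand-alone sketch of that control flow, with purely hypothetical names and a credit counter standing in for the SGL pool:

#include <stdio.h>

static int txq[] = { 1, 2, 3, 4, 5 };	/* deferred request tags */
static int txq_cnt = 5;
static int sgl_credits = 4;		/* stand-in for free SGLs */

static int submit(int tag)
{
	return tag == 4 ? -1 : 0;	/* pretend tag 4 hits a full WQ */
}

int main(void)
{
	int cancelled = 0, head = 0;

	while (head < txq_cnt) {
		if (!sgl_credits)
			break;		/* no SGL left: keep the rest on the txq */
		sgl_credits--;
		if (submit(txq[head]))
			cancelled++;	/* collect failures, cancel them afterwards */
		head++;
	}
	printf("still queued %d, cancelled %d\n", txq_cnt - head, cancelled);
	return 0;
}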
895427bd
JS
19869
19870/**
19871 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19872 * @phba: Pointer to HBA context object.
19873 * @pwqeq: Pointer to command WQE.
19874 * @sglq: Pointer to the scatter gather queue object.
19875 *
19876 * This routine converts the bpl or bde that is in the WQE
19877 * to an sgl list for the sli4 hardware. The physical address
19878 * of the bpl/bde is converted back to a virtual address.
19879 * If the WQE contains a BPL then the list of BDEs is
19880 * converted to sli4_sges. If the WQE contains a single
19881 * BDE then it is converted to a single sli4_sge.
19882 * The WQE is still in cpu endianness so the contents of
19883 * the bpl can be used without byte swapping.
19884 *
19885 * Returns valid XRI = Success, NO_XRI = Failure.
19886 */
19887static uint16_t
19888lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19889 struct lpfc_sglq *sglq)
19890{
19891 uint16_t xritag = NO_XRI;
19892 struct ulp_bde64 *bpl = NULL;
19893 struct ulp_bde64 bde;
19894 struct sli4_sge *sgl = NULL;
19895 struct lpfc_dmabuf *dmabuf;
205e8240 19896 union lpfc_wqe128 *wqe;
895427bd
JS
19897 int numBdes = 0;
19898 int i = 0;
19899 uint32_t offset = 0; /* accumulated offset in the sg request list */
19900 int inbound = 0; /* number of sg reply entries inbound from firmware */
19901 uint32_t cmd;
19902
19903 if (!pwqeq || !sglq)
19904 return xritag;
19905
19906 sgl = (struct sli4_sge *)sglq->sgl;
19907 wqe = &pwqeq->wqe;
19908 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19909
19910 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19911 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19912 return sglq->sli4_xritag;
19913 numBdes = pwqeq->rsvd2;
19914 if (numBdes) {
19915 /* The addrHigh and addrLow fields within the WQE
19916 * have not been byteswapped yet so there is no
19917 * need to swap them back.
19918 */
19919 if (pwqeq->context3)
19920 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19921 else
19922 return xritag;
19923
19924 bpl = (struct ulp_bde64 *)dmabuf->virt;
19925 if (!bpl)
19926 return xritag;
19927
19928 for (i = 0; i < numBdes; i++) {
19929 /* Should already be byte swapped. */
19930 sgl->addr_hi = bpl->addrHigh;
19931 sgl->addr_lo = bpl->addrLow;
19932
19933 sgl->word2 = le32_to_cpu(sgl->word2);
19934 if ((i+1) == numBdes)
19935 bf_set(lpfc_sli4_sge_last, sgl, 1);
19936 else
19937 bf_set(lpfc_sli4_sge_last, sgl, 0);
19938 /* swap the size field back to the cpu so we
19939 * can assign it to the sgl.
19940 */
19941 bde.tus.w = le32_to_cpu(bpl->tus.w);
19942 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19943 /* The offsets in the sgl need to be accumulated
19944 * separately for the request and reply lists.
19945 * The request is always first, the reply follows.
19946 */
19947 switch (cmd) {
19948 case CMD_GEN_REQUEST64_WQE:
19949 /* add up the reply sg entries */
19950 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19951 inbound++;
19952 /* first inbound? reset the offset */
19953 if (inbound == 1)
19954 offset = 0;
19955 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19956 bf_set(lpfc_sli4_sge_type, sgl,
19957 LPFC_SGE_TYPE_DATA);
19958 offset += bde.tus.f.bdeSize;
19959 break;
19960 case CMD_FCP_TRSP64_WQE:
19961 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19962 bf_set(lpfc_sli4_sge_type, sgl,
19963 LPFC_SGE_TYPE_DATA);
19964 break;
19965 case CMD_FCP_TSEND64_WQE:
19966 case CMD_FCP_TRECEIVE64_WQE:
19967 bf_set(lpfc_sli4_sge_type, sgl,
19968 bpl->tus.f.bdeFlags);
19969 if (i < 3)
19970 offset = 0;
19971 else
19972 offset += bde.tus.f.bdeSize;
19973 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19974 break;
19975 }
19976 sgl->word2 = cpu_to_le32(sgl->word2);
19977 bpl++;
19978 sgl++;
19979 }
19980 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19981 /* The addrHigh and addrLow fields of the BDE have not
19982 * been byteswapped yet so they need to be swapped
19983 * before putting them in the sgl.
19984 */
19985 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19986 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19987 sgl->word2 = le32_to_cpu(sgl->word2);
19988 bf_set(lpfc_sli4_sge_last, sgl, 1);
19989 sgl->word2 = cpu_to_le32(sgl->word2);
19990 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19991 }
19992 return sglq->sli4_xritag;
19993}
19994
19995/**
19996 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19997 * @phba: Pointer to HBA context object.
19998 * @qp: Pointer to the hardware queue (HDWQ) to post the WQE to.
19999 * @pwqe: Pointer to command WQE.
20000 **/
20001int
1fbf9742 20002lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
895427bd
JS
20003 struct lpfc_iocbq *pwqe)
20004{
205e8240 20005 union lpfc_wqe128 *wqe = &pwqe->wqe;
7cacae2a 20006 struct lpfc_async_xchg_ctx *ctxp;
895427bd
JS
20007 struct lpfc_queue *wq;
20008 struct lpfc_sglq *sglq;
20009 struct lpfc_sli_ring *pring;
20010 unsigned long iflags;
cd22d605 20011 uint32_t ret = 0;
895427bd
JS
20012
20013 /* NVME_LS and NVME_LS ABTS requests. */
20014 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
20015 pring = phba->sli4_hba.nvmels_wq->pring;
6a828b0f
JS
20016 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20017 qp, wq_access);
895427bd
JS
20018 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20019 if (!sglq) {
20020 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20021 return WQE_BUSY;
20022 }
20023 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20024 pwqe->sli4_xritag = sglq->sli4_xritag;
20025 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20026 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20027 return WQE_ERROR;
20028 }
20029 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20030 pwqe->sli4_xritag);
cd22d605
DK
20031 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
20032 if (ret) {
895427bd 20033 spin_unlock_irqrestore(&pring->ring_lock, iflags);
cd22d605 20034 return ret;
895427bd 20035 }
cd22d605 20036
895427bd
JS
20037 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20038 spin_unlock_irqrestore(&pring->ring_lock, iflags);
93a4d6f4
JS
20039
20040 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
895427bd
JS
20041 return 0;
20042 }
20043
20044 /* NVME_FCREQ and NVME_ABTS requests */
20045 if (pwqe->iocb_flag & LPFC_IO_NVME) {
20046 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
c00f62e6 20047 wq = qp->io_wq;
1fbf9742 20048 pring = wq->pring;
895427bd 20049
c00f62e6 20050 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
895427bd 20051
6a828b0f
JS
20052 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20053 qp, wq_access);
cd22d605
DK
20054 ret = lpfc_sli4_wq_put(wq, wqe);
20055 if (ret) {
895427bd 20056 spin_unlock_irqrestore(&pring->ring_lock, iflags);
cd22d605 20057 return ret;
895427bd
JS
20058 }
20059 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20060 spin_unlock_irqrestore(&pring->ring_lock, iflags);
93a4d6f4
JS
20061
20062 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
895427bd
JS
20063 return 0;
20064 }
20065
f358dd0c
JS
20066 /* NVMET requests */
20067 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
20068 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
c00f62e6 20069 wq = qp->io_wq;
1fbf9742 20070 pring = wq->pring;
f358dd0c 20071
f358dd0c 20072 ctxp = pwqe->context2;
6c621a22 20073 sglq = ctxp->ctxbuf->sglq;
f358dd0c
JS
20074 if (pwqe->sli4_xritag == NO_XRI) {
20075 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20076 pwqe->sli4_xritag = sglq->sli4_xritag;
20077 }
20078 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20079 pwqe->sli4_xritag);
c00f62e6 20080 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
1fbf9742 20081
6a828b0f
JS
20082 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20083 qp, wq_access);
cd22d605
DK
20084 ret = lpfc_sli4_wq_put(wq, wqe);
20085 if (ret) {
f358dd0c 20086 spin_unlock_irqrestore(&pring->ring_lock, iflags);
cd22d605 20087 return ret;
f358dd0c
JS
20088 }
20089 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20090 spin_unlock_irqrestore(&pring->ring_lock, iflags);
93a4d6f4
JS
20091
20092 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
f358dd0c
JS
20093 return 0;
20094 }
895427bd
JS
20095 return WQE_ERROR;
20096}
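
Each request class above (NVME_LS, NVME, NVMET) picks its work queue differently, but every branch then runs the same submit sequence: lock the ring, post the WQE, track the request for completion, unlock, then poll the EQ. A stripped-down sketch of that shared sequence, using hypothetical types and a pthread mutex in place of the ring lock:

#include <pthread.h>
#include <stdio.h>

struct ring {
	pthread_mutex_t lock;
	int posted;	/* entries handed to the work queue */
	int tracked;	/* entries awaiting completion */
};

/* 0 on success, non-zero if the (hypothetical) work queue is full */
static int wq_put(struct ring *r)
{
	r->posted++;
	return 0;
}

static int issue_wqe(struct ring *r)
{
	int ret;

	pthread_mutex_lock(&r->lock);
	ret = wq_put(r);
	if (ret) {
		pthread_mutex_unlock(&r->lock);
		return ret;		/* caller sees the busy/error code */
	}
	r->tracked++;			/* track for completion handling */
	pthread_mutex_unlock(&r->lock);
	/* ...poll the event queue here... */
	return 0;
}

int main(void)
{
	struct ring r = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	int ret = issue_wqe(&r);

	printf("issue: %d, tracked: %d\n", ret, r.tracked);
	return 0;
}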
c490850a
JS
20097
20098#ifdef LPFC_MXP_STAT
20099/**
20100 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
20101 * @phba: pointer to lpfc hba data structure.
20102 * @hwqid: belong to which HWQ.
20103 *
20104 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
20105 * 15 seconds after a test case starts running.
20106 *
20107 * The user should call lpfc_debugfs_multixripools_write before running a test
20108 * case to clear stat_snapshot_taken. Then the user starts a test case. While
20109 * the test case is running, stat_snapshot_taken is incremented by 1 each time
20110 * this routine is called from the heartbeat timer. When stat_snapshot_taken is
20111 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
20112 **/
20113void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20114{
20115 struct lpfc_sli4_hdw_queue *qp;
20116 struct lpfc_multixri_pool *multixri_pool;
20117 struct lpfc_pvt_pool *pvt_pool;
20118 struct lpfc_pbl_pool *pbl_pool;
20119 u32 txcmplq_cnt;
20120
20121 qp = &phba->sli4_hba.hdwq[hwqid];
20122 multixri_pool = qp->p_multixri_pool;
20123 if (!multixri_pool)
20124 return;
20125
20126 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20127 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20128 pbl_pool = &qp->p_multixri_pool->pbl_pool;
c00f62e6 20129 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
c490850a
JS
20130
20131 multixri_pool->stat_pbl_count = pbl_pool->count;
20132 multixri_pool->stat_pvt_count = pvt_pool->count;
20133 multixri_pool->stat_busy_count = txcmplq_cnt;
20134 }
20135
20136 multixri_pool->stat_snapshot_taken++;
20137}
20138#endif
20139
20140/**
20141 * lpfc_adjust_pvt_pool_count - Adjust private pool count
20142 * @phba: pointer to lpfc hba data structure.
20143 * @hwqid: index of the HWQ whose pools are adjusted.
20144 *
20145 * This routine moves some XRIs from private to public pool when private pool
20146 * is not busy.
20147 **/
20148void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20149{
20150 struct lpfc_multixri_pool *multixri_pool;
20151 u32 io_req_count;
20152 u32 prev_io_req_count;
20153
20154 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20155 if (!multixri_pool)
20156 return;
20157 io_req_count = multixri_pool->io_req_count;
20158 prev_io_req_count = multixri_pool->prev_io_req_count;
20159
20160 if (prev_io_req_count != io_req_count) {
20161 /* Private pool is busy */
20162 multixri_pool->prev_io_req_count = io_req_count;
20163 } else {
20164 /* Private pool is not busy.
20165 * Move XRIs from private to public pool.
20166 */
20167 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
20168 }
20169}
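
The busy/idle test above is only a counter comparison: if no new IO requests have arrived since the previous heartbeat, the private pool is treated as idle and its XRIs can be handed back to the public pool. A tiny stand-alone model of that heuristic (hypothetical names):

#include <stdio.h>

struct pool_stats {
	unsigned int io_req_count;	/* bumped on every IO request */
	unsigned int prev_io_req_count;	/* value seen at the last check */
};

/* Returns 1 when the pool looks idle (no new requests since last check). */
static int pool_idle(struct pool_stats *s)
{
	if (s->prev_io_req_count != s->io_req_count) {
		s->prev_io_req_count = s->io_req_count;	/* busy: just resync */
		return 0;
	}
	return 1;			/* idle: caller may rebalance XRIs */
}

int main(void)
{
	struct pool_stats s = { 10, 7 };
	int first = pool_idle(&s);	/* counters differ -> busy -> 0 */
	int second = pool_idle(&s);	/* unchanged since last check -> idle -> 1 */

	printf("%d %d\n", first, second);
	return 0;
}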
20170
20171/**
20172 * lpfc_adjust_high_watermark - Adjust high watermark
20173 * @phba: pointer to lpfc hba data structure.
20174 * @hwqid: index of the HWQ whose watermark is adjusted.
20175 *
20176 * This routine sets the high watermark to the number of outstanding XRIs,
20177 * but makes sure the new value stays between xri_limit/2 and xri_limit.
20178 **/
20179void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20180{
20181 u32 new_watermark;
20182 u32 watermark_max;
20183 u32 watermark_min;
20184 u32 xri_limit;
20185 u32 txcmplq_cnt;
20186 u32 abts_io_bufs;
20187 struct lpfc_multixri_pool *multixri_pool;
20188 struct lpfc_sli4_hdw_queue *qp;
20189
20190 qp = &phba->sli4_hba.hdwq[hwqid];
20191 multixri_pool = qp->p_multixri_pool;
20192 if (!multixri_pool)
20193 return;
20194 xri_limit = multixri_pool->xri_limit;
20195
20196 watermark_max = xri_limit;
20197 watermark_min = xri_limit / 2;
20198
c00f62e6 20199 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
c490850a 20200 abts_io_bufs = qp->abts_scsi_io_bufs;
c00f62e6 20201 abts_io_bufs += qp->abts_nvme_io_bufs;
c490850a
JS
20202
20203 new_watermark = txcmplq_cnt + abts_io_bufs;
20204 new_watermark = min(watermark_max, new_watermark);
20205 new_watermark = max(watermark_min, new_watermark);
20206 multixri_pool->pvt_pool.high_watermark = new_watermark;
20207
20208#ifdef LPFC_MXP_STAT
20209 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20210 new_watermark);
20211#endif
20212}
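
The new high watermark is simply the number of currently outstanding XRIs (busy on the txcmplq plus aborted IO bufs) clamped to the range [xri_limit/2, xri_limit]. A short sketch of the same clamp with made-up example numbers:

#include <stdio.h>

static unsigned int clamp_hwm(unsigned int outstanding, unsigned int xri_limit)
{
	unsigned int lo = xri_limit / 2, hi = xri_limit;

	if (outstanding < lo)
		return lo;
	if (outstanding > hi)
		return hi;
	return outstanding;
}

int main(void)
{
	/* xri_limit = 512: 100 -> 256, 300 -> 300, 700 -> 512 */
	printf("%u %u %u\n", clamp_hwm(100, 512), clamp_hwm(300, 512),
	       clamp_hwm(700, 512));
	return 0;
}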
20213
20214/**
20215 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
20216 * @phba: pointer to lpfc hba data structure.
20217 * @hwqid: index of the HWQ whose private pool is drained.
20218 *
20219 * This routine is called from the heartbeat timer when pvt_pool is idle.
20220 * All free XRIs are moved from the private to the public pool on hwqid in 2 steps.
20221 * The first step moves (all - low_watermark) XRIs.
20222 * The second step moves the rest of the XRIs.
20223 **/
20224void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20225{
20226 struct lpfc_pbl_pool *pbl_pool;
20227 struct lpfc_pvt_pool *pvt_pool;
6a828b0f 20228 struct lpfc_sli4_hdw_queue *qp;
c490850a
JS
20229 struct lpfc_io_buf *lpfc_ncmd;
20230 struct lpfc_io_buf *lpfc_ncmd_next;
20231 unsigned long iflag;
20232 struct list_head tmp_list;
20233 u32 tmp_count;
20234
6a828b0f
JS
20235 qp = &phba->sli4_hba.hdwq[hwqid];
20236 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20237 pvt_pool = &qp->p_multixri_pool->pvt_pool;
c490850a
JS
20238 tmp_count = 0;
20239
6a828b0f
JS
20240 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20241 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
c490850a
JS
20242
20243 if (pvt_pool->count > pvt_pool->low_watermark) {
20244 /* Step 1: move (all - low_watermark) from pvt_pool
20245 * to pbl_pool
20246 */
20247
20248 /* Move low watermark of bufs from pvt_pool to tmp_list */
20249 INIT_LIST_HEAD(&tmp_list);
20250 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20251 &pvt_pool->list, list) {
20252 list_move_tail(&lpfc_ncmd->list, &tmp_list);
20253 tmp_count++;
20254 if (tmp_count >= pvt_pool->low_watermark)
20255 break;
20256 }
20257
20258 /* Move all bufs from pvt_pool to pbl_pool */
20259 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20260
20261 /* Move all bufs from tmp_list to pvt_pool */
20262 list_splice(&tmp_list, &pvt_pool->list);
20263
20264 pbl_pool->count += (pvt_pool->count - tmp_count);
20265 pvt_pool->count = tmp_count;
20266 } else {
20267 /* Step 2: move the rest from pvt_pool to pbl_pool */
20268 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20269 pbl_pool->count += pvt_pool->count;
20270 pvt_pool->count = 0;
20271 }
20272
20273 spin_unlock(&pvt_pool->lock);
20274 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20275}
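
In terms of counts, the two-step move works like this: while the private pool holds more than low_watermark bufs, only low_watermark of them are kept and the excess is spliced to the public pool; a later idle pass then drains the remainder. A counts-only sketch (hypothetical names):

#include <stdio.h>

struct pools { unsigned int pvt, pbl, low_wm; };

static void pvt_to_pbl(struct pools *p)
{
	if (p->pvt > p->low_wm) {
		p->pbl += p->pvt - p->low_wm;	/* step 1: keep only low_wm */
		p->pvt = p->low_wm;
	} else {
		p->pbl += p->pvt;		/* step 2: drain the rest */
		p->pvt = 0;
	}
}

int main(void)
{
	struct pools p = { 40, 100, 16 };

	pvt_to_pbl(&p);		/* pvt 40 -> 16, pbl 100 -> 124 */
	pvt_to_pbl(&p);		/* pvt 16 -> 0,  pbl 124 -> 140 */
	printf("pvt=%u pbl=%u\n", p.pvt, p.pbl);
	return 0;
}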
20276
20277/**
20278 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20279 * @phba: pointer to lpfc hba data structure
20280 * @pbl_pool: specified public free XRI pool
20281 * @pvt_pool: specified private free XRI pool
20282 * @count: number of XRIs to move
20283 *
20284 * This routine tries to move some free common bufs from the specified pbl_pool
20285 * to the specified pvt_pool. It might move fewer than count XRIs if there are not
20286 * enough in the public pool.
20287 *
20288 * Return:
20289 * true - if XRIs are successfully moved from the specified pbl_pool to the
20290 * specified pvt_pool
20291 * false - if the specified pbl_pool is empty or locked by someone else
20292 **/
20293static bool
6a828b0f
JS
20294_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20295 struct lpfc_pbl_pool *pbl_pool,
c490850a
JS
20296 struct lpfc_pvt_pool *pvt_pool, u32 count)
20297{
20298 struct lpfc_io_buf *lpfc_ncmd;
20299 struct lpfc_io_buf *lpfc_ncmd_next;
20300 unsigned long iflag;
20301 int ret;
20302
20303 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20304 if (ret) {
20305 if (pbl_pool->count) {
20306 /* Move a batch of XRIs from public to private pool */
6a828b0f 20307 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
c490850a
JS
20308 list_for_each_entry_safe(lpfc_ncmd,
20309 lpfc_ncmd_next,
20310 &pbl_pool->list,
20311 list) {
20312 list_move_tail(&lpfc_ncmd->list,
20313 &pvt_pool->list);
20314 pvt_pool->count++;
20315 pbl_pool->count--;
20316 count--;
20317 if (count == 0)
20318 break;
20319 }
20320
20321 spin_unlock(&pvt_pool->lock);
20322 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20323 return true;
20324 }
20325 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20326 }
20327
20328 return false;
20329}
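
Worth noting is the non-blocking lock acquisition: bufs are only moved if pbl_pool->lock can be taken immediately, otherwise the routine reports failure and the caller moves on to another HWQ's pool. A small pthread sketch of the same try-lock-or-skip pattern (illustrative only, not driver code):

#include <pthread.h>
#include <stdio.h>

struct pool { pthread_mutex_t lock; int count; };

/* Move up to batch entries from src to dst; skip src if it is locked or empty. */
static int try_move(struct pool *src, struct pool *dst, int batch)
{
	if (pthread_mutex_trylock(&src->lock))
		return 0;		/* someone else holds it: caller tries another pool */
	if (!src->count) {
		pthread_mutex_unlock(&src->lock);
		return 0;		/* empty: nothing to move */
	}
	pthread_mutex_lock(&dst->lock);
	while (src->count && batch--) {
		src->count--;
		dst->count++;
	}
	pthread_mutex_unlock(&dst->lock);
	pthread_mutex_unlock(&src->lock);
	return 1;
}

int main(void)
{
	struct pool pub = { PTHREAD_MUTEX_INITIALIZER, 8 };
	struct pool pvt = { PTHREAD_MUTEX_INITIALIZER, 0 };
	int moved = try_move(&pub, &pvt, 4);

	printf("moved=%d pvt=%d pub=%d\n", moved, pvt.count, pub.count);
	return 0;
}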
20330
20331/**
20332 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20333 * @phba: pointer to lpfc hba data structure.
20334 * @hwqid: index of the HWQ whose private pool is refilled.
20335 * @count: number of XRIs to move
20336 *
20337 * This routine tries to find some free common bufs in one of the public pools
20338 * using a round-robin method. The search checks the local hwqid first, then
20339 * continues from the HWQ found last time (rrb_next_hwqid). Once a public pool
20340 * with free bufs is found, a batch of them is moved to the private pool on hwqid.
20341 * It might move fewer than count XRIs if there are not enough in the public pools.
20342 **/
20343void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20344{
20345 struct lpfc_multixri_pool *multixri_pool;
20346 struct lpfc_multixri_pool *next_multixri_pool;
20347 struct lpfc_pvt_pool *pvt_pool;
20348 struct lpfc_pbl_pool *pbl_pool;
6a828b0f 20349 struct lpfc_sli4_hdw_queue *qp;
c490850a
JS
20350 u32 next_hwqid;
20351 u32 hwq_count;
20352 int ret;
20353
6a828b0f
JS
20354 qp = &phba->sli4_hba.hdwq[hwqid];
20355 multixri_pool = qp->p_multixri_pool;
c490850a
JS
20356 pvt_pool = &multixri_pool->pvt_pool;
20357 pbl_pool = &multixri_pool->pbl_pool;
20358
20359 /* Check if local pbl_pool is available */
6a828b0f 20360 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
c490850a
JS
20361 if (ret) {
20362#ifdef LPFC_MXP_STAT
20363 multixri_pool->local_pbl_hit_count++;
20364#endif
20365 return;
20366 }
20367
20368 hwq_count = phba->cfg_hdw_queue;
20369
20370 /* Get the next hwqid which was found last time */
20371 next_hwqid = multixri_pool->rrb_next_hwqid;
20372
20373 do {
20374 /* Go to next hwq */
20375 next_hwqid = (next_hwqid + 1) % hwq_count;
20376
20377 next_multixri_pool =
20378 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20379 pbl_pool = &next_multixri_pool->pbl_pool;
20380
20381 /* Check if the public free xri pool is available */
20382 ret = _lpfc_move_xri_pbl_to_pvt(
6a828b0f 20383 phba, qp, pbl_pool, pvt_pool, count);
c490850a
JS
20384
20385 /* Exit while-loop if success or all hwqid are checked */
20386 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20387
20388 /* Starting point for the next time */
20389 multixri_pool->rrb_next_hwqid = next_hwqid;
20390
20391 if (!ret) {
20392 /* stats: all public pools are empty*/
20393 multixri_pool->pbl_empty_count++;
20394 }
20395
20396#ifdef LPFC_MXP_STAT
20397 if (ret) {
20398 if (next_hwqid == hwqid)
20399 multixri_pool->local_pbl_hit_count++;
20400 else
20401 multixri_pool->other_pbl_hit_count++;
20402 }
20403#endif
20404}
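
The round-robin walk starts at the HWQ after rrb_next_hwqid, wraps modulo the HWQ count, and stops on the first pool that yields bufs or once every HWQ has been visited; the index where it stopped becomes the starting point for the next call. A stand-alone sketch of that walk (hypothetical names):

#include <stdio.h>

#define HWQ_COUNT 4

static int pool_has_bufs[HWQ_COUNT] = { 0, 0, 1, 0 };	/* only HWQ 2 has spare XRIs */
static unsigned int rrb_next_hwqid;

/* Returns the HWQ that provided bufs, or -1 if all public pools were empty. */
static int round_robin_borrow(void)
{
	unsigned int next = rrb_next_hwqid;
	int hit = 0;

	do {
		next = (next + 1) % HWQ_COUNT;
		hit = pool_has_bufs[next];
	} while (!hit && next != rrb_next_hwqid);

	rrb_next_hwqid = next;		/* start here next time */
	return hit ? (int)next : -1;
}

int main(void)
{
	printf("%d\n", round_robin_borrow());	/* 2 */
	printf("%d\n", round_robin_borrow());	/* 2 again (it still has bufs) */
	return 0;
}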
20405
20406/**
20407 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20408 * @phba: pointer to lpfc hba data structure.
20409 * @hwqid: index of the HWQ whose private pool is checked.
20410 *
20411 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is below the
20412 * low watermark.
20413 **/
20414void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20415{
20416 struct lpfc_multixri_pool *multixri_pool;
20417 struct lpfc_pvt_pool *pvt_pool;
20418
20419 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20420 pvt_pool = &multixri_pool->pvt_pool;
20421
20422 if (pvt_pool->count < pvt_pool->low_watermark)
20423 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20424}
20425
20426/**
20427 * lpfc_release_io_buf - Return one IO buf back to free pool
20428 * @phba: pointer to lpfc hba data structure.
20429 * @lpfc_ncmd: IO buf to be returned.
20430 * @qp: pointer to the HWQ the IO buf belongs to.
20431 *
20432 * This routine returns one IO buf back to free pool. If this is an urgent IO,
20433 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
20434 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
20435 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
20436 * lpfc_io_buf_list_put.
20437 **/
20438void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20439 struct lpfc_sli4_hdw_queue *qp)
20440{
20441 unsigned long iflag;
20442 struct lpfc_pbl_pool *pbl_pool;
20443 struct lpfc_pvt_pool *pvt_pool;
20444 struct lpfc_epd_pool *epd_pool;
20445 u32 txcmplq_cnt;
20446 u32 xri_owned;
20447 u32 xri_limit;
20448 u32 abts_io_bufs;
20449
20450 /* MUST zero fields if buffer is reused by another protocol */
20451 lpfc_ncmd->nvmeCmd = NULL;
20452 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20453 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20454
35a635af
JS
20455 if (phba->cfg_xpsgl && !phba->nvmet_support &&
20456 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
20457 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
20458
20459 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
20460 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
20461
c490850a
JS
20462 if (phba->cfg_xri_rebalancing) {
20463 if (lpfc_ncmd->expedite) {
20464 /* Return to expedite pool */
20465 epd_pool = &phba->epd_pool;
20466 spin_lock_irqsave(&epd_pool->lock, iflag);
20467 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20468 epd_pool->count++;
20469 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20470 return;
20471 }
20472
20473 /* Avoid invalid access if an IO sneaks in and is being rejected
20474 * just _after_ xri pools are destroyed in lpfc_offline.
20475 * Nothing much can be done at this point.
20476 */
20477 if (!qp->p_multixri_pool)
20478 return;
20479
20480 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20481 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20482
c00f62e6 20483 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
c490850a 20484 abts_io_bufs = qp->abts_scsi_io_bufs;
c00f62e6 20485 abts_io_bufs += qp->abts_nvme_io_bufs;
c490850a
JS
20486
20487 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20488 xri_limit = qp->p_multixri_pool->xri_limit;
20489
20490#ifdef LPFC_MXP_STAT
20491 if (xri_owned <= xri_limit)
20492 qp->p_multixri_pool->below_limit_count++;
20493 else
20494 qp->p_multixri_pool->above_limit_count++;
20495#endif
20496
20497 /* XRI goes to either public or private free xri pool
20498 * based on watermark and xri_limit
20499 */
20500 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20501 (xri_owned < xri_limit &&
20502 pvt_pool->count < pvt_pool->high_watermark)) {
6a828b0f
JS
20503 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20504 qp, free_pvt_pool);
c490850a
JS
20505 list_add_tail(&lpfc_ncmd->list,
20506 &pvt_pool->list);
20507 pvt_pool->count++;
20508 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20509 } else {
6a828b0f
JS
20510 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20511 qp, free_pub_pool);
c490850a
JS
20512 list_add_tail(&lpfc_ncmd->list,
20513 &pbl_pool->list);
20514 pbl_pool->count++;
20515 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20516 }
20517 } else {
6a828b0f
JS
20518 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20519 qp, free_xri);
c490850a
JS
20520 list_add_tail(&lpfc_ncmd->list,
20521 &qp->lpfc_io_buf_list_put);
20522 qp->put_io_bufs++;
20523 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20524 iflag);
20525 }
20526}
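
Stripped of the locking, the release path is a small decision tree: expedite IOs return to the expedite pool; otherwise the buf goes to the private pool when that pool is under its low watermark, or when the HWQ owns fewer XRIs than its limit and the private pool is still under its high watermark; everything else goes to the public pool. A sketch of that decision (hypothetical names):

#include <stdio.h>

enum dest { TO_EXPEDITE, TO_PVT, TO_PBL };

struct hwq_state {
	unsigned int pvt_count, low_wm, high_wm;
	unsigned int xri_owned, xri_limit;	/* pvt + busy + aborted vs. limit */
};

static enum dest release_dest(const struct hwq_state *q, int expedite)
{
	if (expedite)
		return TO_EXPEDITE;
	if (q->pvt_count < q->low_wm ||
	    (q->xri_owned < q->xri_limit && q->pvt_count < q->high_wm))
		return TO_PVT;
	return TO_PBL;
}

int main(void)
{
	struct hwq_state q = { .pvt_count = 20, .low_wm = 16, .high_wm = 64,
			       .xri_owned = 50, .xri_limit = 128 };

	printf("%d %d\n", release_dest(&q, 0), release_dest(&q, 1)); /* 1 0 */
	return 0;
}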
20527
20528/**
20529 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
20530 * @phba: pointer to lpfc hba data structure.
20531 * @pvt_pool: pointer to private pool data structure.
20532 * @ndlp: pointer to lpfc nodelist data structure.
20533 *
20534 * This routine tries to get one free IO buf from private pool.
20535 *
20536 * Return:
20537 * pointer to one free IO buf - if private pool is not empty
20538 * NULL - if private pool is empty
20539 **/
20540static struct lpfc_io_buf *
20541lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
6a828b0f 20542 struct lpfc_sli4_hdw_queue *qp,
c490850a
JS
20543 struct lpfc_pvt_pool *pvt_pool,
20544 struct lpfc_nodelist *ndlp)
20545{
20546 struct lpfc_io_buf *lpfc_ncmd;
20547 struct lpfc_io_buf *lpfc_ncmd_next;
20548 unsigned long iflag;
20549
6a828b0f 20550 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
c490850a
JS
20551 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20552 &pvt_pool->list, list) {
20553 if (lpfc_test_rrq_active(
20554 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20555 continue;
20556 list_del(&lpfc_ncmd->list);
20557 pvt_pool->count--;
20558 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20559 return lpfc_ncmd;
20560 }
20561 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20562
20563 return NULL;
20564}
20565
20566/**
20567 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20568 * @phba: pointer to lpfc hba data structure.
20569 *
20570 * This routine tries to get one free IO buf from expedite pool.
20571 *
20572 * Return:
20573 * pointer to one free IO buf - if expedite pool is not empty
20574 * NULL - if expedite pool is empty
20575 **/
20576static struct lpfc_io_buf *
20577lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20578{
20579 struct lpfc_io_buf *lpfc_ncmd;
20580 struct lpfc_io_buf *lpfc_ncmd_next;
20581 unsigned long iflag;
20582 struct lpfc_epd_pool *epd_pool;
20583
20584 epd_pool = &phba->epd_pool;
20585 lpfc_ncmd = NULL;
20586
20587 spin_lock_irqsave(&epd_pool->lock, iflag);
20588 if (epd_pool->count > 0) {
20589 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20590 &epd_pool->list, list) {
20591 list_del(&lpfc_ncmd->list);
20592 epd_pool->count--;
20593 break;
20594 }
20595 }
20596 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20597
20598 return lpfc_ncmd;
20599}
20600
20601/**
20602 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
20603 * @phba: pointer to lpfc hba data structure.
20604 * @ndlp: pointer to lpfc nodelist data structure.
20605 * @hwqid: index of the HWQ to allocate from
20606 * @expedite: 1 means this request is urgent.
20607 *
20608 * This routine will do the following actions and then return a pointer to
20609 * one free IO buf.
20610 *
20611 * 1. If private free xri count is empty, move some XRIs from public to
20612 * private pool.
20613 * 2. Get one XRI from private free xri pool.
20614 * 3. If we fail to get one from pvt_pool and this is an expedite request,
20615 * get one free xri from expedite pool.
20616 *
20617 * Note: ndlp is only used on SCSI side for RRQ testing.
20618 * The caller should pass NULL for ndlp on NVME side.
20619 *
20620 * Return:
20621 * pointer to one free IO buf - if private pool is not empty
20622 * NULL - if private pool is empty
20623 **/
20624static struct lpfc_io_buf *
20625lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20626 struct lpfc_nodelist *ndlp,
20627 int hwqid, int expedite)
20628{
20629 struct lpfc_sli4_hdw_queue *qp;
20630 struct lpfc_multixri_pool *multixri_pool;
20631 struct lpfc_pvt_pool *pvt_pool;
20632 struct lpfc_io_buf *lpfc_ncmd;
20633
20634 qp = &phba->sli4_hba.hdwq[hwqid];
20635 lpfc_ncmd = NULL;
20636 multixri_pool = qp->p_multixri_pool;
20637 pvt_pool = &multixri_pool->pvt_pool;
20638 multixri_pool->io_req_count++;
20639
20640 /* If pvt_pool is empty, move some XRIs from public to private pool */
20641 if (pvt_pool->count == 0)
20642 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20643
20644 /* Get one XRI from private free xri pool */
6a828b0f 20645 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
c490850a
JS
20646
20647 if (lpfc_ncmd) {
20648 lpfc_ncmd->hdwq = qp;
20649 lpfc_ncmd->hdwq_no = hwqid;
20650 } else if (expedite) {
20651 /* If we fail to get one from pvt_pool and this is an expedite
20652 * request, get one free xri from expedite pool.
20653 */
20654 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20655 }
20656
20657 return lpfc_ncmd;
20658}
20659
20660static inline struct lpfc_io_buf *
20661lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20662{
20663 struct lpfc_sli4_hdw_queue *qp;
20664 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20665
20666 qp = &phba->sli4_hba.hdwq[idx];
20667 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20668 &qp->lpfc_io_buf_list_get, list) {
20669 if (lpfc_test_rrq_active(phba, ndlp,
20670 lpfc_cmd->cur_iocbq.sli4_lxritag))
20671 continue;
20672
20673 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20674 continue;
20675
20676 list_del_init(&lpfc_cmd->list);
20677 qp->get_io_bufs--;
20678 lpfc_cmd->hdwq = qp;
20679 lpfc_cmd->hdwq_no = idx;
20680 return lpfc_cmd;
20681 }
20682 return NULL;
20683}
20684
20685/**
20686 * lpfc_get_io_buf - Get one IO buffer from free pool
20687 * @phba: The HBA for which this call is being executed.
20688 * @ndlp: pointer to lpfc nodelist data structure.
20689 * @hwqid: index of the HWQ to allocate from
20690 * @expedite: 1 means this request is urgent.
20691 *
20692 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
20693 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0, it removes
20694 * an IO buffer from the head of the @hdwq io_buf_list and returns it to the caller.
20695 *
20696 * Note: ndlp is only used on SCSI side for RRQ testing.
20697 * The caller should pass NULL for ndlp on NVME side.
20698 *
20699 * Return codes:
20700 * NULL - Error
20701 * Pointer to lpfc_io_buf - Success
20702 **/
20703struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20704 struct lpfc_nodelist *ndlp,
20705 u32 hwqid, int expedite)
20706{
20707 struct lpfc_sli4_hdw_queue *qp;
20708 unsigned long iflag;
20709 struct lpfc_io_buf *lpfc_cmd;
20710
20711 qp = &phba->sli4_hba.hdwq[hwqid];
20712 lpfc_cmd = NULL;
20713
20714 if (phba->cfg_xri_rebalancing)
20715 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20716 phba, ndlp, hwqid, expedite);
20717 else {
6a828b0f
JS
20718 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20719 qp, alloc_xri_get);
c490850a
JS
20720 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20721 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20722 if (!lpfc_cmd) {
6a828b0f
JS
20723 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20724 qp, alloc_xri_put);
c490850a
JS
20725 list_splice(&qp->lpfc_io_buf_list_put,
20726 &qp->lpfc_io_buf_list_get);
20727 qp->get_io_bufs += qp->put_io_bufs;
20728 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20729 qp->put_io_bufs = 0;
20730 spin_unlock(&qp->io_buf_list_put_lock);
20731 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20732 expedite)
20733 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20734 }
20735 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
20736 }
20737
20738 return lpfc_cmd;
20739}
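
When XRI rebalancing is disabled, the allocation path falls back to the classic two-list scheme: consumers take from a 'get' list, frees land on a 'put' list, and only when the get list runs dry is the whole put list spliced over in one step, which keeps the two locks mostly uncontended. A counts-only sketch of that refill step (illustrative, not driver code):

#include <stdio.h>

struct buf_lists { unsigned int get_bufs, put_bufs; };

/* Take one buffer, refilling the get side from the put side when empty. */
static int take_buf(struct buf_lists *l)
{
	if (!l->get_bufs) {
		l->get_bufs += l->put_bufs;	/* splice put list into get list */
		l->put_bufs = 0;
	}
	if (!l->get_bufs)
		return -1;			/* both lists empty */
	l->get_bufs--;
	return 0;
}

int main(void)
{
	struct buf_lists l = { 0, 3 };
	int taken = 0;

	while (take_buf(&l) == 0)
		taken++;
	printf("taken=%d\n", taken);		/* 3 */
	return 0;
}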
d79c9e9d
JS
20740
20741/**
20742 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
20743 * @phba: The HBA for which this call is being executed.
20744 * @lpfc_buf: IO buf structure to append the SGL chunk
20745 *
20746 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
20747 * and will allocate an SGL chunk if the pool is empty.
20748 *
20749 * Return codes:
20750 * NULL - Error
20751 * Pointer to sli4_hybrid_sgl - Success
20752 **/
20753struct sli4_hybrid_sgl *
20754lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20755{
20756 struct sli4_hybrid_sgl *list_entry = NULL;
20757 struct sli4_hybrid_sgl *tmp = NULL;
20758 struct sli4_hybrid_sgl *allocated_sgl = NULL;
20759 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20760 struct list_head *buf_list = &hdwq->sgl_list;
a4c21acc 20761 unsigned long iflags;
d79c9e9d 20762
a4c21acc 20763 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
20764
20765 if (likely(!list_empty(buf_list))) {
20766 /* break off 1 chunk from the sgl_list */
20767 list_for_each_entry_safe(list_entry, tmp,
20768 buf_list, list_node) {
20769 list_move_tail(&list_entry->list_node,
20770 &lpfc_buf->dma_sgl_xtra_list);
20771 break;
20772 }
20773 } else {
20774 /* allocate more */
a4c21acc 20775 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
d79c9e9d 20776 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
4583a4f6 20777 cpu_to_node(hdwq->io_wq->chann));
d79c9e9d
JS
20778 if (!tmp) {
20779 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20780 "8353 error kmalloc memory for HDWQ "
20781 "%d %s\n",
20782 lpfc_buf->hdwq_no, __func__);
20783 return NULL;
20784 }
20785
20786 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
20787 GFP_ATOMIC, &tmp->dma_phys_sgl);
20788 if (!tmp->dma_sgl) {
20789 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20790 "8354 error pool_alloc memory for HDWQ "
20791 "%d %s\n",
20792 lpfc_buf->hdwq_no, __func__);
20793 kfree(tmp);
20794 return NULL;
20795 }
20796
a4c21acc 20797 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
20798 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
20799 }
20800
20801 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
20802 struct sli4_hybrid_sgl,
20803 list_node);
20804
a4c21acc 20805 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
20806
20807 return allocated_sgl;
20808}
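
The SGL-chunk helper shows a common lazy-growth pattern: take a chunk from the per-HWQ free list under the lock; if the list is empty, drop the lock, allocate a new chunk, and only then reattach it. A simplified pthread sketch of that pattern (hypothetical types; the driver's DMA pool allocation is reduced to a plain malloc here):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct chunk { struct chunk *next; };

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct chunk *free_list;

static struct chunk *get_chunk(void)
{
	struct chunk *c;

	pthread_mutex_lock(&pool_lock);
	c = free_list;
	if (c) {
		free_list = c->next;		/* fast path: reuse a pooled chunk */
		pthread_mutex_unlock(&pool_lock);
		return c;
	}
	pthread_mutex_unlock(&pool_lock);	/* slow path: allocate unlocked */
	c = malloc(sizeof(*c));
	return c;				/* NULL if allocation failed */
}

static void put_chunk(struct chunk *c)
{
	pthread_mutex_lock(&pool_lock);
	c->next = free_list;			/* return to the per-pool free list */
	free_list = c;
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	struct chunk *c = get_chunk();

	if (!c)
		return 1;
	put_chunk(c);
	printf("pooled=%d\n", free_list != NULL);
	return 0;
}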
20809
20810/**
20811 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
20812 * @phba: The HBA for which this call is being executed.
20813 * @lpfc_buf: IO buf structure with the SGL chunk
20814 *
20815 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
20816 *
20817 * Return codes:
20818 * 0 - Success
20819 * -EINVAL - Error
20820 **/
20821int
20822lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20823{
20824 int rc = 0;
20825 struct sli4_hybrid_sgl *list_entry = NULL;
20826 struct sli4_hybrid_sgl *tmp = NULL;
20827 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20828 struct list_head *buf_list = &hdwq->sgl_list;
a4c21acc 20829 unsigned long iflags;
d79c9e9d 20830
a4c21acc 20831 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
20832
20833 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
20834 list_for_each_entry_safe(list_entry, tmp,
20835 &lpfc_buf->dma_sgl_xtra_list,
20836 list_node) {
20837 list_move_tail(&list_entry->list_node,
20838 buf_list);
20839 }
20840 } else {
20841 rc = -EINVAL;
20842 }
20843
a4c21acc 20844 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
20845 return rc;
20846}
20847
20848/**
20849 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
20850 * @phba: phba object
20851 * @hdwq: hdwq to cleanup sgl buff resources on
20852 *
20853 * This routine frees all SGL chunks of hdwq SGL chunk pool.
20854 *
20855 * Return codes:
20856 * None
20857 **/
20858void
20859lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
20860 struct lpfc_sli4_hdw_queue *hdwq)
20861{
20862 struct list_head *buf_list = &hdwq->sgl_list;
20863 struct sli4_hybrid_sgl *list_entry = NULL;
20864 struct sli4_hybrid_sgl *tmp = NULL;
a4c21acc 20865 unsigned long iflags;
d79c9e9d 20866
a4c21acc 20867 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
20868
20869 /* Free sgl pool */
20870 list_for_each_entry_safe(list_entry, tmp,
20871 buf_list, list_node) {
20872 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
20873 list_entry->dma_sgl,
20874 list_entry->dma_phys_sgl);
20875 list_del(&list_entry->list_node);
20876 kfree(list_entry);
20877 }
20878
a4c21acc 20879 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
20880}
20881
20882/**
20883 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
20884 * @phba: The HBA for which this call is being executed.
20885 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
20886 *
20887 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
20888 * and will allocate a CMD/RSP buffer if the pool is empty.
20889 *
20890 * Return codes:
20891 * NULL - Error
20892 * Pointer to fcp_cmd_rsp_buf - Success
20893 **/
20894struct fcp_cmd_rsp_buf *
20895lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20896 struct lpfc_io_buf *lpfc_buf)
20897{
20898 struct fcp_cmd_rsp_buf *list_entry = NULL;
20899 struct fcp_cmd_rsp_buf *tmp = NULL;
20900 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
20901 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20902 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
a4c21acc 20903 unsigned long iflags;
d79c9e9d 20904
a4c21acc 20905 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
20906
20907 if (likely(!list_empty(buf_list))) {
20908 /* break off 1 chunk from the list */
20909 list_for_each_entry_safe(list_entry, tmp,
20910 buf_list,
20911 list_node) {
20912 list_move_tail(&list_entry->list_node,
20913 &lpfc_buf->dma_cmd_rsp_list);
20914 break;
20915 }
20916 } else {
20917 /* allocate more */
a4c21acc 20918 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
d79c9e9d 20919 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
4583a4f6 20920 cpu_to_node(hdwq->io_wq->chann));
d79c9e9d
JS
20921 if (!tmp) {
20922 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20923 "8355 error kmalloc memory for HDWQ "
20924 "%d %s\n",
20925 lpfc_buf->hdwq_no, __func__);
20926 return NULL;
20927 }
20928
20929 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
20930 GFP_ATOMIC,
20931 &tmp->fcp_cmd_rsp_dma_handle);
20932
20933 if (!tmp->fcp_cmnd) {
20934 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20935 "8356 error pool_alloc memory for HDWQ "
20936 "%d %s\n",
20937 lpfc_buf->hdwq_no, __func__);
20938 kfree(tmp);
20939 return NULL;
20940 }
20941
20942 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
20943 sizeof(struct fcp_cmnd));
20944
a4c21acc 20945 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
20946 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
20947 }
20948
20949 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
20950 struct fcp_cmd_rsp_buf,
20951 list_node);
20952
a4c21acc 20953 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
20954
20955 return allocated_buf;
20956}
20957
20958/**
20959 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
20960 * @phba: The HBA for which this call is being executed.
20961 * @lpfc_buf: IO buf structure with the CMD/RSP buf
20962 *
20963 * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP buffer pool.
20964 *
20965 * Return codes:
20966 * 0 - Success
20967 * -EINVAL - Error
20968 **/
20969int
20970lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20971 struct lpfc_io_buf *lpfc_buf)
20972{
20973 int rc = 0;
20974 struct fcp_cmd_rsp_buf *list_entry = NULL;
20975 struct fcp_cmd_rsp_buf *tmp = NULL;
20976 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20977 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
a4c21acc 20978 unsigned long iflags;
d79c9e9d 20979
a4c21acc 20980 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
20981
20982 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
20983 list_for_each_entry_safe(list_entry, tmp,
20984 &lpfc_buf->dma_cmd_rsp_list,
20985 list_node) {
20986 list_move_tail(&list_entry->list_node,
20987 buf_list);
20988 }
20989 } else {
20990 rc = -EINVAL;
20991 }
20992
a4c21acc 20993 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
20994 return rc;
20995}
20996
20997/**
20998 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
20999 * @phba: phba object
21000 * @hdwq: hdwq to cleanup cmd rsp buff resources on
21001 *
21002 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
21003 *
21004 * Return codes:
21005 * None
21006 **/
21007void
21008lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21009 struct lpfc_sli4_hdw_queue *hdwq)
21010{
21011 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21012 struct fcp_cmd_rsp_buf *list_entry = NULL;
21013 struct fcp_cmd_rsp_buf *tmp = NULL;
a4c21acc 21014 unsigned long iflags;
d79c9e9d 21015
a4c21acc 21016 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
d79c9e9d
JS
21017
21018 /* Free cmd_rsp buf pool */
21019 list_for_each_entry_safe(list_entry, tmp,
21020 buf_list,
21021 list_node) {
21022 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
21023 list_entry->fcp_cmnd,
21024 list_entry->fcp_cmd_rsp_dma_handle);
21025 list_del(&list_entry->list_node);
21026 kfree(list_entry);
21027 }
21028
a4c21acc 21029 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
d79c9e9d 21030}