/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_cqe *);
static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
				       int);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

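	/*
	 * Note: one slot is always left unused, so a full queue
	 * (host_index one entry behind hba_index) can be told apart
	 * from an empty one (host_index == hba_index).
	 */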
	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

	return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return eqe;
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

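	/*
	 * Clearing the valid bit hands each processed EQE back to the
	 * HBA for reuse; the doorbell write below reports how many
	 * entries were popped and, if @arm is set, re-arms the queue.
	 */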
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe to the next available entry on
 * the @hq and the contents of @drqe to the next available entry on the @dq.
 * This function will then ring the Receive Queue Doorbell to signal the
 * HBA to start processing the Receive Queue Entries. This function returns the
 * index that the rqe was copied to if successful. If no entries are available
 * on @hq then this function will return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

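	/*
	 * The doorbell is only rung once every hq->entry_repost postings,
	 * reporting entry_repost new RQEs at a time, so the MMIO write is
	 * amortized across many buffer postings.
	 */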
	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       hq->entry_repost);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of both queues to reflect consumption
 * of one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov + 1);
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
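	/*
	 * Expired RRQs were moved onto the local send_rrq list above,
	 * so they can be processed here without holding the hbalock.
	 */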
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If ndlp is NULL remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_cleanup_wt_rrqs - Remove all rrqs from the active list.
 * @phba: Pointer to HBA context object.
 *
 * Remove all rrqs from the phba->active_rrq_list and free them by
 * calling lpfc_clr_rrq_active.
 *
 **/
void
lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov * 2);
	list_splice_init(&phba->active_rrq_list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq was activated for this xri,
 *         < 0 on no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		goto out;

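	/*
	 * Drop the hbalock before allocating: mempool_alloc() with
	 * GFP_KERNEL may sleep, which is not allowed under a spinlock.
	 */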
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	rrq->send_rrq = send_rrq;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

/**
 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, and it returns a pointer to the
 * newly allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else
		ndlp = piocbq->context1;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
					 struct lpfc_sglq, list);
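			/*
			 * Wrapping back around to the first sglq we tried
			 * means every free XRI has an RRQ pending, so give
			 * up and fail the allocation.
			 */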
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_sgl_list);

			/* Check if TXQ queue needs to be serviced */
			if (pring->txq_cnt)
				lpfc_worker_wake_up(phba);
		}
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_Q;
	pring->txcmplq_cnt++;
	if (pring->txcmplq_cnt > pring->txcmplq_max)
		pring->txcmplq_max = pring->txcmplq_cnt;

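	/*
	 * For ELS commands (other than abort/close), restart the vport
	 * ELS timeout; fc_ratov << 1 sets it to twice the R_A_TOV value.
	 */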
	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;

	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
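		/*
		 * Drop the lock for the allocation: kzalloc(GFP_KERNEL)
		 * may sleep. After reacquiring it, re-check the table
		 * length in case another thread grew the array first.
		 */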
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform firmware
 * that there is pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

3621a710 1544 * lpfc_sli_resume_iocb - Process iocbs in the txq
e59058c4
JS
1545 * @phba: Pointer to HBA context object.
1546 * @pring: Pointer to driver SLI ring object.
1547 *
1548 * This function is called with hbalock held to post pending iocbs
1549 * in the txq to the firmware. This function is called when driver
1550 * detects space available in the ring.
1551 **/
dea3101e 1552static void
2e0fef85 1553lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e
JB
1554{
1555 IOCB_t *iocb;
1556 struct lpfc_iocbq *nextiocb;
1557
1558 /*
1559 * Check to see if:
1560 * (a) there is anything on the txq to send
1561 * (b) link is up
1562 * (c) link attention events can be processed (fcp ring only)
1563 * (d) IOCB processing is not blocked by the outstanding mbox command.
1564 */
1565 if (pring->txq_cnt &&
2e0fef85 1566 lpfc_is_link_up(phba) &&
dea3101e 1567 (pring->ringno != phba->sli.fcp_ring ||
0b727fea 1568 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea3101e
JB
1569
1570 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1571 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1572 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1573
1574 if (iocb)
1575 lpfc_sli_update_ring(phba, pring);
1576 else
1577 lpfc_sli_update_full_ring(phba, pring);
1578 }
1579
1580 return;
1581}
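
/*
 * Illustrative note (not part of the driver): the drain loop above exits on
 * whichever runs out first. If iocb is NULL on exit, the command ring itself
 * filled up, so the full-ring path is taken to request an interrupt once
 * slots free up; otherwise the txq drained first and a plain chip attention
 * update suffices.
 */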

/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is a free slot
 * available for the HBQ it will return a pointer to the next available
 * HBQ entry, else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
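
/*
 * Illustrative sketch (not driver code): the slot computation above is the
 * classic circular-ring producer check -- the ring is treated as full when
 * advancing the put index would land on the get index:
 *
 *	next_put = (put + 1 == entry_count) ? 0 : put + 1;
 *	if (next_put == get)
 *		refresh 'get' from the adapter and re-check before giving up;
 */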

/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffers that are in-flight */
	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
				 list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
					hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * zero if it successfully posts the buffer, else it will return an
 * error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero
 * if it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}

/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			      &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = rc;
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
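
/*
 * Illustrative note (not part of the driver): on SLI4 each receive buffer is
 * posted as a header/data RQE pair -- hrqe carries the DMA address of the
 * header buffer (hbuf) and drqe that of the payload buffer (dbuf). On
 * success the value lpfc_sli4_rq_put() returns is reused above as the
 * buffer tag (hbq_buf->tag = rc).
 */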

/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,
	.init_count = 0,
	.add_count = 5,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};
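
/*
 * Illustrative note (not part of the driver): for the ELS HBQ above, 40
 * buffers are posted at initialization (init_count) and the queue is
 * replenished 40 at a time (add_count); lpfc_sli_hbqbuf_fill_hbqs() below
 * clamps the total so buffer_count never exceeds entry_count (256).
 */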

/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);

	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
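
/*
 * Illustrative sketch (not driver code): the tag assigned above packs the
 * HBQ number into the upper 16 bits and the buffer's ordinal within that
 * HBQ into the lower 16 bits, so the owning HBQ can later be recovered from
 * a returned buffer with a shift:
 *
 *	tag   = (hbqno << 16) | buffer_index;
 *	hbqno = tag >> 16;
 *	index = tag & 0xffff;
 */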

/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
						 lpfc_hbq_defs[qno]->add_count);
}

/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->init_count);
}

/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the hbq buffer list to take the buffer from.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}

/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function is called with no lock held; it takes the hbalock itself
 * while it searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the
 * hbq_buffer, otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}

/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. This function gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}

/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}

/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed to by context1
 * of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;
	unsigned long drvr_flag;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
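
/*
 * Illustrative sketch (not driver code) of the pairing this handler relies
 * on: the synchronous issuer parks a wait queue head in context1, issues the
 * mailbox, and sleeps until this completion handler sets LPFC_MBX_WAKE:
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
 *	pmboxq->context1 = &done_q;
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	wait_event_interruptible_timeout(done_q,
 *			pmboxq->mbox_flag & LPFC_MBX_WAKE, timeout);
 */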

/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue an UNREG_LOGIN to re-claim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after the node was destroyed or the node
	 * is in re-discovery, the driver needs to clean up the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->context2;
		lpfc_nlp_put(ndlp);
		pmb->context2 = NULL;
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not been done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to the upper layers. The
 * interrupt service routine processes the mailbox completion interrupt,
 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
 * will return the completed mailbox commands in the mboxq_cmpl queue to the
 * upper layers. This function returns the mailbox commands to the upper
 * layer by calling the completion handler function of each mailbox.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
			else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if an unknown mbox command completes.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"(%d):0305 Mbox cmd cmpl "
					"error - RETRYing Data: x%x "
					"(x%x/x%x) x%x x%x x%x\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb),
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					pmb->vport->port_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}

/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT is set
 * in the tag, the buffer was posted for a particular exchange and
 * the function will return the buffer without replacing the buffer.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}

/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * This function is called with no lock held. This function uses the r_ctl and
 * type of the received sequence to find the correct callback function to call
 * to process the sequence.
 **/
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
									saveq);
		return 1;
	}
	/* We must search, based on rctl / type for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
						(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}
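
/*
 * Illustrative sketch (not driver code): the per-ring prt[] table consulted
 * above is populated at SLI setup time with (rctl, type) -> handler
 * registrations, along the lines of:
 *
 *	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
 *	pring->prt[0].type = FC_TYPE_ELS;
 *	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
 */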

/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object, otherwise
 * upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	uint32_t match;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	match = 0;
	irsp = &(saveq->iocb);

	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search the continue save queue for the same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
			    saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}

/**
 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given response iocb using the iotag of the
 * response iocb. This function is called with the hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb, else it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
			pring->txcmplq_cnt--;
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
		}
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}

/**
 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given iotag. This function is called with the
 * hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb, else it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
			pring->txcmplq_cnt--;
		}
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0372 iotag x%x is out of range: max iotag (x%x)\n",
			iotag, phba->sli.last_iotag);
	return NULL;
}
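
/*
 * Illustrative note (not part of the driver): both lookup helpers above rely
 * on the iotag being a direct index into the flat iocbq_lookup array sized
 * at ring setup, which makes matching a completion to its command O(1):
 *
 *	cmd_iocb = phba->sli.iocbq_lookup[iotag];  iotag in [1, last_iotag]
 */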

/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			    (pring->ringno == LPFC_ELS_RING) &&
			    (cmdiocbp->iocb.ulpCommand ==
			     CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
							LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Set cmdiocb flag for the
						 * exchange busy so sgl (xri)
						 * will not be released until
						 * the abort xri is received
						 * from hba.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear LPFC_DRIVER_ABORTED
						 * bit in case it was driver
						 * initiated abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					 "0322 Ring %d handler: "
					 "unexpected completion IoTag x%x "
					 "Data: x%x x%x x%x x%x\n",
					 pring->ringno,
					 saveq->iocb.ulpIoTag,
					 saveq->iocb.ulpStatus,
					 saveq->iocb.un.ulpWord[4],
					 saveq->iocb.ulpCommand,
					 saveq->iocb.ulpContext);
		}
	}

	return rc;
}

/**
 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called from the iocb ring event handlers when the
 * put pointer is ahead of the get pointer for a ring. This function signals
 * an error attention condition to the worker thread and the worker
 * thread will transition the HBA to offline state.
 **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_poll_eratt - Error attention polling timer timeout handler
 * @ptr: Pointer to address of HBA context object.
 *
 * This function is invoked by the Error Attention polling timer when the
 * timer times out. It will check the SLI Error Attention register for
 * possible attention events. If so, it will post an Error Attention event
 * and wake up the worker thread to process it. Otherwise, it will set up the
 * Error Attention polling timer for the next poll.
 **/
void lpfc_poll_eratt(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t eratt = 0;

	phba = (struct lpfc_hba *)ptr;

	/* Check chip HA register for error event */
	eratt = lpfc_sli_check_eratt(phba);

	if (eratt)
		/* Tell the worker thread there is work to do */
		lpfc_worker_wake_up(phba);
	else
		/* Restart the timer for next eratt poll */
		mod_timer(&phba->eratt_poll, jiffies +
					HZ * LPFC_ERATT_POLL_INTERVAL);
	return;
}
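
/*
 * Illustrative sketch (not driver code): this handler assumes the timer was
 * armed during HBA setup in the pre-timer_setup() style of this kernel,
 * with the phba pointer passed through the 'data' cookie:
 *
 *	init_timer(&phba->eratt_poll);
 *	phba->eratt_poll.function = lpfc_poll_eratt;
 *	phba->eratt_poll.data = (unsigned long)phba;
 *	mod_timer(&phba->eratt_poll,
 *		  jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
 */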

/**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the interrupt context when there is a ring
 * event for the fcp ring. The caller does not hold any lock.
 * The function processes each response iocb in the response ring until it
 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
 * LE bit set. The function will call the completion handler of the command
 * iocb if the response iocb indicates a completion for a command iocb or it
 * is an abort completion. The function will call lpfc_sli_process_unsol_iocb
 * function if this is an unsolicited iocb.
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 */
int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries. If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}
	if (phba->fcp_ring_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	} else
		phba->fcp_ring_in_use = 1;

	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure. The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI device.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->lpfc_rampdown_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port. No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if (unlikely(!cmdiocbq))
				break;
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			if (cmdiocbq->iocb_cmpl) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed. Update the ring
		 * pointer in SLIM. If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
3027
e59058c4 3028/**
3772a991
JS
3029 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3030 * @phba: Pointer to HBA context object.
3031 * @pring: Pointer to driver SLI ring object.
3032 * @rspiocbp: Pointer to driver response IOCB object.
3033 *
3034 * This function is called from the worker thread when there is a slow-path
3035 * response IOCB to process. This function chains all the response iocbs until
3036 * seeing the iocb with the LE bit set. The function will call
3037 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3038 * completion of a command iocb. The function will call the
3039 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3040 * The function frees the resources or calls the completion handler if this
3041 * iocb is an abort completion. The function returns NULL when the response
3042 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3043 * this function shall chain the iocb on to the iocb_continueq and return the
3044 * response iocb passed in.
3045 **/
3046static struct lpfc_iocbq *
3047lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3048 struct lpfc_iocbq *rspiocbp)
3049{
3050 struct lpfc_iocbq *saveq;
3051 struct lpfc_iocbq *cmdiocbp;
3052 struct lpfc_iocbq *next_iocb;
3053 IOCB_t *irsp = NULL;
3054 uint32_t free_saveq;
3055 uint8_t iocb_cmd_type;
3056 lpfc_iocb_type type;
3057 unsigned long iflag;
3058 int rc;
3059
3060 spin_lock_irqsave(&phba->hbalock, iflag);
3061 /* First add the response iocb to the countinueq list */
3062 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3063 pring->iocb_continueq_cnt++;
3064
70f23fd6 3065 /* Now, determine whether the list is completed for processing */
3772a991
JS
3066 irsp = &rspiocbp->iocb;
3067 if (irsp->ulpLe) {
3068 /*
3069 * By default, the driver expects to free all resources
3070 * associated with this iocb completion.
3071 */
3072 free_saveq = 1;
3073 saveq = list_get_first(&pring->iocb_continueq,
3074 struct lpfc_iocbq, list);
3075 irsp = &(saveq->iocb);
3076 list_del_init(&pring->iocb_continueq);
3077 pring->iocb_continueq_cnt = 0;
3078
3079 pring->stats.iocb_rsp++;
3080
3081 /*
3082 * If resource errors reported from HBA, reduce
3083 * queuedepths of the SCSI device.
3084 */
3085 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3086 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
3087 spin_unlock_irqrestore(&phba->hbalock, iflag);
3088 phba->lpfc_rampdown_queue_depth(phba);
3089 spin_lock_irqsave(&phba->hbalock, iflag);
3090 }
3091
3092 if (irsp->ulpStatus) {
3093 /* Rsp ring <ringno> error: IOCB */
3094 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3095 "0328 Rsp Ring %d error: "
3096 "IOCB Data: "
3097 "x%x x%x x%x x%x "
3098 "x%x x%x x%x x%x "
3099 "x%x x%x x%x x%x "
3100 "x%x x%x x%x x%x\n",
3101 pring->ringno,
3102 irsp->un.ulpWord[0],
3103 irsp->un.ulpWord[1],
3104 irsp->un.ulpWord[2],
3105 irsp->un.ulpWord[3],
3106 irsp->un.ulpWord[4],
3107 irsp->un.ulpWord[5],
3108 *(((uint32_t *) irsp) + 6),
3109 *(((uint32_t *) irsp) + 7),
3110 *(((uint32_t *) irsp) + 8),
3111 *(((uint32_t *) irsp) + 9),
3112 *(((uint32_t *) irsp) + 10),
3113 *(((uint32_t *) irsp) + 11),
3114 *(((uint32_t *) irsp) + 12),
3115 *(((uint32_t *) irsp) + 13),
3116 *(((uint32_t *) irsp) + 14),
3117 *(((uint32_t *) irsp) + 15));
3118 }
3119
3120 /*
3121 * Fetch the IOCB command type and call the correct completion
3122 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3123 * get freed back to the lpfc_iocb_list by the discovery
3124 * kernel thread.
3125 */
3126 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3127 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3128 switch (type) {
3129 case LPFC_SOL_IOCB:
3130 spin_unlock_irqrestore(&phba->hbalock, iflag);
3131 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3132 spin_lock_irqsave(&phba->hbalock, iflag);
3133 break;
3134
3135 case LPFC_UNSOL_IOCB:
3136 spin_unlock_irqrestore(&phba->hbalock, iflag);
3137 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3138 spin_lock_irqsave(&phba->hbalock, iflag);
3139 if (!rc)
3140 free_saveq = 0;
3141 break;
3142
3143 case LPFC_ABORT_IOCB:
3144 cmdiocbp = NULL;
3145 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3146 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3147 saveq);
3148 if (cmdiocbp) {
3149 /* Call the specified completion routine */
3150 if (cmdiocbp->iocb_cmpl) {
3151 spin_unlock_irqrestore(&phba->hbalock,
3152 iflag);
3153 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3154 saveq);
3155 spin_lock_irqsave(&phba->hbalock,
3156 iflag);
3157 } else
3158 __lpfc_sli_release_iocbq(phba,
3159 cmdiocbp);
3160 }
3161 break;
3162
3163 case LPFC_UNKNOWN_IOCB:
3164 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3165 char adaptermsg[LPFC_MAX_ADPTMSG];
3166 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3167 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3168 MAX_MSG_DATA);
3169 dev_warn(&((phba->pcidev)->dev),
3170 "lpfc%d: %s\n",
3171 phba->brd_no, adaptermsg);
3172 } else {
3173 /* Unknown IOCB command */
3174 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3175 "0335 Unknown IOCB "
3176 "command Data: x%x "
3177 "x%x x%x x%x\n",
3178 irsp->ulpCommand,
3179 irsp->ulpStatus,
3180 irsp->ulpIoTag,
3181 irsp->ulpContext);
3182 }
3183 break;
3184 }
3185
3186 if (free_saveq) {
3187 list_for_each_entry_safe(rspiocbp, next_iocb,
3188 &saveq->list, list) {
3189 list_del(&rspiocbp->list);
3190 __lpfc_sli_release_iocbq(phba, rspiocbp);
3191 }
3192 __lpfc_sli_release_iocbq(phba, saveq);
3193 }
3194 rspiocbp = NULL;
3195 }
3196 spin_unlock_irqrestore(&phba->hbalock, iflag);
3197 return rspiocbp;
3198}
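/*
 * A minimal user-space sketch (not driver code) of the accumulate-until-last
 * pattern used above: entries are queued on a continuation list until one
 * arrives with its "last entry" flag set (modeling the IOCB ulpLe bit), and
 * only then is the whole chain handed to a completion routine. All demo_*
 * names are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

struct demo_entry {
	bool last;			/* models irsp->ulpLe */
	struct demo_entry *next;
};

struct demo_contq {
	struct demo_entry *head;
	struct demo_entry **tail;	/* points at the last next pointer */
};

static void demo_contq_init(struct demo_contq *q)
{
	q->head = NULL;
	q->tail = &q->head;
}

/* Append one entry; flush the whole chain when the last flag is seen. */
static void demo_handle_entry(struct demo_contq *q, struct demo_entry *e,
			      void (*complete)(struct demo_entry *chain))
{
	e->next = NULL;
	*q->tail = e;
	q->tail = &e->next;
	if (e->last) {
		complete(q->head);
		demo_contq_init(q);
	}
}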
3199
3200/**
3201 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
e59058c4
JS
3202 * @phba: Pointer to HBA context object.
3203 * @pring: Pointer to driver SLI ring object.
3204 * @mask: Host attention register mask for this ring.
3205 *
3772a991
JS
3206 * This routine wraps the actual slow_ring event process routine from the
3207 * API jump table function pointer from the lpfc_hba struct.
e59058c4 3208 **/
3772a991 3209void
2e0fef85
JS
3210lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3211 struct lpfc_sli_ring *pring, uint32_t mask)
3772a991
JS
3212{
3213 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3214}
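/*
 * The wrapper above dispatches through a per-HBA function pointer installed
 * at setup time, so callers never test the SLI revision themselves. A
 * self-contained sketch of that API jump-table pattern follows; the demo_*
 * names are hypothetical, not driver symbols.
 */
#include <stdio.h>

struct demo_hba {
	int sli_rev;
	void (*handle_slow_ring_event)(struct demo_hba *hba);
};

static void demo_handle_s3(struct demo_hba *hba)
{
	printf("SLI3 slow ring path\n");
}

static void demo_handle_s4(struct demo_hba *hba)
{
	printf("SLI4 slow ring path\n");
}

/* Bind the revision-specific routine once, during initialization. */
static void demo_init_api_table(struct demo_hba *hba)
{
	hba->handle_slow_ring_event =
		(hba->sli_rev >= 4) ? demo_handle_s4 : demo_handle_s3;
}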
3215
3216/**
3217 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3218 * @phba: Pointer to HBA context object.
3219 * @pring: Pointer to driver SLI ring object.
3220 * @mask: Host attention register mask for this ring.
3221 *
3222 * This function is called from the worker thread when there is a ring event
3223 * for non-fcp rings. The caller does not hold any lock. The function will
3224 * remove each response iocb in the response ring and call the handle
3225 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3226 **/
3227static void
3228lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3229 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3230{
34b02dcd 3231 struct lpfc_pgp *pgp;
dea3101e
JB
3232 IOCB_t *entry;
3233 IOCB_t *irsp = NULL;
3234 struct lpfc_iocbq *rspiocbp = NULL;
dea3101e 3235 uint32_t portRspPut, portRspMax;
dea3101e 3236 unsigned long iflag;
3772a991 3237 uint32_t status;
dea3101e 3238
34b02dcd 3239 pgp = &phba->port_gp[pring->ringno];
2e0fef85 3240 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e
JB
3241 pring->stats.iocb_event++;
3242
dea3101e
JB
3243 /*
3244 * The next available response entry should never exceed the maximum
3245 * entries. If it does, treat it as an adapter hardware error.
3246 */
3247 portRspMax = pring->numRiocb;
3248 portRspPut = le32_to_cpu(pgp->rspPutInx);
3249 if (portRspPut >= portRspMax) {
3250 /*
025dfdaf 3251 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea3101e
JB
3252 * rsp ring <portRspMax>
3253 */
ed957684 3254 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3255 "0303 Ring %d handler: portRspPut %d "
025dfdaf 3256 "is bigger than rsp ring %d\n",
e8b62011 3257 pring->ringno, portRspPut, portRspMax);
dea3101e 3258
2e0fef85
JS
3259 phba->link_state = LPFC_HBA_ERROR;
3260 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e
JB
3261
3262 phba->work_hs = HS_FFER3;
3263 lpfc_handle_eratt(phba);
3264
3772a991 3265 return;
dea3101e
JB
3266 }
3267
3268 rmb();
dea3101e
JB
3269 while (pring->rspidx != portRspPut) {
3270 /*
3271 * Build a completion list and call the appropriate handler.
3272 * The process is to get the next available response iocb, get
3273 * a free iocb from the list, copy the response data into the
3274 * free iocb, insert to the continuation list, and update the
3275 * next response index to slim. This process makes response
3276 * iocb's in the ring available to DMA as fast as possible but
3277 * pays a penalty for a copy operation. Since the iocb is
3278 * only 32 bytes, this penalty is considered small relative to
3279 * the PCI reads for register values and a slim write. When
3280 * the ulpLe field is set, the entire Command has been
3281 * received.
3282 */
ed957684
JS
3283 entry = lpfc_resp_iocb(phba, pring);
3284
858c9f6c 3285 phba->last_completion_time = jiffies;
2e0fef85 3286 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e
JB
3287 if (rspiocbp == NULL) {
3288 printk(KERN_ERR "%s: out of buffers! Failing "
cadbd4a5 3289 "completion.\n", __func__);
dea3101e
JB
3290 break;
3291 }
3292
ed957684
JS
3293 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3294 phba->iocb_rsp_size);
dea3101e
JB
3295 irsp = &rspiocbp->iocb;
3296
3297 if (++pring->rspidx >= portRspMax)
3298 pring->rspidx = 0;
3299
a58cbd52
JS
3300 if (pring->ringno == LPFC_ELS_RING) {
3301 lpfc_debugfs_slow_ring_trc(phba,
3302 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3303 *(((uint32_t *) irsp) + 4),
3304 *(((uint32_t *) irsp) + 6),
3305 *(((uint32_t *) irsp) + 7));
3306 }
3307
ed957684 3308 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3309
3772a991
JS
3310 spin_unlock_irqrestore(&phba->hbalock, iflag);
3311 /* Handle the response IOCB */
3312 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3313 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e
JB
3314
3315 /*
3316 * If the port response put pointer has not been updated, sync
3317 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3318 * response put pointer.
3319 */
3320 if (pring->rspidx == portRspPut) {
3321 portRspPut = le32_to_cpu(pgp->rspPutInx);
3322 }
3323 } /* while (pring->rspidx != portRspPut) */
3324
92d7f7b0 3325 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea3101e
JB
3326 /* At least one response entry has been freed */
3327 pring->stats.iocb_rsp_full++;
3328 /* SET RxRE_RSP in Chip Att register */
3329 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3330 writel(status, phba->CAregaddr);
3331 readl(phba->CAregaddr); /* flush */
3332 }
3333 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3334 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3335 pring->stats.iocb_cmd_empty++;
3336
3337 /* Force update of the local copy of cmdGetInx */
3338 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
3339 lpfc_sli_resume_iocb(phba, pring);
3340
3341 if ((pring->lpfc_sli_cmd_available))
3342 (pring->lpfc_sli_cmd_available) (phba, pring);
3343
3344 }
3345
2e0fef85 3346 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3347 return;
dea3101e
JB
3348}
3349
4f774513
JS
3350/**
3351 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3352 * @phba: Pointer to HBA context object.
3353 * @pring: Pointer to driver SLI ring object.
3354 * @mask: Host attention register mask for this ring.
3355 *
3356 * This function is called from the worker thread when there is a pending
3357 * ELS response iocb on the driver internal slow-path response iocb worker
3358 * queue. The caller does not hold any lock. The function will remove each
3359 * response iocb from the response worker queue and calls the handle
3360 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3361 **/
3362static void
3363lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3364 struct lpfc_sli_ring *pring, uint32_t mask)
3365{
3366 struct lpfc_iocbq *irspiocbq;
4d9ab994
JS
3367 struct hbq_dmabuf *dmabuf;
3368 struct lpfc_cq_event *cq_event;
4f774513
JS
3369 unsigned long iflag;
3370
45ed1190
JS
3371 spin_lock_irqsave(&phba->hbalock, iflag);
3372 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3373 spin_unlock_irqrestore(&phba->hbalock, iflag);
3374 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4f774513
JS
3375 /* Get the response iocb from the head of work queue */
3376 spin_lock_irqsave(&phba->hbalock, iflag);
45ed1190 3377 list_remove_head(&phba->sli4_hba.sp_queue_event,
4d9ab994 3378 cq_event, struct lpfc_cq_event, list);
4f774513 3379 spin_unlock_irqrestore(&phba->hbalock, iflag);
4d9ab994
JS
3380
3381 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3382 case CQE_CODE_COMPL_WQE:
3383 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3384 cq_event);
45ed1190
JS
3385 /* Translate ELS WCQE to response IOCBQ */
3386 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3387 irspiocbq);
3388 if (irspiocbq)
3389 lpfc_sli_sp_handle_rspiocb(phba, pring,
3390 irspiocbq);
4d9ab994
JS
3391 break;
3392 case CQE_CODE_RECEIVE:
7851fe2c 3393 case CQE_CODE_RECEIVE_V1:
4d9ab994
JS
3394 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3395 cq_event);
3396 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3397 break;
3398 default:
3399 break;
3400 }
4f774513
JS
3401 }
3402}
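/*
 * The SLI4 handler above drains sp_queue_event with the usual "pop under
 * the lock, process with the lock dropped" discipline, so completion
 * handlers never run under hbalock. A pthreads sketch of that drain loop
 * (hypothetical demo_* names):
 */
#include <pthread.h>
#include <stddef.h>

struct demo_event {
	struct demo_event *next;
};

static struct demo_event *demo_queue_head;
static pthread_mutex_t demo_queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_drain_events(void (*process)(struct demo_event *ev))
{
	for (;;) {
		struct demo_event *ev;

		pthread_mutex_lock(&demo_queue_lock);
		ev = demo_queue_head;
		if (ev)
			demo_queue_head = ev->next;
		pthread_mutex_unlock(&demo_queue_lock);

		if (!ev)
			break;
		process(ev);		/* runs with the lock dropped */
	}
}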
3403
e59058c4 3404/**
3621a710 3405 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
e59058c4
JS
3406 * @phba: Pointer to HBA context object.
3407 * @pring: Pointer to driver SLI ring object.
3408 *
3409 * This function aborts all iocbs in the given ring and frees all the iocb
3410 * objects in txq. This function issues an abort iocb for all the iocb commands
3411 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3412 * the return of this function. The caller is not required to hold any locks.
3413 **/
2e0fef85 3414void
dea3101e
JB
3415lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3416{
2534ba75 3417 LIST_HEAD(completions);
dea3101e 3418 struct lpfc_iocbq *iocb, *next_iocb;
dea3101e 3419
92d7f7b0
JS
3420 if (pring->ringno == LPFC_ELS_RING) {
3421 lpfc_fabric_abort_hba(phba);
3422 }
3423
dea3101e
JB
3424 /* Error everything on txq and txcmplq
3425 * First do the txq.
3426 */
2e0fef85 3427 spin_lock_irq(&phba->hbalock);
2534ba75 3428 list_splice_init(&pring->txq, &completions);
dea3101e 3429 pring->txq_cnt = 0;
dea3101e
JB
3430
3431 /* Next issue ABTS for everything on the txcmplq */
2534ba75
JS
3432 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3433 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
dea3101e 3434
2e0fef85 3435 spin_unlock_irq(&phba->hbalock);
dea3101e 3436
a257bf90
JS
3437 /* Cancel all the IOCBs from the completions list */
3438 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3439 IOERR_SLI_ABORTED);
dea3101e
JB
3440}
3441
a8e497d5 3442/**
3621a710 3443 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
a8e497d5
JS
3444 * @phba: Pointer to HBA context object.
3445 *
3446 * This function flushes all iocbs in the fcp ring and frees all the iocb
3447 * objects in txq and txcmplq. This function will not issue abort iocbs
3448 * for all the iocb commands in txcmplq; they will just be returned with
3449 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
3450 * slot has been permanently disabled.
3451 **/
3452void
3453lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3454{
3455 LIST_HEAD(txq);
3456 LIST_HEAD(txcmplq);
a8e497d5
JS
3457 struct lpfc_sli *psli = &phba->sli;
3458 struct lpfc_sli_ring *pring;
3459
3460 /* Currently, only one fcp ring */
3461 pring = &psli->ring[psli->fcp_ring];
3462
3463 spin_lock_irq(&phba->hbalock);
3464 /* Retrieve everything on txq */
3465 list_splice_init(&pring->txq, &txq);
3466 pring->txq_cnt = 0;
3467
3468 /* Retrieve everything on the txcmplq */
3469 list_splice_init(&pring->txcmplq, &txcmplq);
3470 pring->txcmplq_cnt = 0;
3471 spin_unlock_irq(&phba->hbalock);
3472
3473 /* Flush the txq */
a257bf90
JS
3474 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3475 IOERR_SLI_DOWN);
a8e497d5
JS
3476
3477 /* Flush the txcmpq */
a257bf90
JS
3478 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3479 IOERR_SLI_DOWN);
a8e497d5
JS
3480}
3481
e59058c4 3482/**
3772a991 3483 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
e59058c4
JS
3484 * @phba: Pointer to HBA context object.
3485 * @mask: Bit mask to be checked.
3486 *
3487 * This function reads the host status register and compares
3488 * with the provided bit mask to check if HBA completed
3489 * the restart. This function will wait in a loop for the
3490 * HBA to complete restart. If the HBA does not restart within
3491 * 15 iterations, the function will reset the HBA again. The
3492 * function returns 1 when the HBA fails to restart; otherwise it
3493 * returns zero.
3494 **/
3772a991
JS
3495static int
3496lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea3101e 3497{
41415862
JW
3498 uint32_t status;
3499 int i = 0;
3500 int retval = 0;
dea3101e 3501
41415862 3502 /* Read the HBA Host Status Register */
9940b97b
JS
3503 if (lpfc_readl(phba->HSregaddr, &status))
3504 return 1;
dea3101e 3505
41415862
JW
3506 /*
3507 * Check status register every 100ms for 5 retries, then every
3508 * 500ms for 5, then every 2.5 sec for 5, then reset board and
3509 * every 2.5 sec for 4.
3510 * Break out of the loop if errors occurred during init.
3511 */
3512 while (((status & mask) != mask) &&
3513 !(status & HS_FFERM) &&
3514 i++ < 20) {
dea3101e 3515
41415862
JW
3516 if (i <= 5)
3517 msleep(10);
3518 else if (i <= 10)
3519 msleep(500);
3520 else
3521 msleep(2500);
dea3101e 3522
41415862 3523 if (i == 15) {
2e0fef85 3524 /* Do post */
92d7f7b0 3525 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862
JW
3526 lpfc_sli_brdrestart(phba);
3527 }
3528 /* Read the HBA Host Status Register */
9940b97b
JS
3529 if (lpfc_readl(phba->HSregaddr, &status)) {
3530 retval = 1;
3531 break;
3532 }
41415862 3533 }
dea3101e 3534
41415862
JW
3535 /* Check to see if any errors occurred during init */
3536 if ((status & HS_FFERM) || (i >= 20)) {
e40a02c1
JS
3537 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3538 "2751 Adapter failed to restart, "
3539 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3540 status,
3541 readl(phba->MBslimaddr + 0xa8),
3542 readl(phba->MBslimaddr + 0xac));
2e0fef85 3543 phba->link_state = LPFC_HBA_ERROR;
41415862 3544 retval = 1;
dea3101e 3545 }
dea3101e 3546
41415862
JW
3547 return retval;
3548}
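/*
 * A user-space sketch of the tiered-backoff poll implemented above: short
 * sleeps while the HBA is expected to come ready quickly, longer sleeps as
 * the wait drags on, with a bounded retry count. The demo_* names and the
 * ready() callback are hypothetical.
 */
#include <stdbool.h>
#include <time.h>

static void demo_msleep(long ms)
{
	struct timespec ts = { .tv_sec = ms / 1000,
			       .tv_nsec = (ms % 1000) * 1000000L };
	nanosleep(&ts, NULL);
}

static bool demo_wait_ready(bool (*ready)(void))
{
	for (int i = 1; i <= 20; i++) {
		if (ready())
			return true;
		/* back off: 10ms for 5 tries, 500ms for 5, then 2.5s */
		demo_msleep(i <= 5 ? 10 : (i <= 10 ? 500 : 2500));
	}
	return ready();
}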
dea3101e 3549
da0436e9
JS
3550/**
3551 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3552 * @phba: Pointer to HBA context object.
3553 * @mask: Bit mask to be checked.
3554 *
3555 * This function checks the host status register to determine if the
3556 * HBA is ready. This function will wait in a loop for the HBA to be
3557 * ready. If the HBA is not ready, the function will reset the HBA PCI
3558 * function again. The function returns 1 when the HBA fails to become
3559 * ready; otherwise it returns zero.
3560 **/
3561static int
3562lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3563{
3564 uint32_t status;
3565 int retval = 0;
3566
3567 /* Read the HBA Host Status Register */
3568 status = lpfc_sli4_post_status_check(phba);
3569
3570 if (status) {
3571 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3572 lpfc_sli_brdrestart(phba);
3573 status = lpfc_sli4_post_status_check(phba);
3574 }
3575
3576 /* Check to see if any errors occurred during init */
3577 if (status) {
3578 phba->link_state = LPFC_HBA_ERROR;
3579 retval = 1;
3580 } else
3581 phba->sli4_hba.intr_enable = 0;
3582
3583 return retval;
3584}
3585
3586/**
3587 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3588 * @phba: Pointer to HBA context object.
3589 * @mask: Bit mask to be checked.
3590 *
3591 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3592 * from the API jump table function pointer from the lpfc_hba struct.
3593 **/
3594int
3595lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3596{
3597 return phba->lpfc_sli_brdready(phba, mask);
3598}
3599
9290831f
JS
3600#define BARRIER_TEST_PATTERN (0xdeadbeef)
3601
e59058c4 3602/**
3621a710 3603 * lpfc_reset_barrier - Make HBA ready for HBA reset
e59058c4
JS
3604 * @phba: Pointer to HBA context object.
3605 *
1b51197d
JS
3606 * This function is called before resetting an HBA. This function is called
3607 * with hbalock held and requests the HBA to quiesce DMAs before a reset.
e59058c4 3608 **/
2e0fef85 3609void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 3610{
65a29c16
JS
3611 uint32_t __iomem *resp_buf;
3612 uint32_t __iomem *mbox_buf;
9290831f 3613 volatile uint32_t mbox;
9940b97b 3614 uint32_t hc_copy, ha_copy, resp_data;
9290831f
JS
3615 int i;
3616 uint8_t hdrtype;
3617
3618 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3619 if (hdrtype != 0x80 ||
3620 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3621 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3622 return;
3623
3624 /*
3625 * Tell the other part of the chip to suspend temporarily all
3626 * its DMA activity.
3627 */
65a29c16 3628 resp_buf = phba->MBslimaddr;
9290831f
JS
3629
3630 /* Disable the error attention */
9940b97b
JS
3631 if (lpfc_readl(phba->HCregaddr, &hc_copy))
3632 return;
9290831f
JS
3633 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3634 readl(phba->HCregaddr); /* flush */
2e0fef85 3635 phba->link_flag |= LS_IGNORE_ERATT;
9290831f 3636
9940b97b
JS
3637 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3638 return;
3639 if (ha_copy & HA_ERATT) {
9290831f
JS
3640 /* Clear Chip error bit */
3641 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3642 phba->pport->stopped = 1;
9290831f
JS
3643 }
3644
3645 mbox = 0;
3646 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3647 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3648
3649 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 3650 mbox_buf = phba->MBslimaddr;
9290831f
JS
3651 writel(mbox, mbox_buf);
3652
9940b97b
JS
3653 for (i = 0; i < 50; i++) {
3654 if (lpfc_readl((resp_buf + 1), &resp_data))
3655 return;
3656 if (resp_data != ~(BARRIER_TEST_PATTERN))
3657 mdelay(1);
3658 else
3659 break;
3660 }
3661 resp_data = 0;
3662 if (lpfc_readl((resp_buf + 1), &resp_data))
3663 return;
3664 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
f4b4c68f 3665 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2e0fef85 3666 phba->pport->stopped)
9290831f
JS
3667 goto restore_hc;
3668 else
3669 goto clear_errat;
3670 }
3671
3672 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
9940b97b
JS
3673 resp_data = 0;
3674 for (i = 0; i < 500; i++) {
3675 if (lpfc_readl(resp_buf, &resp_data))
3676 return;
3677 if (resp_data != mbox)
3678 mdelay(1);
3679 else
3680 break;
3681 }
9290831f
JS
3682
3683clear_errat:
3684
9940b97b
JS
3685 while (++i < 500) {
3686 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3687 return;
3688 if (!(ha_copy & HA_ERATT))
3689 mdelay(1);
3690 else
3691 break;
3692 }
9290831f
JS
3693
3694 if (readl(phba->HAregaddr) & HA_ERATT) {
3695 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3696 phba->pport->stopped = 1;
9290831f
JS
3697 }
3698
3699restore_hc:
2e0fef85 3700 phba->link_flag &= ~LS_IGNORE_ERATT;
9290831f
JS
3701 writel(hc_copy, phba->HCregaddr);
3702 readl(phba->HCregaddr); /* flush */
3703}
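/*
 * A sketch of the handshake above: the host writes a known test pattern
 * next to the mailbox and polls, with bounded 1ms delays, for the chip to
 * echo back the one's complement as its acknowledgement. The accessors
 * here are hypothetical stand-ins for readl()/writel()/mdelay().
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_TEST_PATTERN 0xdeadbeefu

extern uint32_t demo_read32(const volatile uint32_t *reg);
extern void demo_write32(uint32_t val, volatile uint32_t *reg);
extern void demo_mdelay(unsigned int ms);

static bool demo_barrier_handshake(volatile uint32_t *resp)
{
	demo_write32(DEMO_TEST_PATTERN, resp);
	for (int i = 0; i < 50; i++) {
		/* the chip acknowledges with the complement of the pattern */
		if (demo_read32(resp) == ~DEMO_TEST_PATTERN)
			return true;
		demo_mdelay(1);
	}
	return false;
}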
3704
e59058c4 3705/**
3621a710 3706 * lpfc_sli_brdkill - Issue a kill_board mailbox command
e59058c4
JS
3707 * @phba: Pointer to HBA context object.
3708 *
3709 * This function issues a kill_board mailbox command and waits for
3710 * the error attention interrupt. This function is called for stopping
3711 * the firmware processing. The caller is not required to hold any
3712 * locks. This function calls lpfc_hba_down_post function to free
3713 * any pending commands after the kill. The function will return 1 when it
3714 * fails to kill the board; otherwise it will return 0.
3715 **/
41415862 3716int
2e0fef85 3717lpfc_sli_brdkill(struct lpfc_hba *phba)
41415862
JW
3718{
3719 struct lpfc_sli *psli;
3720 LPFC_MBOXQ_t *pmb;
3721 uint32_t status;
3722 uint32_t ha_copy;
3723 int retval;
3724 int i = 0;
dea3101e 3725
41415862 3726 psli = &phba->sli;
dea3101e 3727
41415862 3728 /* Kill HBA */
ed957684 3729 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
3730 "0329 Kill HBA Data: x%x x%x\n",
3731 phba->pport->port_state, psli->sli_flag);
41415862 3732
98c9ea5c
JS
3733 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3734 if (!pmb)
41415862 3735 return 1;
41415862
JW
3736
3737 /* Disable the error attention */
2e0fef85 3738 spin_lock_irq(&phba->hbalock);
9940b97b
JS
3739 if (lpfc_readl(phba->HCregaddr, &status)) {
3740 spin_unlock_irq(&phba->hbalock);
3741 mempool_free(pmb, phba->mbox_mem_pool);
3742 return 1;
3743 }
41415862
JW
3744 status &= ~HC_ERINT_ENA;
3745 writel(status, phba->HCregaddr);
3746 readl(phba->HCregaddr); /* flush */
2e0fef85
JS
3747 phba->link_flag |= LS_IGNORE_ERATT;
3748 spin_unlock_irq(&phba->hbalock);
41415862
JW
3749
3750 lpfc_kill_board(phba, pmb);
3751 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3752 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3753
3754 if (retval != MBX_SUCCESS) {
3755 if (retval != MBX_BUSY)
3756 mempool_free(pmb, phba->mbox_mem_pool);
e40a02c1
JS
3757 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3758 "2752 KILL_BOARD command failed retval %d\n",
3759 retval);
2e0fef85
JS
3760 spin_lock_irq(&phba->hbalock);
3761 phba->link_flag &= ~LS_IGNORE_ERATT;
3762 spin_unlock_irq(&phba->hbalock);
41415862
JW
3763 return 1;
3764 }
3765
f4b4c68f
JS
3766 spin_lock_irq(&phba->hbalock);
3767 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3768 spin_unlock_irq(&phba->hbalock);
9290831f 3769
41415862
JW
3770 mempool_free(pmb, phba->mbox_mem_pool);
3771
3772 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3773 * attention every 100ms for 3 seconds. If we don't get ERATT after
3774 * 3 seconds we still set HBA_ERROR state because the status of the
3775 * board is now undefined.
3776 */
9940b97b
JS
3777 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3778 return 1;
41415862
JW
3779 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3780 mdelay(100);
9940b97b
JS
3781 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3782 return 1;
41415862
JW
3783 }
3784
3785 del_timer_sync(&psli->mbox_tmo);
9290831f
JS
3786 if (ha_copy & HA_ERATT) {
3787 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3788 phba->pport->stopped = 1;
9290831f 3789 }
2e0fef85 3790 spin_lock_irq(&phba->hbalock);
41415862 3791 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
04c68496 3792 psli->mbox_active = NULL;
2e0fef85
JS
3793 phba->link_flag &= ~LS_IGNORE_ERATT;
3794 spin_unlock_irq(&phba->hbalock);
41415862 3795
41415862 3796 lpfc_hba_down_post(phba);
2e0fef85 3797 phba->link_state = LPFC_HBA_ERROR;
41415862 3798
2e0fef85 3799 return ha_copy & HA_ERATT ? 0 : 1;
dea3101e
JB
3800}
3801
e59058c4 3802/**
3772a991 3803 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
e59058c4
JS
3804 * @phba: Pointer to HBA context object.
3805 *
3806 * This function resets the HBA by writing HC_INITFF to the control
3807 * register. After the HBA resets, this function resets all the iocb ring
3808 * indices. This function disables PCI layer parity checking during
3809 * the reset.
3810 * This function returns 0 always.
3811 * The caller is not required to hold any locks.
3812 **/
41415862 3813int
2e0fef85 3814lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 3815{
41415862 3816 struct lpfc_sli *psli;
dea3101e 3817 struct lpfc_sli_ring *pring;
41415862 3818 uint16_t cfg_value;
dea3101e 3819 int i;
dea3101e 3820
41415862 3821 psli = &phba->sli;
dea3101e 3822
41415862
JW
3823 /* Reset HBA */
3824 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3825 "0325 Reset HBA Data: x%x x%x\n",
2e0fef85 3826 phba->pport->port_state, psli->sli_flag);
dea3101e
JB
3827
3828 /* perform board reset */
3829 phba->fc_eventTag = 0;
4d9ab994 3830 phba->link_events = 0;
2e0fef85
JS
3831 phba->pport->fc_myDID = 0;
3832 phba->pport->fc_prevDID = 0;
dea3101e 3833
41415862
JW
3834 /* Turn off parity checking and serr during the physical reset */
3835 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3836 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3837 (cfg_value &
3838 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3839
3772a991
JS
3840 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3841
41415862
JW
3842 /* Now toggle INITFF bit in the Host Control Register */
3843 writel(HC_INITFF, phba->HCregaddr);
3844 mdelay(1);
3845 readl(phba->HCregaddr); /* flush */
3846 writel(0, phba->HCregaddr);
3847 readl(phba->HCregaddr); /* flush */
3848
3849 /* Restore PCI cmd register */
3850 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea3101e
JB
3851
3852 /* Initialize relevant SLI info */
41415862
JW
3853 for (i = 0; i < psli->num_rings; i++) {
3854 pring = &psli->ring[i];
dea3101e
JB
3855 pring->flag = 0;
3856 pring->rspidx = 0;
3857 pring->next_cmdidx = 0;
3858 pring->local_getidx = 0;
3859 pring->cmdidx = 0;
3860 pring->missbufcnt = 0;
3861 }
dea3101e 3862
2e0fef85 3863 phba->link_state = LPFC_WARM_START;
41415862
JW
3864 return 0;
3865}
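/*
 * The reset above brackets the INITFF toggle with a save/mask/restore of
 * the PCI command register, so the self-inflicted reset cannot be flagged
 * as a parity or SERR error. The bracketing pattern in isolation (sketch
 * only; demo_quiet_reset is hypothetical, the pci_* calls are the real
 * kernel API):
 */
#include <linux/pci.h>

static void demo_quiet_reset(struct pci_dev *pdev, void (*do_reset)(void))
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	/* mask parity and SERR reporting for the duration of the reset */
	pci_write_config_word(pdev, PCI_COMMAND,
			      cmd & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
	do_reset();
	/* restore the saved command register once the device is quiet */
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
}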
3866
e59058c4 3867/**
da0436e9
JS
3868 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3869 * @phba: Pointer to HBA context object.
3870 *
3871 * This function resets a SLI4 HBA. This function disables PCI layer parity
3872 * checking while resetting the device. The caller is not required to hold
3873 * any locks.
3874 *
3875 * This function returns 0 always.
3876 **/
3877int
3878lpfc_sli4_brdreset(struct lpfc_hba *phba)
3879{
3880 struct lpfc_sli *psli = &phba->sli;
3881 uint16_t cfg_value;
da0436e9
JS
3882
3883 /* Reset HBA */
3884 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3885 "0295 Reset HBA Data: x%x x%x\n",
3886 phba->pport->port_state, psli->sli_flag);
3887
3888 /* perform board reset */
3889 phba->fc_eventTag = 0;
4d9ab994 3890 phba->link_events = 0;
da0436e9
JS
3891 phba->pport->fc_myDID = 0;
3892 phba->pport->fc_prevDID = 0;
3893
da0436e9
JS
3894 spin_lock_irq(&phba->hbalock);
3895 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3896 phba->fcf.fcf_flag = 0;
da0436e9
JS
3897 spin_unlock_irq(&phba->hbalock);
3898
3899 /* Now physically reset the device */
3900 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3901 "0389 Performing PCI function reset!\n");
be858b65
JS
3902
3903 /* Turn off parity checking and serr during the physical reset */
3904 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3905 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
3906 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3907
da0436e9 3908 /* Perform FCoE PCI function reset */
2e90f4b5 3909 lpfc_sli4_queue_destroy(phba);
da0436e9
JS
3910 lpfc_pci_function_reset(phba);
3911
be858b65
JS
3912 /* Restore PCI cmd register */
3913 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3914
da0436e9
JS
3915 return 0;
3916}
3917
3918/**
3919 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
e59058c4
JS
3920 * @phba: Pointer to HBA context object.
3921 *
3922 * This function is called in the SLI initialization code path to
3923 * restart the HBA. The caller is not required to hold any lock.
3924 * This function writes MBX_RESTART mailbox command to the SLIM and
3925 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
3926 * function to free any pending commands. The function enables
3927 * POST only during the first initialization. The function returns zero.
3928 * The function does not guarantee completion of MBX_RESTART mailbox
3929 * command before the return of this function.
3930 **/
da0436e9
JS
3931static int
3932lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
41415862
JW
3933{
3934 MAILBOX_t *mb;
3935 struct lpfc_sli *psli;
41415862
JW
3936 volatile uint32_t word0;
3937 void __iomem *to_slim;
0d878419 3938 uint32_t hba_aer_enabled;
41415862 3939
2e0fef85 3940 spin_lock_irq(&phba->hbalock);
41415862 3941
0d878419
JS
3942 /* Take PCIe device Advanced Error Reporting (AER) state */
3943 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
3944
41415862
JW
3945 psli = &phba->sli;
3946
3947 /* Restart HBA */
3948 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3949 "0337 Restart HBA Data: x%x x%x\n",
2e0fef85 3950 phba->pport->port_state, psli->sli_flag);
41415862
JW
3951
3952 word0 = 0;
3953 mb = (MAILBOX_t *) &word0;
3954 mb->mbxCommand = MBX_RESTART;
3955 mb->mbxHc = 1;
3956
9290831f
JS
3957 lpfc_reset_barrier(phba);
3958
41415862
JW
3959 to_slim = phba->MBslimaddr;
3960 writel(*(uint32_t *) mb, to_slim);
3961 readl(to_slim); /* flush */
3962
3963 /* Only skip post after fc_ffinit is completed */
eaf15d5b 3964 if (phba->pport->port_state)
41415862 3965 word0 = 1; /* This is really setting up word1 */
eaf15d5b 3966 else
41415862 3967 word0 = 0; /* This is really setting up word1 */
65a29c16 3968 to_slim = phba->MBslimaddr + sizeof (uint32_t);
41415862
JW
3969 writel(*(uint32_t *) mb, to_slim);
3970 readl(to_slim); /* flush */
dea3101e 3971
41415862 3972 lpfc_sli_brdreset(phba);
2e0fef85
JS
3973 phba->pport->stopped = 0;
3974 phba->link_state = LPFC_INIT_START;
da0436e9 3975 phba->hba_flag = 0;
2e0fef85 3976 spin_unlock_irq(&phba->hbalock);
41415862 3977
64ba8818
JS
3978 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3979 psli->stats_start = get_seconds();
3980
eaf15d5b
JS
3981 /* Give the INITFF and Post time to settle. */
3982 mdelay(100);
41415862 3983
0d878419
JS
3984 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
3985 if (hba_aer_enabled)
3986 pci_disable_pcie_error_reporting(phba->pcidev);
3987
41415862 3988 lpfc_hba_down_post(phba);
dea3101e
JB
3989
3990 return 0;
3991}
3992
da0436e9
JS
3993/**
3994 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3995 * @phba: Pointer to HBA context object.
3996 *
3997 * This function is called in the SLI initialization code path to restart
3998 * a SLI4 HBA. The caller is not required to hold any lock.
3999 * At the end of the function, it calls lpfc_hba_down_post function to
4000 * free any pending commands.
4001 **/
4002static int
4003lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4004{
4005 struct lpfc_sli *psli = &phba->sli;
75baf696 4006 uint32_t hba_aer_enabled;
da0436e9
JS
4007
4008 /* Restart HBA */
4009 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4010 "0296 Restart HBA Data: x%x x%x\n",
4011 phba->pport->port_state, psli->sli_flag);
4012
75baf696
JS
4013 /* Take PCIe device Advanced Error Reporting (AER) state */
4014 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4015
da0436e9
JS
4016 lpfc_sli4_brdreset(phba);
4017
4018 spin_lock_irq(&phba->hbalock);
4019 phba->pport->stopped = 0;
4020 phba->link_state = LPFC_INIT_START;
4021 phba->hba_flag = 0;
4022 spin_unlock_irq(&phba->hbalock);
4023
4024 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4025 psli->stats_start = get_seconds();
4026
75baf696
JS
4027 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4028 if (hba_aer_enabled)
4029 pci_disable_pcie_error_reporting(phba->pcidev);
4030
da0436e9
JS
4031 lpfc_hba_down_post(phba);
4032
4033 return 0;
4034}
4035
4036/**
4037 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4038 * @phba: Pointer to HBA context object.
4039 *
4040 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4041 * API jump table function pointer from the lpfc_hba struct.
4042**/
4043int
4044lpfc_sli_brdrestart(struct lpfc_hba *phba)
4045{
4046 return phba->lpfc_sli_brdrestart(phba);
4047}
4048
e59058c4 4049/**
3621a710 4050 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
e59058c4
JS
4051 * @phba: Pointer to HBA context object.
4052 *
4053 * This function is called after an HBA restart to wait for successful
4054 * restart of the HBA. Successful restart of the HBA is indicated by
4055 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4056 * iterations, the function will restart the HBA again. The function returns
4057 * zero if the HBA successfully restarted, else returns a negative error code.
4058 **/
dea3101e
JB
4059static int
4060lpfc_sli_chipset_init(struct lpfc_hba *phba)
4061{
4062 uint32_t status, i = 0;
4063
4064 /* Read the HBA Host Status Register */
9940b97b
JS
4065 if (lpfc_readl(phba->HSregaddr, &status))
4066 return -EIO;
dea3101e
JB
4067
4068 /* Check status register to see what current state is */
4069 i = 0;
4070 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4071
dcf2a4e0
JS
4072 /* Check every 10ms for 10 retries, then every 100ms for 90
4073 * retries, then every 1 sec for 50 retries, for a total of
4074 * ~60 seconds (10*10ms + 90*100ms + 50*1s = 59.1s) before
4075 * resetting the board again and checking every 1 sec for 50
4076 * more retries. The up-to-60-second wait for board ready is
4077 * required for Falcon FIPS zeroization to complete; any board
4078 * reset in between restarts zeroization and delays board ready.
dea3101e 4079 */
dcf2a4e0 4080 if (i++ >= 200) {
dea3101e
JB
4081 /* Adapter failed to init, timeout, status reg
4082 <status> */
ed957684 4083 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4084 "0436 Adapter failed to init, "
09372820
JS
4085 "timeout, status reg x%x, "
4086 "FW Data: A8 x%x AC x%x\n", status,
4087 readl(phba->MBslimaddr + 0xa8),
4088 readl(phba->MBslimaddr + 0xac));
2e0fef85 4089 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
4090 return -ETIMEDOUT;
4091 }
4092
4093 /* Check to see if any errors occurred during init */
4094 if (status & HS_FFERM) {
4095 /* ERROR: During chipset initialization */
4096 /* Adapter failed to init, chipset, status reg
4097 <status> */
ed957684 4098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4099 "0437 Adapter failed to init, "
09372820
JS
4100 "chipset, status reg x%x, "
4101 "FW Data: A8 x%x AC x%x\n", status,
4102 readl(phba->MBslimaddr + 0xa8),
4103 readl(phba->MBslimaddr + 0xac));
2e0fef85 4104 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
4105 return -EIO;
4106 }
4107
dcf2a4e0 4108 if (i <= 10)
dea3101e 4109 msleep(10);
dcf2a4e0
JS
4110 else if (i <= 100)
4111 msleep(100);
4112 else
4113 msleep(1000);
dea3101e 4114
dcf2a4e0
JS
4115 if (i == 150) {
4116 /* Do post */
92d7f7b0 4117 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4118 lpfc_sli_brdrestart(phba);
dea3101e
JB
4119 }
4120 /* Read the HBA Host Status Register */
9940b97b
JS
4121 if (lpfc_readl(phba->HSregaddr, &status))
4122 return -EIO;
dea3101e
JB
4123 }
4124
4125 /* Check to see if any errors occurred during init */
4126 if (status & HS_FFERM) {
4127 /* ERROR: During chipset initialization */
4128 /* Adapter failed to init, chipset, status reg <status> */
ed957684 4129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4130 "0438 Adapter failed to init, chipset, "
09372820
JS
4131 "status reg x%x, "
4132 "FW Data: A8 x%x AC x%x\n", status,
4133 readl(phba->MBslimaddr + 0xa8),
4134 readl(phba->MBslimaddr + 0xac));
2e0fef85 4135 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
4136 return -EIO;
4137 }
4138
4139 /* Clear all interrupt enable conditions */
4140 writel(0, phba->HCregaddr);
4141 readl(phba->HCregaddr); /* flush */
4142
4143 /* setup host attn register */
4144 writel(0xffffffff, phba->HAregaddr);
4145 readl(phba->HAregaddr); /* flush */
4146 return 0;
4147}
4148
e59058c4 4149/**
3621a710 4150 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
e59058c4
JS
4151 *
4152 * This function calculates and returns the number of HBQs required to be
4153 * configured.
4154 **/
78b2d852 4155int
ed957684
JS
4156lpfc_sli_hbq_count(void)
4157{
92d7f7b0 4158 return ARRAY_SIZE(lpfc_hbq_defs);
ed957684
JS
4159}
4160
e59058c4 4161/**
3621a710 4162 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
e59058c4
JS
4163 *
4164 * This function adds the number of hbq entries in every HBQ to get
4165 * the total number of hbq entries required for the HBA and returns
4166 * the total count.
4167 **/
ed957684
JS
4168static int
4169lpfc_sli_hbq_entry_count(void)
4170{
4171 int hbq_count = lpfc_sli_hbq_count();
4172 int count = 0;
4173 int i;
4174
4175 for (i = 0; i < hbq_count; ++i)
92d7f7b0 4176 count += lpfc_hbq_defs[i]->entry_count;
ed957684
JS
4177 return count;
4178}
4179
e59058c4 4180/**
3621a710 4181 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
e59058c4
JS
4182 *
4183 * This function calculates amount of memory required for all hbq entries
4184 * to be configured and returns the total memory required.
4185 **/
dea3101e 4186int
ed957684
JS
4187lpfc_sli_hbq_size(void)
4188{
4189 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4190}
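/*
 * Worked example of the sizing arithmetic above, with hypothetical
 * numbers: two HBQs with 256 and 16 entries respectively would need
 * (256 + 16) * sizeof(struct lpfc_hbq_entry) bytes in total. A
 * self-contained model (demo_* names are illustrative only):
 */
#include <stddef.h>

static const size_t demo_entry_counts[] = { 256, 16 };

static size_t demo_hbq_size(size_t entry_size)
{
	size_t i, total = 0;

	for (i = 0; i < sizeof(demo_entry_counts) / sizeof(demo_entry_counts[0]); i++)
		total += demo_entry_counts[i];
	return total * entry_size;	/* e.g. (256 + 16) * entry_size */
}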
4191
e59058c4 4192/**
3621a710 4193 * lpfc_sli_hbq_setup - configure and initialize HBQs
e59058c4
JS
4194 * @phba: Pointer to HBA context object.
4195 *
4196 * This function is called during the SLI initialization to configure
4197 * all the HBQs and post buffers to the HBQ. The caller is not
4198 * required to hold any locks. This function will return zero if successful
4199 * else it will return negative error code.
4200 **/
ed957684
JS
4201static int
4202lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4203{
4204 int hbq_count = lpfc_sli_hbq_count();
4205 LPFC_MBOXQ_t *pmb;
4206 MAILBOX_t *pmbox;
4207 uint32_t hbqno;
4208 uint32_t hbq_entry_index;
ed957684 4209
92d7f7b0
JS
4210 /* Get a Mailbox buffer to setup mailbox
4211 * commands for HBA initialization
4212 */
ed957684
JS
4213 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4214
4215 if (!pmb)
4216 return -ENOMEM;
4217
04c68496 4218 pmbox = &pmb->u.mb;
ed957684
JS
4219
4220 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4221 phba->link_state = LPFC_INIT_MBX_CMDS;
3163f725 4222 phba->hbq_in_use = 1;
ed957684
JS
4223
4224 hbq_entry_index = 0;
4225 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4226 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4227 phba->hbqs[hbqno].hbqPutIdx = 0;
4228 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4229 phba->hbqs[hbqno].entry_count =
92d7f7b0 4230 lpfc_hbq_defs[hbqno]->entry_count;
51ef4c26
JS
4231 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4232 hbq_entry_index, pmb);
ed957684
JS
4233 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4234
4235 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4236 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4237 mbxStatus <status>, ring <num> */
4238
4239 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 4240 LOG_SLI | LOG_VPORT,
e8b62011 4241 "1805 Adapter failed to init. "
ed957684 4242 "Data: x%x x%x x%x\n",
e8b62011 4243 pmbox->mbxCommand,
ed957684
JS
4244 pmbox->mbxStatus, hbqno);
4245
4246 phba->link_state = LPFC_HBA_ERROR;
4247 mempool_free(pmb, phba->mbox_mem_pool);
6e7288d9 4248 return -ENXIO;
ed957684
JS
4249 }
4250 }
4251 phba->hbq_count = hbq_count;
4252
ed957684
JS
4253 mempool_free(pmb, phba->mbox_mem_pool);
4254
92d7f7b0 4255 /* Initially populate or replenish the HBQs */
d7c255b2
JS
4256 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4257 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
ed957684
JS
4258 return 0;
4259}
4260
4f774513
JS
4261/**
4262 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4263 * @phba: Pointer to HBA context object.
4264 *
4265 * This function is called during the SLI initialization to configure
4266 * all the HBQs and post buffers to the HBQ. The caller is not
4267 * required to hold any locks. This function will return zero if successful
4268 * else it will return negative error code.
4269 **/
4270static int
4271lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4272{
4273 phba->hbq_in_use = 1;
4274 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4275 phba->hbq_count = 1;
4276 /* Initially populate or replenish the HBQs */
4277 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4278 return 0;
4279}
4280
e59058c4 4281/**
3621a710 4282 * lpfc_sli_config_port - Issue config port mailbox command
e59058c4
JS
4283 * @phba: Pointer to HBA context object.
4284 * @sli_mode: sli mode - 2/3
4285 *
4286 * This function is called by the sli initialization code path
4287 * to issue config_port mailbox command. This function restarts the
4288 * HBA firmware and issues a config_port mailbox command to configure
4289 * the SLI interface in the sli mode specified by sli_mode
4290 * variable. The caller is not required to hold any locks.
4291 * The function returns 0 if successful, else returns negative error
4292 * code.
4293 **/
9399627f
JS
4294int
4295lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
dea3101e
JB
4296{
4297 LPFC_MBOXQ_t *pmb;
4298 uint32_t resetcount = 0, rc = 0, done = 0;
4299
4300 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4301 if (!pmb) {
2e0fef85 4302 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
4303 return -ENOMEM;
4304 }
4305
ed957684 4306 phba->sli_rev = sli_mode;
dea3101e 4307 while (resetcount < 2 && !done) {
2e0fef85 4308 spin_lock_irq(&phba->hbalock);
1c067a42 4309 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4310 spin_unlock_irq(&phba->hbalock);
92d7f7b0 4311 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4312 lpfc_sli_brdrestart(phba);
dea3101e
JB
4313 rc = lpfc_sli_chipset_init(phba);
4314 if (rc)
4315 break;
4316
2e0fef85 4317 spin_lock_irq(&phba->hbalock);
1c067a42 4318 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4319 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
4320 resetcount++;
4321
ed957684
JS
4322 /* Call pre CONFIG_PORT mailbox command initialization. A
4323 * value of 0 means the call was successful. Any nonzero
4324 * value is a failure, but if ERESTART is returned,
4325 * the driver may reset the HBA and try again.
4326 */
dea3101e
JB
4327 rc = lpfc_config_port_prep(phba);
4328 if (rc == -ERESTART) {
ed957684 4329 phba->link_state = LPFC_LINK_UNKNOWN;
dea3101e 4330 continue;
34b02dcd 4331 } else if (rc)
dea3101e 4332 break;
6d368e53 4333
2e0fef85 4334 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e
JB
4335 lpfc_config_port(phba, pmb);
4336 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
34b02dcd
JS
4337 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4338 LPFC_SLI3_HBQ_ENABLED |
4339 LPFC_SLI3_CRP_ENABLED |
bc73905a
JS
4340 LPFC_SLI3_BG_ENABLED |
4341 LPFC_SLI3_DSS_ENABLED);
ed957684 4342 if (rc != MBX_SUCCESS) {
dea3101e 4343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4344 "0442 Adapter failed to init, mbxCmd x%x "
92d7f7b0 4345 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
04c68496 4346 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
2e0fef85 4347 spin_lock_irq(&phba->hbalock);
04c68496 4348 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
2e0fef85
JS
4349 spin_unlock_irq(&phba->hbalock);
4350 rc = -ENXIO;
04c68496
JS
4351 } else {
4352 /* Allow asynchronous mailbox command to go through */
4353 spin_lock_irq(&phba->hbalock);
4354 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4355 spin_unlock_irq(&phba->hbalock);
ed957684 4356 done = 1;
cb69f7de
JS
4357
4358 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4359 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4360 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4361 "3110 Port did not grant ASABT\n");
04c68496 4362 }
dea3101e 4363 }
ed957684
JS
4364 if (!done) {
4365 rc = -EINVAL;
4366 goto do_prep_failed;
4367 }
04c68496
JS
4368 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4369 if (!pmb->u.mb.un.varCfgPort.cMA) {
34b02dcd
JS
4370 rc = -ENXIO;
4371 goto do_prep_failed;
4372 }
04c68496 4373 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
34b02dcd 4374 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
04c68496
JS
4375 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4376 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4377 phba->max_vpi : phba->max_vports;
4378
34b02dcd
JS
4379 } else
4380 phba->max_vpi = 0;
bc73905a
JS
4381 phba->fips_level = 0;
4382 phba->fips_spec_rev = 0;
4383 if (pmb->u.mb.un.varCfgPort.gdss) {
04c68496 4384 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
bc73905a
JS
4385 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4386 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4387 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4388 "2850 Security Crypto Active. FIPS x%d "
4389 "(Spec Rev: x%d)",
4390 phba->fips_level, phba->fips_spec_rev);
4391 }
4392 if (pmb->u.mb.un.varCfgPort.sec_err) {
4393 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4394 "2856 Config Port Security Crypto "
4395 "Error: x%x ",
4396 pmb->u.mb.un.varCfgPort.sec_err);
4397 }
04c68496 4398 if (pmb->u.mb.un.varCfgPort.gerbm)
34b02dcd 4399 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
04c68496 4400 if (pmb->u.mb.un.varCfgPort.gcrp)
34b02dcd 4401 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
6e7288d9
JS
4402
4403 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4404 phba->port_gp = phba->mbox->us.s3_pgp.port;
e2a0a9d6
JS
4405
4406 if (phba->cfg_enable_bg) {
04c68496 4407 if (pmb->u.mb.un.varCfgPort.gbg)
e2a0a9d6
JS
4408 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4409 else
4410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4411 "0443 Adapter did not grant "
4412 "BlockGuard\n");
4413 }
34b02dcd 4414 } else {
8f34f4ce 4415 phba->hbq_get = NULL;
34b02dcd 4416 phba->port_gp = phba->mbox->us.s2.port;
d7c255b2 4417 phba->max_vpi = 0;
ed957684 4418 }
92d7f7b0 4419do_prep_failed:
ed957684
JS
4420 mempool_free(pmb, phba->mbox_mem_pool);
4421 return rc;
4422}
4423
e59058c4
JS
4424
4425/**
3621a710 4426 * lpfc_sli_hba_setup - SLI initialization function
e59058c4
JS
4427 * @phba: Pointer to HBA context object.
4428 *
4429 * This function is the main SLI initialization function. This function
4430 * is called by the HBA initialization code, HBA reset code and HBA
4431 * error attention handler code. Caller is not required to hold any
4432 * locks. This function issues config_port mailbox command to configure
4433 * the SLI, setup iocb rings and HBQ rings. In the end the function
4434 * calls the config_port_post function to issue init_link mailbox
4435 * command and to start the discovery. The function will return zero
4436 * if successful, else it will return negative error code.
4437 **/
ed957684
JS
4438int
4439lpfc_sli_hba_setup(struct lpfc_hba *phba)
4440{
4441 uint32_t rc;
6d368e53
JS
4442 int mode = 3, i;
4443 int longs;
ed957684
JS
4444
4445 switch (lpfc_sli_mode) {
4446 case 2:
78b2d852 4447 if (phba->cfg_enable_npiv) {
92d7f7b0 4448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011 4449 "1824 NPIV enabled: Override lpfc_sli_mode "
92d7f7b0 4450 "parameter (%d) to auto (0).\n",
e8b62011 4451 lpfc_sli_mode);
92d7f7b0
JS
4452 break;
4453 }
ed957684
JS
4454 mode = 2;
4455 break;
4456 case 0:
4457 case 3:
4458 break;
4459 default:
92d7f7b0 4460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
4461 "1819 Unrecognized lpfc_sli_mode "
4462 "parameter: %d.\n", lpfc_sli_mode);
ed957684
JS
4463
4464 break;
4465 }
4466
9399627f
JS
4467 rc = lpfc_sli_config_port(phba, mode);
4468
ed957684 4469 if (rc && lpfc_sli_mode == 3)
92d7f7b0 4470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
4471 "1820 Unable to select SLI-3. "
4472 "Not supported by adapter.\n");
ed957684 4473 if (rc && mode != 2)
9399627f 4474 rc = lpfc_sli_config_port(phba, 2);
ed957684 4475 if (rc)
dea3101e
JB
4476 goto lpfc_sli_hba_setup_error;
4477
0d878419
JS
4478 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4479 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4480 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4481 if (!rc) {
4482 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4483 "2709 This device supports "
4484 "Advanced Error Reporting (AER)\n");
4485 spin_lock_irq(&phba->hbalock);
4486 phba->hba_flag |= HBA_AER_ENABLED;
4487 spin_unlock_irq(&phba->hbalock);
4488 } else {
4489 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4490 "2708 This device does not support "
4491 "Advanced Error Reporting (AER)\n");
4492 phba->cfg_aer_support = 0;
4493 }
4494 }
4495
ed957684
JS
4496 if (phba->sli_rev == 3) {
4497 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4498 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
ed957684
JS
4499 } else {
4500 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4501 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
92d7f7b0 4502 phba->sli3_options = 0;
ed957684
JS
4503 }
4504
4505 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
4506 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4507 phba->sli_rev, phba->max_vpi);
ed957684 4508 rc = lpfc_sli_ring_map(phba);
dea3101e
JB
4509
4510 if (rc)
4511 goto lpfc_sli_hba_setup_error;
4512
6d368e53
JS
4513 /* Initialize VPIs. */
4514 if (phba->sli_rev == LPFC_SLI_REV3) {
4515 /*
4516 * The VPI bitmask and physical ID array are allocated
4517 * and initialized once only - at driver load. A port
4518 * reset doesn't need to reinitialize this memory.
4519 */
4520 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4521 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4522 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4523 GFP_KERNEL);
4524 if (!phba->vpi_bmask) {
4525 rc = -ENOMEM;
4526 goto lpfc_sli_hba_setup_error;
4527 }
4528
4529 phba->vpi_ids = kzalloc(
4530 (phba->max_vpi+1) * sizeof(uint16_t),
4531 GFP_KERNEL);
4532 if (!phba->vpi_ids) {
4533 kfree(phba->vpi_bmask);
4534 rc = -ENOMEM;
4535 goto lpfc_sli_hba_setup_error;
4536 }
4537 for (i = 0; i < phba->max_vpi; i++)
4538 phba->vpi_ids[i] = i;
4539 }
4540 }
4541
9399627f 4542 /* Init HBQs */
ed957684
JS
4543 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4544 rc = lpfc_sli_hbq_setup(phba);
4545 if (rc)
4546 goto lpfc_sli_hba_setup_error;
4547 }
04c68496 4548 spin_lock_irq(&phba->hbalock);
dea3101e 4549 phba->sli.sli_flag |= LPFC_PROCESS_LA;
04c68496 4550 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
4551
4552 rc = lpfc_config_port_post(phba);
4553 if (rc)
4554 goto lpfc_sli_hba_setup_error;
4555
ed957684
JS
4556 return rc;
4557
92d7f7b0 4558lpfc_sli_hba_setup_error:
2e0fef85 4559 phba->link_state = LPFC_HBA_ERROR;
e40a02c1 4560 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4561 "0445 Firmware initialization failed\n");
dea3101e
JB
4562 return rc;
4563}
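/*
 * A sketch of the VPI bitmap sizing used in the setup path above: one bit
 * per VPI for IDs 0..max_vpi inclusive, rounded up to whole unsigned longs.
 * With max_vpi = 100 and 64-bit longs this allocates (100 + 64) / 64 = 2
 * longs, i.e. 128 bits. demo_* names are hypothetical.
 */
#include <limits.h>
#include <stdlib.h>

#define DEMO_BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long *demo_alloc_vpi_bmask(unsigned int max_vpi)
{
	size_t longs = (max_vpi + DEMO_BITS_PER_LONG) / DEMO_BITS_PER_LONG;

	/* zeroed, like kzalloc() in the driver */
	return calloc(longs, sizeof(unsigned long));
}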
4564
e59058c4 4565/**
da0436e9
JS
4566 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4567 * @phba: Pointer to HBA context object.
4568 * @mboxq: mailbox pointer.
4569 * This function issues a dump mailbox command to read config region
4570 * 23, parses the records in the region, and populates the driver
4571 * data structures.
e59058c4 4572 **/
da0436e9 4573static int
ff78d8f9 4574lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
dea3101e 4575{
ff78d8f9 4576 LPFC_MBOXQ_t *mboxq;
da0436e9
JS
4577 struct lpfc_dmabuf *mp;
4578 struct lpfc_mqe *mqe;
4579 uint32_t data_length;
4580 int rc;
dea3101e 4581
da0436e9
JS
4582 /* Program the default value of vlan_id and fc_map */
4583 phba->valid_vlan = 0;
4584 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4585 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4586 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
2e0fef85 4587
ff78d8f9
JS
4588 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4589 if (!mboxq)
da0436e9
JS
4590 return -ENOMEM;
4591
ff78d8f9
JS
4592 mqe = &mboxq->u.mqe;
4593 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4594 rc = -ENOMEM;
4595 goto out_free_mboxq;
4596 }
4597
da0436e9
JS
4598 mp = (struct lpfc_dmabuf *) mboxq->context1;
4599 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4600
4601 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4602 "(%d):2571 Mailbox cmd x%x Status x%x "
4603 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4604 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4605 "CQ: x%x x%x x%x x%x\n",
4606 mboxq->vport ? mboxq->vport->vpi : 0,
4607 bf_get(lpfc_mqe_command, mqe),
4608 bf_get(lpfc_mqe_status, mqe),
4609 mqe->un.mb_words[0], mqe->un.mb_words[1],
4610 mqe->un.mb_words[2], mqe->un.mb_words[3],
4611 mqe->un.mb_words[4], mqe->un.mb_words[5],
4612 mqe->un.mb_words[6], mqe->un.mb_words[7],
4613 mqe->un.mb_words[8], mqe->un.mb_words[9],
4614 mqe->un.mb_words[10], mqe->un.mb_words[11],
4615 mqe->un.mb_words[12], mqe->un.mb_words[13],
4616 mqe->un.mb_words[14], mqe->un.mb_words[15],
4617 mqe->un.mb_words[16], mqe->un.mb_words[50],
4618 mboxq->mcqe.word0,
4619 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4620 mboxq->mcqe.trailer);
4621
4622 if (rc) {
4623 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4624 kfree(mp);
ff78d8f9
JS
4625 rc = -EIO;
4626 goto out_free_mboxq;
da0436e9
JS
4627 }
4628 data_length = mqe->un.mb_words[5];
a0c87cbd 4629 if (data_length > DMP_RGN23_SIZE) {
d11e31dd
JS
4630 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4631 kfree(mp);
ff78d8f9
JS
4632 rc = -EIO;
4633 goto out_free_mboxq;
d11e31dd 4634 }
dea3101e 4635
da0436e9
JS
4636 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4637 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4638 kfree(mp);
ff78d8f9
JS
4639 rc = 0;
4640
4641out_free_mboxq:
4642 mempool_free(mboxq, phba->mbox_mem_pool);
4643 return rc;
da0436e9 4644}
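/*
 * The routine above follows the kernel's single-exit cleanup style: every
 * failure path sets rc and jumps to one label that releases the mailbox.
 * A minimal model of that goto-unwind convention (sketch; demo_* names
 * are hypothetical):
 */
#include <stdlib.h>

static int demo_step_that_may_fail(char *buf) { (void)buf; return 0; }

static int demo_goto_cleanup(void)
{
	int rc;
	char *buf = malloc(64);

	if (!buf)
		return -1;
	rc = demo_step_that_may_fail(buf);
	if (rc)
		goto out_free;		/* single exit frees buf exactly once */
	/* ... more work using buf ... */
	rc = 0;
out_free:
	free(buf);
	return rc;
}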
e59058c4
JS
4645
4646/**
da0436e9
JS
4647 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4648 * @phba: pointer to lpfc hba data structure.
4649 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4650 * @vpd: pointer to the memory to hold resulting port vpd data.
4651 * @vpd_size: On input, the number of bytes allocated to @vpd.
4652 * On output, the number of data bytes in @vpd.
e59058c4 4653 *
da0436e9
JS
4654 * This routine executes a READ_REV SLI4 mailbox command. In
4655 * addition, this routine gets the port vpd data.
4656 *
4657 * Return codes
af901ca1 4658 * 0 - successful
d439d286 4659 * -ENOMEM - could not allocate memory.
e59058c4 4660 **/
da0436e9
JS
4661static int
4662lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4663 uint8_t *vpd, uint32_t *vpd_size)
dea3101e 4664{
da0436e9
JS
4665 int rc = 0;
4666 uint32_t dma_size;
4667 struct lpfc_dmabuf *dmabuf;
4668 struct lpfc_mqe *mqe;
dea3101e 4669
da0436e9
JS
4670 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4671 if (!dmabuf)
4672 return -ENOMEM;
4673
4674 /*
4675 * Get a DMA buffer for the vpd data resulting from the READ_REV
4676 * mailbox command.
a257bf90 4677 */
da0436e9
JS
4678 dma_size = *vpd_size;
4679 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4680 dma_size,
4681 &dmabuf->phys,
4682 GFP_KERNEL);
4683 if (!dmabuf->virt) {
4684 kfree(dmabuf);
4685 return -ENOMEM;
a257bf90 4686 }
da0436e9 4687 memset(dmabuf->virt, 0, dma_size);
a257bf90 4688
da0436e9
JS
4689 /*
4690 * The SLI4 implementation of READ_REV conflicts at word1,
4691 * bits 31:16 and SLI4 adds vpd functionality not present
4692 * in SLI3. This code corrects the conflicts.
1dcb58e5 4693 */
da0436e9
JS
4694 lpfc_read_rev(phba, mboxq);
4695 mqe = &mboxq->u.mqe;
4696 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4697 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4698 mqe->un.read_rev.word1 &= 0x0000FFFF;
4699 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4700 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4701
4702 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4703 if (rc) {
4704 dma_free_coherent(&phba->pcidev->dev, dma_size,
4705 dmabuf->virt, dmabuf->phys);
def9c7a9 4706 kfree(dmabuf);
da0436e9
JS
4707 return -EIO;
4708 }
1dcb58e5 4709
da0436e9
JS
4710 /*
4711 * The available vpd length cannot be bigger than the
4712 * DMA buffer passed to the port. Catch the less than
4713 * case and update the caller's size.
4714 */
4715 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4716 *vpd_size = mqe->un.read_rev.avail_vpd_len;
3772a991 4717
d7c47992
JS
4718 memcpy(vpd, dmabuf->virt, *vpd_size);
4719
da0436e9
JS
4720 dma_free_coherent(&phba->pcidev->dev, dma_size,
4721 dmabuf->virt, dmabuf->phys);
4722 kfree(dmabuf);
4723 return 0;
dea3101e
JB
4724}
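/*
 * The READ_REV path above is an instance of the standard coherent-DMA
 * buffer lifecycle: allocate a buffer the device can see, hand its bus
 * address to the command, copy the result out, then free. A stripped-down
 * kernel-style sketch (demo_dma_roundtrip is hypothetical; the dma_* calls
 * are the real kernel API):
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/string.h>

static int demo_dma_roundtrip(struct device *dev, void *out, size_t len)
{
	dma_addr_t phys;
	void *virt = dma_alloc_coherent(dev, len, &phys, GFP_KERNEL);

	if (!virt)
		return -ENOMEM;
	/* ... program `phys` into the mailbox command and wait here ... */
	memcpy(out, virt, len);
	dma_free_coherent(dev, len, virt, phys);
	return 0;
}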
4725
cd1c8301
JS
4726/**
4727 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
4728 * @phba: pointer to lpfc hba data structure.
4729 *
4730 * This routine retrieves the SLI4 device physical port name that this
4731 * PCI function is attached to.
4732 *
4733 * Return codes
4734 * 0 - successful
4735 * otherwise - failed to retrieve physical port name
4736 **/
4737static int
4738lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4739{
4740 LPFC_MBOXQ_t *mboxq;
cd1c8301
JS
4741 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4742 struct lpfc_controller_attribute *cntl_attr;
4743 struct lpfc_mbx_get_port_name *get_port_name;
4744 void *virtaddr = NULL;
4745 uint32_t alloclen, reqlen;
4746 uint32_t shdr_status, shdr_add_status;
4747 union lpfc_sli4_cfg_shdr *shdr;
4748 char cport_name = 0;
4749 int rc;
4750
4751 /* We assume nothing at this point */
4752 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4753 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4754
4755 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4756 if (!mboxq)
4757 return -ENOMEM;
cd1c8301 4758 /* obtain link type and link number via READ_CONFIG */
ff78d8f9
JS
4759 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4760 lpfc_sli4_read_config(phba);
4761 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
4762 goto retrieve_ppname;
cd1c8301
JS
4763
4764 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
4765 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
4766 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4767 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
4768 LPFC_SLI4_MBX_NEMBED);
4769 if (alloclen < reqlen) {
4770 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4771 "3084 Allocated DMA memory size (%d) is "
4772 "less than the requested DMA memory size "
4773 "(%d)\n", alloclen, reqlen);
4774 rc = -ENOMEM;
4775 goto out_free_mboxq;
4776 }
4777 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4778 virtaddr = mboxq->sge_array->addr[0];
4779 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
4780 shdr = &mbx_cntl_attr->cfg_shdr;
4781 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4782 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4783 if (shdr_status || shdr_add_status || rc) {
4784 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4785 "3085 Mailbox x%x (x%x/x%x) failed, "
4786 "rc:x%x, status:x%x, add_status:x%x\n",
4787 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4788 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4789 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4790 rc, shdr_status, shdr_add_status);
4791 rc = -ENXIO;
4792 goto out_free_mboxq;
4793 }
4794 cntl_attr = &mbx_cntl_attr->cntl_attr;
4795 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4796 phba->sli4_hba.lnk_info.lnk_tp =
4797 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
4798 phba->sli4_hba.lnk_info.lnk_no =
4799 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
4800 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4801 "3086 lnk_type:%d, lnk_numb:%d\n",
4802 phba->sli4_hba.lnk_info.lnk_tp,
4803 phba->sli4_hba.lnk_info.lnk_no);
4804
4805retrieve_ppname:
4806 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4807 LPFC_MBOX_OPCODE_GET_PORT_NAME,
4808 sizeof(struct lpfc_mbx_get_port_name) -
4809 sizeof(struct lpfc_sli4_cfg_mhdr),
4810 LPFC_SLI4_MBX_EMBED);
4811 get_port_name = &mboxq->u.mqe.un.get_port_name;
4812 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
4813 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
4814 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
4815 phba->sli4_hba.lnk_info.lnk_tp);
4816 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4817 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4818 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4819 if (shdr_status || shdr_add_status || rc) {
4820 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4821 "3087 Mailbox x%x (x%x/x%x) failed: "
4822 "rc:x%x, status:x%x, add_status:x%x\n",
4823 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4824 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4825 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4826 rc, shdr_status, shdr_add_status);
4827 rc = -ENXIO;
4828 goto out_free_mboxq;
4829 }
4830 switch (phba->sli4_hba.lnk_info.lnk_no) {
4831 case LPFC_LINK_NUMBER_0:
4832 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
4833 &get_port_name->u.response);
4834 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4835 break;
4836 case LPFC_LINK_NUMBER_1:
4837 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
4838 &get_port_name->u.response);
4839 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4840 break;
4841 case LPFC_LINK_NUMBER_2:
4842 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
4843 &get_port_name->u.response);
4844 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4845 break;
4846 case LPFC_LINK_NUMBER_3:
4847 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
4848 &get_port_name->u.response);
4849 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4850 break;
4851 default:
4852 break;
4853 }
4854
4855 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
4856 phba->Port[0] = cport_name;
4857 phba->Port[1] = '\0';
4858 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4859 "3091 SLI get port name: %s\n", phba->Port);
4860 }
4861
4862out_free_mboxq:
4863 if (rc != MBX_TIMEOUT) {
4864 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
4865 lpfc_sli4_mbox_cmd_free(phba, mboxq);
4866 else
4867 mempool_free(mboxq, phba->mbox_mem_pool);
4868 }
4869 return rc;
4870}
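
The routine above repeats a status-check idiom common to SLI4_CONFIG mailboxes: both the driver return code and the cfg_shdr status words must be inspected. A condensed sketch of that idiom follows; the helper name is hypothetical, not a driver API:

	static int lpfc_sli4_cfg_shdr_ok(struct lpfc_hba *phba,
					 union lpfc_sli4_cfg_shdr *shdr, int rc)
	{
		uint32_t status, add_status;

		status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
		if (status || add_status || rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"cfg mbox failed rc:x%x status:x%x "
					"add_status:x%x\n",
					rc, status, add_status);
			return -ENXIO;
		}
		return 0;
	}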
4871
e59058c4 4872/**
da0436e9
JS
4873 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4874 * @phba: pointer to lpfc hba data structure.
e59058c4 4875 *
da0436e9
JS
4876 * This routine is called to explicitly arm the SLI4 device's completion and
 4877 * event queues.
4878 **/
4879static void
4880lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4881{
4882 uint8_t fcp_eqidx;
4883
4884 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4885 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
0558056c 4886 fcp_eqidx = 0;
2e90f4b5
JS
4887 if (phba->sli4_hba.fcp_cq) {
4888 do
4889 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4890 LPFC_QUEUE_REARM);
4891 while (++fcp_eqidx < phba->cfg_fcp_eq_count);
4892 }
da0436e9 4893 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
2e90f4b5
JS
4894 if (phba->sli4_hba.fp_eq) {
4895 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
4896 fcp_eqidx++)
4897 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4898 LPFC_QUEUE_REARM);
4899 }
da0436e9
JS
4900}
4901
6d368e53
JS
4902/**
4903 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4904 * @phba: Pointer to HBA context object.
4905 * @type: The resource extent type.
b76f2dc9
JS
4906 * @extnt_count: buffer to hold port available extent count.
4907 * @extnt_size: buffer to hold element count per extent.
6d368e53 4908 *
b76f2dc9
JS
 4909 * This function calls the port and retrieves the number of available
4910 * extents and their size for a particular extent type.
4911 *
4912 * Returns: 0 if successful. Nonzero otherwise.
6d368e53 4913 **/
b76f2dc9 4914int
6d368e53
JS
4915lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4916 uint16_t *extnt_count, uint16_t *extnt_size)
4917{
4918 int rc = 0;
4919 uint32_t length;
4920 uint32_t mbox_tmo;
4921 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
4922 LPFC_MBOXQ_t *mbox;
4923
4924 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4925 if (!mbox)
4926 return -ENOMEM;
4927
4928 /* Find out how many extents are available for this resource type */
4929 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
4930 sizeof(struct lpfc_sli4_cfg_mhdr));
4931 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4932 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
4933 length, LPFC_SLI4_MBX_EMBED);
4934
 4935 /* Send an extent count of 0 - the GET doesn't use it. */
4936 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
4937 LPFC_SLI4_MBX_EMBED);
4938 if (unlikely(rc)) {
4939 rc = -EIO;
4940 goto err_exit;
4941 }
4942
4943 if (!phba->sli4_hba.intr_enable)
4944 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4945 else {
a183a15f 4946 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
4947 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4948 }
4949 if (unlikely(rc)) {
4950 rc = -EIO;
4951 goto err_exit;
4952 }
4953
4954 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
4955 if (bf_get(lpfc_mbox_hdr_status,
4956 &rsrc_info->header.cfg_shdr.response)) {
4957 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4958 "2930 Failed to get resource extents "
4959 "Status 0x%x Add'l Status 0x%x\n",
4960 bf_get(lpfc_mbox_hdr_status,
4961 &rsrc_info->header.cfg_shdr.response),
4962 bf_get(lpfc_mbox_hdr_add_status,
4963 &rsrc_info->header.cfg_shdr.response));
4964 rc = -EIO;
4965 goto err_exit;
4966 }
4967
4968 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
4969 &rsrc_info->u.rsp);
4970 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
4971 &rsrc_info->u.rsp);
8a9d2e80
JS
4972
4973 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4974 "3162 Retrieved extents type-%d from port: count:%d, "
4975 "size:%d\n", type, *extnt_count, *extnt_size);
4976
4977err_exit:
6d368e53
JS
4978 mempool_free(mbox, phba->mbox_mem_pool);
4979 return rc;
4980}
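
A hypothetical caller sketch for this query, using the XRI type as an example:

	uint16_t ext_cnt, ext_size;
	int rc;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
					    &ext_cnt, &ext_size);
	if (rc)
		return rc;	/* -ENOMEM or -EIO */
	/* The port offers ext_cnt extents of ext_size XRIs each. */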
4981
4982/**
4983 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
4984 * @phba: Pointer to HBA context object.
4985 * @type: The extent type to check.
4986 *
4987 * This function reads the current available extents from the port and checks
4988 * if the extent count or extent size has changed since the last access.
 4989 * Callers use this routine post port reset to understand if there is an
 4990 * extent reprovisioning requirement.
4991 *
4992 * Returns:
4993 * -Error: error indicates problem.
4994 * 1: Extent count or size has changed.
4995 * 0: No changes.
4996 **/
4997static int
4998lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
4999{
5000 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5001 uint16_t size_diff, rsrc_ext_size;
5002 int rc = 0;
5003 struct lpfc_rsrc_blks *rsrc_entry;
5004 struct list_head *rsrc_blk_list = NULL;
5005
5006 size_diff = 0;
5007 curr_ext_cnt = 0;
5008 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5009 &rsrc_ext_cnt,
5010 &rsrc_ext_size);
5011 if (unlikely(rc))
5012 return -EIO;
5013
5014 switch (type) {
5015 case LPFC_RSC_TYPE_FCOE_RPI:
5016 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5017 break;
5018 case LPFC_RSC_TYPE_FCOE_VPI:
5019 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5020 break;
5021 case LPFC_RSC_TYPE_FCOE_XRI:
5022 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5023 break;
5024 case LPFC_RSC_TYPE_FCOE_VFI:
5025 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5026 break;
5027 default:
5028 break;
5029 }
5030
5031 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5032 curr_ext_cnt++;
5033 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5034 size_diff++;
5035 }
5036
5037 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5038 rc = 1;
5039
5040 return rc;
5041}
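
A sketch of how the tri-state return is consumed after a port reset; this mirrors the reallocation flow in lpfc_sli4_alloc_resource_identifiers() below:

	rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI);
	if (rc < 0)
		return rc;	/* mailbox or port failure */
	if (rc == 1) {
		/* Provisioning changed across the reset: release the old
		 * extents and allocate fresh ones.
		 */
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
	}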
5042
5043/**
 5044 * lpfc_sli4_cfg_post_extnts - Post an extent allocation request to the port.
5045 * @phba: Pointer to HBA context object.
 5046 * @extnt_cnt: number of available extents.
 5047 * @type: the extent type (rpi, xri, vfi, vpi).
 5048 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 5049 * @mbox: pointer to the caller's allocated mailbox structure.
5050 *
5051 * This function executes the extents allocation request. It also
5052 * takes care of the amount of memory needed to allocate or get the
5053 * allocated extents. It is the caller's responsibility to evaluate
5054 * the response.
5055 *
5056 * Returns:
5057 * -Error: Error value describes the condition found.
5058 * 0: if successful
5059 **/
5060static int
8a9d2e80 5061lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6d368e53
JS
5062 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5063{
5064 int rc = 0;
5065 uint32_t req_len;
5066 uint32_t emb_len;
5067 uint32_t alloc_len, mbox_tmo;
5068
5069 /* Calculate the total requested length of the dma memory */
8a9d2e80 5070 req_len = extnt_cnt * sizeof(uint16_t);
6d368e53
JS
5071
5072 /*
5073 * Calculate the size of an embedded mailbox. The uint32_t
 5074 * accounts for the extents-specific word.
5075 */
5076 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5077 sizeof(uint32_t);
5078
5079 /*
5080 * Presume the allocation and response will fit into an embedded
5081 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5082 */
5083 *emb = LPFC_SLI4_MBX_EMBED;
5084 if (req_len > emb_len) {
8a9d2e80 5085 req_len = extnt_cnt * sizeof(uint16_t) +
6d368e53
JS
5086 sizeof(union lpfc_sli4_cfg_shdr) +
5087 sizeof(uint32_t);
5088 *emb = LPFC_SLI4_MBX_NEMBED;
5089 }
5090
5091 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5092 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5093 req_len, *emb);
5094 if (alloc_len < req_len) {
5095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
b76f2dc9 5096 "2982 Allocated DMA memory size (x%x) is "
6d368e53
JS
5097 "less than the requested DMA memory "
5098 "size (x%x)\n", alloc_len, req_len);
5099 return -ENOMEM;
5100 }
8a9d2e80 5101 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6d368e53
JS
5102 if (unlikely(rc))
5103 return -EIO;
5104
5105 if (!phba->sli4_hba.intr_enable)
5106 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5107 else {
a183a15f 5108 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
5109 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5110 }
5111
5112 if (unlikely(rc))
5113 rc = -EIO;
5114 return rc;
5115}
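
To make the embedded/non-embedded decision above concrete, a worked example with hypothetical sizes:

	/*
	 * Suppose emb_len works out to 200 bytes. Requesting 64 extents
	 * needs req_len = 64 * sizeof(uint16_t) = 128 bytes and fits the
	 * embedded form. Requesting 256 extents needs 512 bytes, so the
	 * request is rebuilt for the non-embedded form, which additionally
	 * carries the cfg_shdr and the extents-specific word.
	 */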
5116
5117/**
5118 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5119 * @phba: Pointer to HBA context object.
5120 * @type: The resource extent type to allocate.
5121 *
5122 * This function allocates the number of elements for the specified
5123 * resource type.
5124 **/
5125static int
5126lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5127{
5128 bool emb = false;
5129 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5130 uint16_t rsrc_id, rsrc_start, j, k;
5131 uint16_t *ids;
5132 int i, rc;
5133 unsigned long longs;
5134 unsigned long *bmask;
5135 struct lpfc_rsrc_blks *rsrc_blks;
5136 LPFC_MBOXQ_t *mbox;
5137 uint32_t length;
5138 struct lpfc_id_range *id_array = NULL;
5139 void *virtaddr = NULL;
5140 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5141 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5142 struct list_head *ext_blk_list;
5143
5144 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5145 &rsrc_cnt,
5146 &rsrc_size);
5147 if (unlikely(rc))
5148 return -EIO;
5149
5150 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5151 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5152 "3009 No available Resource Extents "
5153 "for resource type 0x%x: Count: 0x%x, "
5154 "Size 0x%x\n", type, rsrc_cnt,
5155 rsrc_size);
5156 return -ENOMEM;
5157 }
5158
8a9d2e80
JS
5159 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5160 "2903 Post resource extents type-0x%x: "
5161 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6d368e53
JS
5162
5163 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5164 if (!mbox)
5165 return -ENOMEM;
5166
8a9d2e80 5167 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6d368e53
JS
5168 if (unlikely(rc)) {
5169 rc = -EIO;
5170 goto err_exit;
5171 }
5172
5173 /*
5174 * Figure out where the response is located. Then get local pointers
 5175 * to the response data. The port is not guaranteed to respond to
 5176 * all extent count requests, so update the local variable with the
5177 * allocated count from the port.
5178 */
5179 if (emb == LPFC_SLI4_MBX_EMBED) {
5180 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5181 id_array = &rsrc_ext->u.rsp.id[0];
5182 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5183 } else {
5184 virtaddr = mbox->sge_array->addr[0];
5185 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5186 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5187 id_array = &n_rsrc->id;
5188 }
5189
5190 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5191 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5192
5193 /*
5194 * Based on the resource size and count, correct the base and max
5195 * resource values.
5196 */
5197 length = sizeof(struct lpfc_rsrc_blks);
5198 switch (type) {
5199 case LPFC_RSC_TYPE_FCOE_RPI:
5200 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5201 sizeof(unsigned long),
5202 GFP_KERNEL);
5203 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5204 rc = -ENOMEM;
5205 goto err_exit;
5206 }
5207 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5208 sizeof(uint16_t),
5209 GFP_KERNEL);
5210 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5211 kfree(phba->sli4_hba.rpi_bmask);
5212 rc = -ENOMEM;
5213 goto err_exit;
5214 }
5215
5216 /*
5217 * The next_rpi was initialized with the maximum available
5218 * count but the port may allocate a smaller number. Catch
5219 * that case and update the next_rpi.
5220 */
5221 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5222
5223 /* Initialize local ptrs for common extent processing later. */
5224 bmask = phba->sli4_hba.rpi_bmask;
5225 ids = phba->sli4_hba.rpi_ids;
5226 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5227 break;
5228 case LPFC_RSC_TYPE_FCOE_VPI:
5229 phba->vpi_bmask = kzalloc(longs *
5230 sizeof(unsigned long),
5231 GFP_KERNEL);
5232 if (unlikely(!phba->vpi_bmask)) {
5233 rc = -ENOMEM;
5234 goto err_exit;
5235 }
5236 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5237 sizeof(uint16_t),
5238 GFP_KERNEL);
5239 if (unlikely(!phba->vpi_ids)) {
5240 kfree(phba->vpi_bmask);
5241 rc = -ENOMEM;
5242 goto err_exit;
5243 }
5244
5245 /* Initialize local ptrs for common extent processing later. */
5246 bmask = phba->vpi_bmask;
5247 ids = phba->vpi_ids;
5248 ext_blk_list = &phba->lpfc_vpi_blk_list;
5249 break;
5250 case LPFC_RSC_TYPE_FCOE_XRI:
5251 phba->sli4_hba.xri_bmask = kzalloc(longs *
5252 sizeof(unsigned long),
5253 GFP_KERNEL);
5254 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5255 rc = -ENOMEM;
5256 goto err_exit;
5257 }
8a9d2e80 5258 phba->sli4_hba.max_cfg_param.xri_used = 0;
6d368e53
JS
5259 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5260 sizeof(uint16_t),
5261 GFP_KERNEL);
5262 if (unlikely(!phba->sli4_hba.xri_ids)) {
5263 kfree(phba->sli4_hba.xri_bmask);
5264 rc = -ENOMEM;
5265 goto err_exit;
5266 }
5267
5268 /* Initialize local ptrs for common extent processing later. */
5269 bmask = phba->sli4_hba.xri_bmask;
5270 ids = phba->sli4_hba.xri_ids;
5271 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5272 break;
5273 case LPFC_RSC_TYPE_FCOE_VFI:
5274 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5275 sizeof(unsigned long),
5276 GFP_KERNEL);
5277 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5278 rc = -ENOMEM;
5279 goto err_exit;
5280 }
5281 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5282 sizeof(uint16_t),
5283 GFP_KERNEL);
5284 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5285 kfree(phba->sli4_hba.vfi_bmask);
5286 rc = -ENOMEM;
5287 goto err_exit;
5288 }
5289
5290 /* Initialize local ptrs for common extent processing later. */
5291 bmask = phba->sli4_hba.vfi_bmask;
5292 ids = phba->sli4_hba.vfi_ids;
5293 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5294 break;
5295 default:
5296 /* Unsupported Opcode. Fail call. */
5297 id_array = NULL;
5298 bmask = NULL;
5299 ids = NULL;
5300 ext_blk_list = NULL;
5301 goto err_exit;
5302 }
5303
5304 /*
5305 * Complete initializing the extent configuration with the
5306 * allocated ids assigned to this function. The bitmask serves
5307 * as an index into the array and manages the available ids. The
5308 * array just stores the ids communicated to the port via the wqes.
5309 */
5310 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5311 if ((i % 2) == 0)
5312 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5313 &id_array[k]);
5314 else
5315 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5316 &id_array[k]);
5317
5318 rsrc_blks = kzalloc(length, GFP_KERNEL);
5319 if (unlikely(!rsrc_blks)) {
5320 rc = -ENOMEM;
5321 kfree(bmask);
5322 kfree(ids);
5323 goto err_exit;
5324 }
5325 rsrc_blks->rsrc_start = rsrc_id;
5326 rsrc_blks->rsrc_size = rsrc_size;
5327 list_add_tail(&rsrc_blks->list, ext_blk_list);
5328 rsrc_start = rsrc_id;
5329 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5330 phba->sli4_hba.scsi_xri_start = rsrc_start +
5331 lpfc_sli4_get_els_iocb_cnt(phba);
5332
5333 while (rsrc_id < (rsrc_start + rsrc_size)) {
5334 ids[j] = rsrc_id;
5335 rsrc_id++;
5336 j++;
5337 }
5338 /* Entire word processed. Get next word.*/
5339 if ((i % 2) == 1)
5340 k++;
5341 }
5342 err_exit:
5343 lpfc_sli4_mbox_cmd_free(phba, mbox);
5344 return rc;
5345}
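
A worked example of the id_array unpacking above, with hypothetical numbers from the port:

	/*
	 * With rsrc_cnt = 2 extents of rsrc_size = 64 and starting ids
	 * 0 and 640 (packed two per id_array word), the loop produces:
	 *	block 0: rsrc_start = 0,   ids[0..63]   = 0..63
	 *	block 1: rsrc_start = 640, ids[64..127] = 640..703
	 * The bitmask indexes into ids[]; the values stored in ids[] are
	 * what the port expects to see in the wqes.
	 */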
5346
5347/**
5348 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5349 * @phba: Pointer to HBA context object.
5350 * @type: the extent's type.
5351 *
5352 * This function deallocates all extents of a particular resource type.
5353 * SLI4 does not allow for deallocating a particular extent range. It
5354 * is the caller's responsibility to release all kernel memory resources.
5355 **/
5356static int
5357lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5358{
5359 int rc;
5360 uint32_t length, mbox_tmo = 0;
5361 LPFC_MBOXQ_t *mbox;
5362 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5363 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5364
5365 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5366 if (!mbox)
5367 return -ENOMEM;
5368
5369 /*
 5370 * This function sends an embedded mailbox because it only sends
 5371 * the resource type. All extents of this type are released by the
5372 * port.
5373 */
5374 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5375 sizeof(struct lpfc_sli4_cfg_mhdr));
5376 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5377 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5378 length, LPFC_SLI4_MBX_EMBED);
5379
 5380 /* Send an extent count of 0 - the dealloc doesn't use it. */
5381 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5382 LPFC_SLI4_MBX_EMBED);
5383 if (unlikely(rc)) {
5384 rc = -EIO;
5385 goto out_free_mbox;
5386 }
5387 if (!phba->sli4_hba.intr_enable)
5388 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5389 else {
a183a15f 5390 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
5391 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5392 }
5393 if (unlikely(rc)) {
5394 rc = -EIO;
5395 goto out_free_mbox;
5396 }
5397
5398 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5399 if (bf_get(lpfc_mbox_hdr_status,
5400 &dealloc_rsrc->header.cfg_shdr.response)) {
5401 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5402 "2919 Failed to release resource extents "
5403 "for type %d - Status 0x%x Add'l Status 0x%x. "
5404 "Resource memory not released.\n",
5405 type,
5406 bf_get(lpfc_mbox_hdr_status,
5407 &dealloc_rsrc->header.cfg_shdr.response),
5408 bf_get(lpfc_mbox_hdr_add_status,
5409 &dealloc_rsrc->header.cfg_shdr.response));
5410 rc = -EIO;
5411 goto out_free_mbox;
5412 }
5413
5414 /* Release kernel memory resources for the specific type. */
5415 switch (type) {
5416 case LPFC_RSC_TYPE_FCOE_VPI:
5417 kfree(phba->vpi_bmask);
5418 kfree(phba->vpi_ids);
5419 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5420 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5421 &phba->lpfc_vpi_blk_list, list) {
5422 list_del_init(&rsrc_blk->list);
5423 kfree(rsrc_blk);
5424 }
5425 break;
5426 case LPFC_RSC_TYPE_FCOE_XRI:
5427 kfree(phba->sli4_hba.xri_bmask);
5428 kfree(phba->sli4_hba.xri_ids);
6d368e53
JS
5429 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5430 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5431 list_del_init(&rsrc_blk->list);
5432 kfree(rsrc_blk);
5433 }
5434 break;
5435 case LPFC_RSC_TYPE_FCOE_VFI:
5436 kfree(phba->sli4_hba.vfi_bmask);
5437 kfree(phba->sli4_hba.vfi_ids);
5438 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5439 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5440 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5441 list_del_init(&rsrc_blk->list);
5442 kfree(rsrc_blk);
5443 }
5444 break;
5445 case LPFC_RSC_TYPE_FCOE_RPI:
5446 /* RPI bitmask and physical id array are cleaned up earlier. */
5447 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5448 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5449 list_del_init(&rsrc_blk->list);
5450 kfree(rsrc_blk);
5451 }
5452 break;
5453 default:
5454 break;
5455 }
5456
5457 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5458
5459 out_free_mbox:
5460 mempool_free(mbox, phba->mbox_mem_pool);
5461 return rc;
5462}
5463
5464/**
5465 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5466 * @phba: Pointer to HBA context object.
5467 *
5468 * This function allocates all SLI4 resource identifiers.
5469 **/
5470int
5471lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5472{
5473 int i, rc, error = 0;
5474 uint16_t count, base;
5475 unsigned long longs;
5476
ff78d8f9
JS
5477 if (!phba->sli4_hba.rpi_hdrs_in_use)
5478 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6d368e53
JS
5479 if (phba->sli4_hba.extents_in_use) {
5480 /*
5481 * The port supports resource extents. The XRI, VPI, VFI, RPI
5482 * resource extent count must be read and allocated before
5483 * provisioning the resource id arrays.
5484 */
5485 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5486 LPFC_IDX_RSRC_RDY) {
5487 /*
5488 * Extent-based resources are set - the driver could
5489 * be in a port reset. Figure out if any corrective
5490 * actions need to be taken.
5491 */
5492 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5493 LPFC_RSC_TYPE_FCOE_VFI);
5494 if (rc != 0)
5495 error++;
5496 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5497 LPFC_RSC_TYPE_FCOE_VPI);
5498 if (rc != 0)
5499 error++;
5500 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5501 LPFC_RSC_TYPE_FCOE_XRI);
5502 if (rc != 0)
5503 error++;
5504 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5505 LPFC_RSC_TYPE_FCOE_RPI);
5506 if (rc != 0)
5507 error++;
5508
5509 /*
5510 * It's possible that the number of resources
5511 * provided to this port instance changed between
5512 * resets. Detect this condition and reallocate
5513 * resources. Otherwise, there is no action.
5514 */
5515 if (error) {
5516 lpfc_printf_log(phba, KERN_INFO,
5517 LOG_MBOX | LOG_INIT,
5518 "2931 Detected extent resource "
5519 "change. Reallocating all "
5520 "extents.\n");
5521 rc = lpfc_sli4_dealloc_extent(phba,
5522 LPFC_RSC_TYPE_FCOE_VFI);
5523 rc = lpfc_sli4_dealloc_extent(phba,
5524 LPFC_RSC_TYPE_FCOE_VPI);
5525 rc = lpfc_sli4_dealloc_extent(phba,
5526 LPFC_RSC_TYPE_FCOE_XRI);
5527 rc = lpfc_sli4_dealloc_extent(phba,
5528 LPFC_RSC_TYPE_FCOE_RPI);
5529 } else
5530 return 0;
5531 }
5532
5533 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5534 if (unlikely(rc))
5535 goto err_exit;
5536
5537 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5538 if (unlikely(rc))
5539 goto err_exit;
5540
5541 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5542 if (unlikely(rc))
5543 goto err_exit;
5544
5545 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5546 if (unlikely(rc))
5547 goto err_exit;
5548 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5549 LPFC_IDX_RSRC_RDY);
5550 return rc;
5551 } else {
5552 /*
5553 * The port does not support resource extents. The XRI, VPI,
5554 * VFI, RPI resource ids were determined from READ_CONFIG.
5555 * Just allocate the bitmasks and provision the resource id
5556 * arrays. If a port reset is active, the resources don't
5557 * need any action - just exit.
5558 */
5559 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
ff78d8f9
JS
5560 LPFC_IDX_RSRC_RDY) {
5561 lpfc_sli4_dealloc_resource_identifiers(phba);
5562 lpfc_sli4_remove_rpis(phba);
5563 }
6d368e53
JS
5564 /* RPIs. */
5565 count = phba->sli4_hba.max_cfg_param.max_rpi;
5566 base = phba->sli4_hba.max_cfg_param.rpi_base;
5567 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5568 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5569 sizeof(unsigned long),
5570 GFP_KERNEL);
5571 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5572 rc = -ENOMEM;
5573 goto err_exit;
5574 }
5575 phba->sli4_hba.rpi_ids = kzalloc(count *
5576 sizeof(uint16_t),
5577 GFP_KERNEL);
5578 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5579 rc = -ENOMEM;
5580 goto free_rpi_bmask;
5581 }
5582
5583 for (i = 0; i < count; i++)
5584 phba->sli4_hba.rpi_ids[i] = base + i;
5585
5586 /* VPIs. */
5587 count = phba->sli4_hba.max_cfg_param.max_vpi;
5588 base = phba->sli4_hba.max_cfg_param.vpi_base;
5589 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5590 phba->vpi_bmask = kzalloc(longs *
5591 sizeof(unsigned long),
5592 GFP_KERNEL);
5593 if (unlikely(!phba->vpi_bmask)) {
5594 rc = -ENOMEM;
5595 goto free_rpi_ids;
5596 }
5597 phba->vpi_ids = kzalloc(count *
5598 sizeof(uint16_t),
5599 GFP_KERNEL);
5600 if (unlikely(!phba->vpi_ids)) {
5601 rc = -ENOMEM;
5602 goto free_vpi_bmask;
5603 }
5604
5605 for (i = 0; i < count; i++)
5606 phba->vpi_ids[i] = base + i;
5607
5608 /* XRIs. */
5609 count = phba->sli4_hba.max_cfg_param.max_xri;
5610 base = phba->sli4_hba.max_cfg_param.xri_base;
5611 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5612 phba->sli4_hba.xri_bmask = kzalloc(longs *
5613 sizeof(unsigned long),
5614 GFP_KERNEL);
5615 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5616 rc = -ENOMEM;
5617 goto free_vpi_ids;
5618 }
41899be7 5619 phba->sli4_hba.max_cfg_param.xri_used = 0;
6d368e53
JS
5620 phba->sli4_hba.xri_ids = kzalloc(count *
5621 sizeof(uint16_t),
5622 GFP_KERNEL);
5623 if (unlikely(!phba->sli4_hba.xri_ids)) {
5624 rc = -ENOMEM;
5625 goto free_xri_bmask;
5626 }
5627
5628 for (i = 0; i < count; i++)
5629 phba->sli4_hba.xri_ids[i] = base + i;
5630
5631 /* VFIs. */
5632 count = phba->sli4_hba.max_cfg_param.max_vfi;
5633 base = phba->sli4_hba.max_cfg_param.vfi_base;
5634 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5635 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5636 sizeof(unsigned long),
5637 GFP_KERNEL);
5638 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5639 rc = -ENOMEM;
5640 goto free_xri_ids;
5641 }
5642 phba->sli4_hba.vfi_ids = kzalloc(count *
5643 sizeof(uint16_t),
5644 GFP_KERNEL);
5645 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5646 rc = -ENOMEM;
5647 goto free_vfi_bmask;
5648 }
5649
5650 for (i = 0; i < count; i++)
5651 phba->sli4_hba.vfi_ids[i] = base + i;
5652
5653 /*
 5654 * Mark all resources ready. An HBA reset does not need to
 5655 * redo this initialization.
5656 */
5657 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5658 LPFC_IDX_RSRC_RDY);
5659 return 0;
5660 }
5661
5662 free_vfi_bmask:
5663 kfree(phba->sli4_hba.vfi_bmask);
5664 free_xri_ids:
5665 kfree(phba->sli4_hba.xri_ids);
5666 free_xri_bmask:
5667 kfree(phba->sli4_hba.xri_bmask);
5668 free_vpi_ids:
5669 kfree(phba->vpi_ids);
5670 free_vpi_bmask:
5671 kfree(phba->vpi_bmask);
5672 free_rpi_ids:
5673 kfree(phba->sli4_hba.rpi_ids);
5674 free_rpi_bmask:
5675 kfree(phba->sli4_hba.rpi_bmask);
5676 err_exit:
5677 return rc;
5678}
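
To show how a bitmask/ids pair set up above is consumed, here is a sketch of an allocator in the style the driver uses elsewhere (e.g. its rpi allocator); the helper name is hypothetical:

	static uint16_t lpfc_sli4_draw_rpi(struct lpfc_hba *phba)
	{
		unsigned long idx, max = phba->sli4_hba.next_rpi;

		spin_lock_irq(&phba->hbalock);
		idx = find_first_zero_bit(phba->sli4_hba.rpi_bmask, max);
		if (idx >= max) {
			spin_unlock_irq(&phba->hbalock);
			return 0xFFFF;	/* no free slot */
		}
		set_bit(idx, phba->sli4_hba.rpi_bmask);
		spin_unlock_irq(&phba->hbalock);
		return phba->sli4_hba.rpi_ids[idx];	/* port-visible id */
	}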
5679
5680/**
5681 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5682 * @phba: Pointer to HBA context object.
5683 *
 5684 * This function deallocates all SLI4 resource identifiers and the
 5685 * kernel memory that backs them.
5686 **/
5687int
5688lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5689{
5690 if (phba->sli4_hba.extents_in_use) {
5691 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5692 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5693 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5694 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5695 } else {
5696 kfree(phba->vpi_bmask);
5697 kfree(phba->vpi_ids);
5698 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5699 kfree(phba->sli4_hba.xri_bmask);
5700 kfree(phba->sli4_hba.xri_ids);
6d368e53
JS
5701 kfree(phba->sli4_hba.vfi_bmask);
5702 kfree(phba->sli4_hba.vfi_ids);
5703 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5704 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5705 }
5706
5707 return 0;
5708}
5709
b76f2dc9
JS
5710/**
5711 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5712 * @phba: Pointer to HBA context object.
5713 * @type: The resource extent type.
 5714 * @extnt_cnt: buffer to hold port extent count response.
5715 * @extnt_size: buffer to hold port extent size response.
5716 *
 5717 * This function calls the port to read the host-allocated extents
5718 * for a particular type.
5719 **/
5720int
5721lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5722 uint16_t *extnt_cnt, uint16_t *extnt_size)
5723{
5724 bool emb;
5725 int rc = 0;
5726 uint16_t curr_blks = 0;
5727 uint32_t req_len, emb_len;
5728 uint32_t alloc_len, mbox_tmo;
5729 struct list_head *blk_list_head;
5730 struct lpfc_rsrc_blks *rsrc_blk;
5731 LPFC_MBOXQ_t *mbox;
5732 void *virtaddr = NULL;
5733 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5734 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5735 union lpfc_sli4_cfg_shdr *shdr;
5736
5737 switch (type) {
5738 case LPFC_RSC_TYPE_FCOE_VPI:
5739 blk_list_head = &phba->lpfc_vpi_blk_list;
5740 break;
5741 case LPFC_RSC_TYPE_FCOE_XRI:
5742 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5743 break;
5744 case LPFC_RSC_TYPE_FCOE_VFI:
5745 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5746 break;
5747 case LPFC_RSC_TYPE_FCOE_RPI:
5748 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5749 break;
5750 default:
5751 return -EIO;
5752 }
5753
 5754 /* Count the number of extents currently allocated for this type. */
5755 list_for_each_entry(rsrc_blk, blk_list_head, list) {
5756 if (curr_blks == 0) {
5757 /*
 5758 * The GET_ALLOCATED mailbox returns only the count, not
 5759 * the size. All extents of a given type share the same
 5760 * size, and that size is stored in each allocated block,
 5761 * so take the return value from the first block on the
 5762 * list now.
5763 */
5764 *extnt_size = rsrc_blk->rsrc_size;
5765 }
5766 curr_blks++;
5767 }
5768
5769 /* Calculate the total requested length of the dma memory. */
5770 req_len = curr_blks * sizeof(uint16_t);
5771
5772 /*
5773 * Calculate the size of an embedded mailbox. The uint32_t
 5774 * accounts for the extents-specific word.
5775 */
5776 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5777 sizeof(uint32_t);
5778
5779 /*
5780 * Presume the allocation and response will fit into an embedded
5781 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5782 */
5783 emb = LPFC_SLI4_MBX_EMBED;
 5784 /* req_len already holds curr_blks * sizeof(uint16_t) from above */
5785 if (req_len > emb_len) {
5786 req_len = curr_blks * sizeof(uint16_t) +
5787 sizeof(union lpfc_sli4_cfg_shdr) +
5788 sizeof(uint32_t);
5789 emb = LPFC_SLI4_MBX_NEMBED;
5790 }
5791
5792 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5793 if (!mbox)
5794 return -ENOMEM;
5795 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5796
5797 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5798 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5799 req_len, emb);
5800 if (alloc_len < req_len) {
5801 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5802 "2983 Allocated DMA memory size (x%x) is "
5803 "less than the requested DMA memory "
5804 "size (x%x)\n", alloc_len, req_len);
5805 rc = -ENOMEM;
5806 goto err_exit;
5807 }
5808 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5809 if (unlikely(rc)) {
5810 rc = -EIO;
5811 goto err_exit;
5812 }
5813
5814 if (!phba->sli4_hba.intr_enable)
5815 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5816 else {
a183a15f 5817 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
b76f2dc9
JS
5818 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5819 }
5820
5821 if (unlikely(rc)) {
5822 rc = -EIO;
5823 goto err_exit;
5824 }
5825
5826 /*
5827 * Figure out where the response is located. Then get local pointers
 5828 * to the response data. The port is not guaranteed to respond to
 5829 * all extent count requests, so update the local variable with the
5830 * allocated count from the port.
5831 */
5832 if (emb == LPFC_SLI4_MBX_EMBED) {
5833 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5834 shdr = &rsrc_ext->header.cfg_shdr;
5835 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5836 } else {
5837 virtaddr = mbox->sge_array->addr[0];
5838 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5839 shdr = &n_rsrc->cfg_shdr;
5840 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5841 }
5842
5843 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5844 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5845 "2984 Failed to read allocated resources "
5846 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5847 type,
5848 bf_get(lpfc_mbox_hdr_status, &shdr->response),
5849 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5850 rc = -EIO;
5851 goto err_exit;
5852 }
5853 err_exit:
5854 lpfc_sli4_mbox_cmd_free(phba, mbox);
5855 return rc;
5856}
5857
8a9d2e80
JS
5858/**
 5859 * lpfc_sli4_repost_els_sgl_list - Repost the els buffer sgl pages as a block
5860 * @phba: pointer to lpfc hba data structure.
5861 *
 5862 * This routine walks the list of els buffers that have been allocated and
 5863 * reposts them to the port using SGL block post. This is needed after a
 5864 * pci_function_reset/warm_start or start. It attempts to construct blocks
 5865 * of els buffer sgls that contain contiguous xris and uses the non-embedded
 5866 * SGL block post mailbox command to post them to the port. For a single els
 5867 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL
 5868 * post mailbox command instead.
5869 *
5870 * Returns: 0 = success, non-zero failure.
5871 **/
5872static int
5873lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5874{
5875 struct lpfc_sglq *sglq_entry = NULL;
5876 struct lpfc_sglq *sglq_entry_next = NULL;
5877 struct lpfc_sglq *sglq_entry_first = NULL;
5878 int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
5879 int last_xritag = NO_XRI;
5880 LIST_HEAD(prep_sgl_list);
5881 LIST_HEAD(blck_sgl_list);
5882 LIST_HEAD(allc_sgl_list);
5883 LIST_HEAD(post_sgl_list);
5884 LIST_HEAD(free_sgl_list);
5885
5886 spin_lock(&phba->hbalock);
5887 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
5888 spin_unlock(&phba->hbalock);
5889
5890 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
5891 &allc_sgl_list, list) {
5892 list_del_init(&sglq_entry->list);
5893 block_cnt++;
5894 if ((last_xritag != NO_XRI) &&
5895 (sglq_entry->sli4_xritag != last_xritag + 1)) {
5896 /* a hole in xri block, form a sgl posting block */
5897 list_splice_init(&prep_sgl_list, &blck_sgl_list);
5898 post_cnt = block_cnt - 1;
5899 /* prepare list for next posting block */
5900 list_add_tail(&sglq_entry->list, &prep_sgl_list);
5901 block_cnt = 1;
5902 } else {
5903 /* prepare list for next posting block */
5904 list_add_tail(&sglq_entry->list, &prep_sgl_list);
5905 /* enough sgls for non-embed sgl mbox command */
5906 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
5907 list_splice_init(&prep_sgl_list,
5908 &blck_sgl_list);
5909 post_cnt = block_cnt;
5910 block_cnt = 0;
5911 }
5912 }
5913 num_posted++;
5914
5915 /* keep track of last sgl's xritag */
5916 last_xritag = sglq_entry->sli4_xritag;
5917
5918 /* end of repost sgl list condition for els buffers */
5919 if (num_posted == phba->sli4_hba.els_xri_cnt) {
5920 if (post_cnt == 0) {
5921 list_splice_init(&prep_sgl_list,
5922 &blck_sgl_list);
5923 post_cnt = block_cnt;
5924 } else if (block_cnt == 1) {
5925 status = lpfc_sli4_post_sgl(phba,
5926 sglq_entry->phys, 0,
5927 sglq_entry->sli4_xritag);
5928 if (!status) {
5929 /* successful, put sgl to posted list */
5930 list_add_tail(&sglq_entry->list,
5931 &post_sgl_list);
5932 } else {
5933 /* Failure, put sgl to free list */
5934 lpfc_printf_log(phba, KERN_WARNING,
5935 LOG_SLI,
5936 "3159 Failed to post els "
5937 "sgl, xritag:x%x\n",
5938 sglq_entry->sli4_xritag);
5939 list_add_tail(&sglq_entry->list,
5940 &free_sgl_list);
5941 spin_lock_irq(&phba->hbalock);
5942 phba->sli4_hba.els_xri_cnt--;
5943 spin_unlock_irq(&phba->hbalock);
5944 }
5945 }
5946 }
5947
 5948 /* continue until a non-embedded page's worth of sgls is gathered */
5949 if (post_cnt == 0)
5950 continue;
5951
5952 /* post the els buffer list sgls as a block */
5953 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
5954 post_cnt);
5955
5956 if (!status) {
5957 /* success, put sgl list to posted sgl list */
5958 list_splice_init(&blck_sgl_list, &post_sgl_list);
5959 } else {
5960 /* Failure, put sgl list to free sgl list */
5961 sglq_entry_first = list_first_entry(&blck_sgl_list,
5962 struct lpfc_sglq,
5963 list);
5964 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5965 "3160 Failed to post els sgl-list, "
5966 "xritag:x%x-x%x\n",
5967 sglq_entry_first->sli4_xritag,
5968 (sglq_entry_first->sli4_xritag +
5969 post_cnt - 1));
5970 list_splice_init(&blck_sgl_list, &free_sgl_list);
5971 spin_lock_irq(&phba->hbalock);
5972 phba->sli4_hba.els_xri_cnt -= post_cnt;
5973 spin_unlock_irq(&phba->hbalock);
5974 }
5975
 5976 /* don't reset xritag due to hole in xri block */
5977 if (block_cnt == 0)
5978 last_xritag = NO_XRI;
5979
5980 /* reset els sgl post count for next round of posting */
5981 post_cnt = 0;
5982 }
5983
5984 /* free the els sgls failed to post */
5985 lpfc_free_sgl_list(phba, &free_sgl_list);
5986
 5987 /* push els sgls posted to the available list */
5988 if (!list_empty(&post_sgl_list)) {
5989 spin_lock(&phba->hbalock);
5990 list_splice_init(&post_sgl_list,
5991 &phba->sli4_hba.lpfc_sgl_list);
5992 spin_unlock(&phba->hbalock);
5993 } else {
5994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5995 "3161 Failure to post els sgl to port.\n");
5996 return -EIO;
5997 }
5998 return 0;
5999}
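
A worked example of the block-forming walk above, with hypothetical xritags:

	/*
	 * For sgls carrying xritags 10, 11, 12, 20, 21 the walk forms two
	 * posting blocks, {10,11,12} and {20,21}, because 12 -> 20 breaks
	 * the sli4_xritag == last_xritag + 1 run. Contiguity matters: a
	 * non-embedded SGL block post covers a run of consecutive xris.
	 */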
6000
da0436e9
JS
6001/**
 6002 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6003 * @phba: Pointer to HBA context object.
6004 *
 6005 * This function is the main SLI4 device initialization PCI function. This
 6006 * function is called by the HBA initialization code, HBA reset code and
6007 * HBA error attention handler code. Caller is not required to hold any
6008 * locks.
6009 **/
6010int
6011lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6012{
6013 int rc;
6014 LPFC_MBOXQ_t *mboxq;
6015 struct lpfc_mqe *mqe;
6016 uint8_t *vpd;
6017 uint32_t vpd_size;
6018 uint32_t ftr_rsp = 0;
6019 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6020 struct lpfc_vport *vport = phba->pport;
6021 struct lpfc_dmabuf *mp;
6022
6023 /* Perform a PCI function reset to start from clean */
6024 rc = lpfc_pci_function_reset(phba);
6025 if (unlikely(rc))
6026 return -ENODEV;
6027
 6028 /* Check the HBA Host Status Register for readiness */
6029 rc = lpfc_sli4_post_status_check(phba);
6030 if (unlikely(rc))
6031 return -ENODEV;
6032 else {
6033 spin_lock_irq(&phba->hbalock);
6034 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6035 spin_unlock_irq(&phba->hbalock);
6036 }
6037
6038 /*
6039 * Allocate a single mailbox container for initializing the
6040 * port.
6041 */
6042 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6043 if (!mboxq)
6044 return -ENOMEM;
6045
da0436e9 6046 /* Issue READ_REV to collect vpd and FW information. */
49198b37 6047 vpd_size = SLI4_PAGE_SIZE;
da0436e9
JS
6048 vpd = kzalloc(vpd_size, GFP_KERNEL);
6049 if (!vpd) {
6050 rc = -ENOMEM;
6051 goto out_free_mbox;
6052 }
6053
6054 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
76a95d75
JS
6055 if (unlikely(rc)) {
6056 kfree(vpd);
6057 goto out_free_mbox;
6058 }
da0436e9 6059 mqe = &mboxq->u.mqe;
f1126688
JS
6060 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6061 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
76a95d75
JS
6062 phba->hba_flag |= HBA_FCOE_MODE;
6063 else
6064 phba->hba_flag &= ~HBA_FCOE_MODE;
45ed1190
JS
6065
6066 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6067 LPFC_DCBX_CEE_MODE)
6068 phba->hba_flag |= HBA_FIP_SUPPORT;
6069 else
6070 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6071
c31098ce 6072 if (phba->sli_rev != LPFC_SLI_REV4) {
da0436e9
JS
6073 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6074 "0376 READ_REV Error. SLI Level %d "
6075 "FCoE enabled %d\n",
76a95d75 6076 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
da0436e9 6077 rc = -EIO;
76a95d75
JS
6078 kfree(vpd);
6079 goto out_free_mbox;
da0436e9 6080 }
cd1c8301 6081
ff78d8f9
JS
6082 /*
6083 * Continue initialization with default values even if driver failed
 6084 * to read FCoE param config regions; only read parameters if the
 6085 * board is FCoE.
6086 */
6087 if (phba->hba_flag & HBA_FCOE_MODE &&
6088 lpfc_sli4_read_fcoe_params(phba))
6089 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6090 "2570 Failed to read FCoE parameters\n");
6091
cd1c8301
JS
6092 /*
 6093 * Retrieve the sli4 device physical port name; failure to do so
 6094 * is considered non-fatal.
6095 */
6096 rc = lpfc_sli4_retrieve_pport_name(phba);
6097 if (!rc)
6098 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6099 "3080 Successful retrieving SLI4 device "
6100 "physical port name: %s.\n", phba->Port);
6101
da0436e9
JS
6102 /*
6103 * Evaluate the read rev and vpd data. Populate the driver
6104 * state with the results. If this routine fails, the failure
6105 * is not fatal as the driver will use generic values.
6106 */
6107 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6108 if (unlikely(!rc)) {
6109 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6110 "0377 Error %d parsing vpd. "
6111 "Using defaults.\n", rc);
6112 rc = 0;
6113 }
76a95d75 6114 kfree(vpd);
da0436e9 6115
f1126688
JS
6116 /* Save information as VPD data */
6117 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6118 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6119 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6120 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6121 &mqe->un.read_rev);
6122 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6123 &mqe->un.read_rev);
6124 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6125 &mqe->un.read_rev);
6126 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6127 &mqe->un.read_rev);
6128 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6129 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6130 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6131 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6132 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6133 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6134 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6135 "(%d):0380 READ_REV Status x%x "
6136 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6137 mboxq->vport ? mboxq->vport->vpi : 0,
6138 bf_get(lpfc_mqe_status, mqe),
6139 phba->vpd.rev.opFwName,
6140 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6141 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
da0436e9
JS
6142
6143 /*
6144 * Discover the port's supported feature set and match it against the
 6145 * host's requests.
6146 */
6147 lpfc_request_features(phba, mboxq);
6148 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6149 if (unlikely(rc)) {
6150 rc = -EIO;
76a95d75 6151 goto out_free_mbox;
da0436e9
JS
6152 }
6153
6154 /*
6155 * The port must support FCP initiator mode as this is the
6156 * only mode running in the host.
6157 */
6158 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6159 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6160 "0378 No support for fcpi mode.\n");
6161 ftr_rsp++;
6162 }
fedd3b7b
JS
6163 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6164 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6165 else
6166 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
da0436e9
JS
6167 /*
6168 * If the port cannot support the host's requested features
6169 * then turn off the global config parameters to disable the
6170 * feature in the driver. This is not a fatal error.
6171 */
bf08611b
JS
6172 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6173 if (phba->cfg_enable_bg) {
6174 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6175 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6176 else
6177 ftr_rsp++;
6178 }
da0436e9
JS
6179
6180 if (phba->max_vpi && phba->cfg_enable_npiv &&
6181 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6182 ftr_rsp++;
6183
6184 if (ftr_rsp) {
6185 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6186 "0379 Feature Mismatch Data: x%08x %08x "
6187 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6188 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6189 phba->cfg_enable_npiv, phba->max_vpi);
6190 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6191 phba->cfg_enable_bg = 0;
6192 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6193 phba->cfg_enable_npiv = 0;
6194 }
6195
6196 /* These SLI3 features are assumed in SLI4 */
6197 spin_lock_irq(&phba->hbalock);
6198 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6199 spin_unlock_irq(&phba->hbalock);
6200
6d368e53
JS
6201 /*
 6202 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
 6203 * calls depend on these resources to complete port setup.
6204 */
6205 rc = lpfc_sli4_alloc_resource_identifiers(phba);
6206 if (rc) {
6207 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6208 "2920 Failed to alloc Resource IDs "
6209 "rc = x%x\n", rc);
6210 goto out_free_mbox;
6211 }
6212
da0436e9 6213 /* Read the port's service parameters. */
9f1177a3
JS
6214 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6215 if (rc) {
6216 phba->link_state = LPFC_HBA_ERROR;
6217 rc = -ENOMEM;
76a95d75 6218 goto out_free_mbox;
9f1177a3
JS
6219 }
6220
da0436e9
JS
6221 mboxq->vport = vport;
6222 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6223 mp = (struct lpfc_dmabuf *) mboxq->context1;
6224 if (rc == MBX_SUCCESS) {
6225 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6226 rc = 0;
6227 }
6228
6229 /*
6230 * This memory was allocated by the lpfc_read_sparam routine. Release
6231 * it to the mbuf pool.
6232 */
6233 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6234 kfree(mp);
6235 mboxq->context1 = NULL;
6236 if (unlikely(rc)) {
6237 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6238 "0382 READ_SPARAM command failed "
6239 "status %d, mbxStatus x%x\n",
6240 rc, bf_get(lpfc_mqe_status, mqe));
6241 phba->link_state = LPFC_HBA_ERROR;
6242 rc = -EIO;
76a95d75 6243 goto out_free_mbox;
da0436e9
JS
6244 }
6245
0558056c 6246 lpfc_update_vport_wwn(vport);
da0436e9
JS
6247
6248 /* Update the fc_host data structures with new wwn. */
6249 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6250 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6251
8a9d2e80
JS
6252 /* update host els and scsi xri-sgl sizes and mappings */
6253 rc = lpfc_sli4_xri_sgl_update(phba);
6254 if (unlikely(rc)) {
6255 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6256 "1400 Failed to update xri-sgl size and "
6257 "mapping: %d\n", rc);
6258 goto out_free_mbox;
da0436e9
JS
6259 }
6260
8a9d2e80
JS
6261 /* register the els sgl pool to the port */
6262 rc = lpfc_sli4_repost_els_sgl_list(phba);
6263 if (unlikely(rc)) {
6264 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6265 "0582 Error %d during els sgl post "
6266 "operation\n", rc);
6267 rc = -ENODEV;
6268 goto out_free_mbox;
6269 }
6270
6271 /* register the allocated scsi sgl pool to the port */
da0436e9
JS
6272 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6273 if (unlikely(rc)) {
6d368e53 6274 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6a9c52cf
JS
6275 "0383 Error %d during scsi sgl post "
6276 "operation\n", rc);
da0436e9
JS
6277 /* Some Scsi buffers were moved to the abort scsi list */
6278 /* A pci function reset will repost them */
6279 rc = -ENODEV;
76a95d75 6280 goto out_free_mbox;
da0436e9
JS
6281 }
6282
6283 /* Post the rpi header region to the device. */
6284 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6285 if (unlikely(rc)) {
6286 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6287 "0393 Error %d during rpi post operation\n",
6288 rc);
6289 rc = -ENODEV;
76a95d75 6290 goto out_free_mbox;
da0436e9 6291 }
97f2ecf1 6292 lpfc_sli4_node_prep(phba);
da0436e9 6293
5350d872
JS
6294 /* Create all the SLI4 queues */
6295 rc = lpfc_sli4_queue_create(phba);
6296 if (rc) {
6297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6298 "3089 Failed to allocate queues\n");
6299 rc = -ENODEV;
6300 goto out_stop_timers;
6301 }
da0436e9
JS
6302 /* Set up all the queues to the device */
6303 rc = lpfc_sli4_queue_setup(phba);
6304 if (unlikely(rc)) {
6305 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6306 "0381 Error %d during queue setup.\n ", rc);
5350d872 6307 goto out_destroy_queue;
da0436e9
JS
6308 }
6309
6310 /* Arm the CQs and then EQs on device */
6311 lpfc_sli4_arm_cqeq_intr(phba);
6312
6313 /* Indicate device interrupt mode */
6314 phba->sli4_hba.intr_enable = 1;
6315
6316 /* Allow asynchronous mailbox command to go through */
6317 spin_lock_irq(&phba->hbalock);
6318 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6319 spin_unlock_irq(&phba->hbalock);
6320
6321 /* Post receive buffers to the device */
6322 lpfc_sli4_rb_setup(phba);
6323
fc2b989b
JS
6324 /* Reset HBA FCF states after HBA reset */
6325 phba->fcf.fcf_flag = 0;
6326 phba->fcf.current_rec.flag = 0;
6327
da0436e9 6328 /* Start the ELS watchdog timer */
8fa38513
JS
6329 mod_timer(&vport->els_tmofunc,
6330 jiffies + HZ * (phba->fc_ratov * 2));
da0436e9
JS
6331
6332 /* Start heart beat timer */
6333 mod_timer(&phba->hb_tmofunc,
6334 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
6335 phba->hb_outstanding = 0;
6336 phba->last_completion_time = jiffies;
6337
6338 /* Start error attention (ERATT) polling timer */
6339 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
6340
75baf696
JS
6341 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6342 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6343 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6344 if (!rc) {
6345 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6346 "2829 This device supports "
6347 "Advanced Error Reporting (AER)\n");
6348 spin_lock_irq(&phba->hbalock);
6349 phba->hba_flag |= HBA_AER_ENABLED;
6350 spin_unlock_irq(&phba->hbalock);
6351 } else {
6352 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6353 "2830 This device does not support "
6354 "Advanced Error Reporting (AER)\n");
6355 phba->cfg_aer_support = 0;
6356 }
0a96e975 6357 rc = 0;
75baf696
JS
6358 }
6359
76a95d75
JS
6360 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6361 /*
6362 * The FC Port needs to register FCFI (index 0)
6363 */
6364 lpfc_reg_fcfi(phba, mboxq);
6365 mboxq->vport = phba->pport;
6366 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9589b062 6367 if (rc != MBX_SUCCESS)
76a95d75 6368 goto out_unset_queue;
9589b062
JS
6369 rc = 0;
6370 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6371 &mboxq->u.mqe.un.reg_fcfi);
026abb87
JS
6372
6373 /* Check if the port is configured to be disabled */
6374 lpfc_sli_read_link_ste(phba);
76a95d75 6375 }
026abb87 6376
da0436e9
JS
6377 /*
6378 * The port is ready, set the host's link state to LINK_DOWN
6379 * in preparation for link interrupts.
6380 */
da0436e9
JS
6381 spin_lock_irq(&phba->hbalock);
6382 phba->link_state = LPFC_LINK_DOWN;
6383 spin_unlock_irq(&phba->hbalock);
026abb87
JS
6384 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6385 (phba->hba_flag & LINK_DISABLED)) {
6386 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6387 "3103 Adapter Link is disabled.\n");
6388 lpfc_down_link(phba, mboxq);
6389 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6390 if (rc != MBX_SUCCESS) {
6391 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6392 "3104 Adapter failed to issue "
6393 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6394 goto out_unset_queue;
6395 }
6396 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
1b51197d
JS
6397 /* don't perform init_link on SLI4 FC port loopback test */
6398 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6399 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6400 if (rc)
6401 goto out_unset_queue;
6402 }
5350d872
JS
6403 }
6404 mempool_free(mboxq, phba->mbox_mem_pool);
6405 return rc;
76a95d75 6406out_unset_queue:
da0436e9 6407 /* Unset all the queues set up in this routine when error out */
5350d872
JS
6408 lpfc_sli4_queue_unset(phba);
6409out_destroy_queue:
6410 lpfc_sli4_queue_destroy(phba);
da0436e9 6411out_stop_timers:
5350d872 6412 lpfc_stop_hba_timers(phba);
da0436e9
JS
6413out_free_mbox:
6414 mempool_free(mboxq, phba->mbox_mem_pool);
6415 return rc;
6416}
6417
6418/**
6419 * lpfc_mbox_timeout - Timeout call back function for mbox timer
6420 * @ptr: context object - pointer to hba structure.
6421 *
6422 * This is the callback function for mailbox timer. The mailbox
6423 * timer is armed when a new mailbox command is issued and the timer
6424 * is deleted when the mailbox complete. The function is called by
6425 * the kernel timer code when a mailbox does not complete within
6426 * expected time. This function wakes up the worker thread to
6427 * process the mailbox timeout and returns. All the processing is
6428 * done by the worker thread function lpfc_mbox_timeout_handler.
6429 **/
6430void
6431lpfc_mbox_timeout(unsigned long ptr)
6432{
6433 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
6434 unsigned long iflag;
6435 uint32_t tmo_posted;
6436
6437 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
6438 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
6439 if (!tmo_posted)
6440 phba->pport->work_port_events |= WORKER_MBOX_TMO;
6441 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
6442
6443 if (!tmo_posted)
6444 lpfc_worker_wake_up(phba);
6445 return;
6446}
6447
6448
6449/**
6450 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
6451 * @phba: Pointer to HBA context object.
6452 *
6453 * This function is called from worker thread when a mailbox command times out.
6454 * The caller is not required to hold any locks. This function will reset the
6455 * HBA and recover all the pending commands.
6456 **/
6457void
6458lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6459{
6460 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
04c68496 6461 MAILBOX_t *mb = &pmbox->u.mb;
da0436e9
JS
6462 struct lpfc_sli *psli = &phba->sli;
6463 struct lpfc_sli_ring *pring;
6464
6465 /* Check the pmbox pointer first. There is a race condition
6466 * between the mbox timeout handler getting executed in the
6467 * worklist and the mailbox actually completing. When this
6468 * race condition occurs, the mbox_active will be NULL.
6469 */
6470 spin_lock_irq(&phba->hbalock);
6471 if (pmbox == NULL) {
6472 lpfc_printf_log(phba, KERN_WARNING,
6473 LOG_MBOX | LOG_SLI,
6474 "0353 Active Mailbox cleared - mailbox timeout "
6475 "exiting\n");
6476 spin_unlock_irq(&phba->hbalock);
6477 return;
6478 }
6479
6480 /* Mbox cmd <mbxCommand> timeout */
6481 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6482 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
6483 mb->mbxCommand,
6484 phba->pport->port_state,
6485 phba->sli.sli_flag,
6486 phba->sli.mbox_active);
6487 spin_unlock_irq(&phba->hbalock);
6488
6489 /* Setting state unknown so lpfc_sli_abort_iocb_ring
6490 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
25985edc 6491 * it to fail all outstanding SCSI IO.
da0436e9
JS
6492 */
6493 spin_lock_irq(&phba->pport->work_port_lock);
6494 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
6495 spin_unlock_irq(&phba->pport->work_port_lock);
6496 spin_lock_irq(&phba->hbalock);
6497 phba->link_state = LPFC_LINK_UNKNOWN;
f4b4c68f 6498 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
da0436e9
JS
6499 spin_unlock_irq(&phba->hbalock);
6500
6501 pring = &psli->ring[psli->fcp_ring];
6502 lpfc_sli_abort_iocb_ring(phba, pring);
6503
6504 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6505 "0345 Resetting board due to mailbox timeout\n");
6506
6507 /* Reset the HBA device */
6508 lpfc_reset_hba(phba);
6509}
6510
6511/**
6512 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
6513 * @phba: Pointer to HBA context object.
6514 * @pmbox: Pointer to mailbox object.
6515 * @flag: Flag indicating how the mailbox needs to be processed.
6516 *
6517 * This function is called by discovery code and HBA management code
6518 * to submit a mailbox command to firmware with SLI-3 interface spec. This
6519 * function gets the hbalock to protect the data structures.
6520 * The mailbox command can be submitted in polling mode, in which case
6521 * this function will wait in a polling loop for the completion of the
6522 * mailbox.
6523 * If the mailbox is submitted in no_wait mode (not polling), the
6524 * function will submit the command and return immediately without waiting
6525 * for the mailbox completion. The no_wait mode is supported only when the
6526 * HBA is in SLI2/SLI3 mode - interrupts are enabled.
6527 * The SLI interface allows only one mailbox pending at a time. If the
6528 * mailbox is issued in polling mode and there is already a mailbox
6529 * pending, then the function will return an error. If the mailbox is issued
6530 * in NO_WAIT mode and there is a mailbox pending already, the function
6531 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
6532 * The SLI layer owns the mailbox object until the completion of the mailbox
6533 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
6534 * return codes the caller owns the mailbox command after the return of
6535 * the function.
e59058c4 6536 **/
3772a991
JS
6537static int
6538lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6539 uint32_t flag)
dea3101e 6540{
dea3101e 6541 MAILBOX_t *mb;
2e0fef85 6542 struct lpfc_sli *psli = &phba->sli;
dea3101e 6543 uint32_t status, evtctr;
9940b97b 6544 uint32_t ha_copy, hc_copy;
dea3101e 6545 int i;
09372820 6546 unsigned long timeout;
dea3101e 6547 unsigned long drvr_flag = 0;
34b02dcd 6548 uint32_t word0, ldata;
dea3101e 6549 void __iomem *to_slim;
58da1ffb
JS
6550 int processing_queue = 0;
6551
6552 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6553 if (!pmbox) {
8568a4d2 6554 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
58da1ffb 6555 /* processing mbox queue from intr_handler */
3772a991
JS
6556 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6557 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6558 return MBX_SUCCESS;
6559 }
58da1ffb 6560 processing_queue = 1;
58da1ffb
JS
6561 pmbox = lpfc_mbox_get(phba);
6562 if (!pmbox) {
6563 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6564 return MBX_SUCCESS;
6565 }
6566 }
dea3101e 6567
ed957684 6568 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
92d7f7b0 6569 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
ed957684 6570 if(!pmbox->vport) {
58da1ffb 6571 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
ed957684 6572 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 6573 LOG_MBOX | LOG_VPORT,
e8b62011 6574 "1806 Mbox x%x failed. No vport\n",
3772a991 6575 pmbox->u.mb.mbxCommand);
ed957684 6576 dump_stack();
58da1ffb 6577 goto out_not_finished;
ed957684
JS
6578 }
6579 }
6580
8d63f375 6581 /* If the PCI channel is in offline state, do not post mbox. */
58da1ffb
JS
6582 if (unlikely(pci_channel_offline(phba->pcidev))) {
6583 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6584 goto out_not_finished;
6585 }
8d63f375 6586
a257bf90
JS
6587 /* If HBA has a deferred error attention, fail the iocb. */
6588 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
6589 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6590 goto out_not_finished;
6591 }
6592
dea3101e 6593 psli = &phba->sli;
92d7f7b0 6594
3772a991 6595 mb = &pmbox->u.mb;
dea3101e
JB
6596 status = MBX_SUCCESS;
6597
2e0fef85
JS
6598 if (phba->link_state == LPFC_HBA_ERROR) {
6599 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
41415862
JW
6600
6601 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
6602 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6603 "(%d):0311 Mailbox command x%x cannot "
6604 "issue Data: x%x x%x\n",
6605 pmbox->vport ? pmbox->vport->vpi : 0,
6606 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
58da1ffb 6607 goto out_not_finished;
41415862
JW
6608 }
6609
9940b97b
JS
6610 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
6611 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
6612 !(hc_copy & HC_MBINT_ENA)) {
6613 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6614 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3772a991
JS
6615 "(%d):2528 Mailbox command x%x cannot "
6616 "issue Data: x%x x%x\n",
6617 pmbox->vport ? pmbox->vport->vpi : 0,
6618 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9940b97b
JS
6619 goto out_not_finished;
6620 }
9290831f
JS
6621 }
6622
dea3101e
JB
6623 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6624 /* Polling for a mbox command when another one is already active
6625 * is not allowed in SLI. Also, the driver must have established
6626 * SLI2 mode to queue and process multiple mbox commands.
6627 */
6628
6629 if (flag & MBX_POLL) {
2e0fef85 6630 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e
JB
6631
6632 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
6633 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6634 "(%d):2529 Mailbox command x%x "
6635 "cannot issue Data: x%x x%x\n",
6636 pmbox->vport ? pmbox->vport->vpi : 0,
6637 pmbox->u.mb.mbxCommand,
6638 psli->sli_flag, flag);
58da1ffb 6639 goto out_not_finished;
dea3101e
JB
6640 }
6641
3772a991 6642 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
2e0fef85 6643 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 6644 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
6645 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6646 "(%d):2530 Mailbox command x%x "
6647 "cannot issue Data: x%x x%x\n",
6648 pmbox->vport ? pmbox->vport->vpi : 0,
6649 pmbox->u.mb.mbxCommand,
6650 psli->sli_flag, flag);
58da1ffb 6651 goto out_not_finished;
dea3101e
JB
6652 }
6653
dea3101e
JB
6654 /* Another mailbox command is still being processed, queue this
6655 * command to be processed later.
6656 */
6657 lpfc_mbox_put(phba, pmbox);
6658
6659 /* Mbox cmd issue - BUSY */
ed957684 6660 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 6661 "(%d):0308 Mbox cmd issue - BUSY Data: "
92d7f7b0 6662 "x%x x%x x%x x%x\n",
92d7f7b0
JS
6663 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
6664 mb->mbxCommand, phba->pport->port_state,
6665 psli->sli_flag, flag);
dea3101e
JB
6666
6667 psli->slistat.mbox_busy++;
2e0fef85 6668 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 6669
858c9f6c
JS
6670 if (pmbox->vport) {
6671 lpfc_debugfs_disc_trc(pmbox->vport,
6672 LPFC_DISC_TRC_MBOX_VPORT,
6673 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
6674 (uint32_t)mb->mbxCommand,
6675 mb->un.varWords[0], mb->un.varWords[1]);
6676 }
6677 else {
6678 lpfc_debugfs_disc_trc(phba->pport,
6679 LPFC_DISC_TRC_MBOX,
6680 "MBOX Bsy: cmd:x%x mb:x%x x%x",
6681 (uint32_t)mb->mbxCommand,
6682 mb->un.varWords[0], mb->un.varWords[1]);
6683 }
6684
2e0fef85 6685 return MBX_BUSY;
dea3101e
JB
6686 }
6687
dea3101e
JB
6688 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
6689
6690 /* If we are not polling, we MUST be in SLI2 mode */
6691 if (flag != MBX_POLL) {
3772a991 6692 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
41415862 6693 (mb->mbxCommand != MBX_KILL_BOARD)) {
dea3101e 6694 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 6695 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 6696 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
6697 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6698 "(%d):2531 Mailbox command x%x "
6699 "cannot issue Data: x%x x%x\n",
6700 pmbox->vport ? pmbox->vport->vpi : 0,
6701 pmbox->u.mb.mbxCommand,
6702 psli->sli_flag, flag);
58da1ffb 6703 goto out_not_finished;
dea3101e
JB
6704 }
6705 /* timeout active mbox command */
a309a6b6 6706 mod_timer(&psli->mbox_tmo, (jiffies +
a183a15f 6707 (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
dea3101e
JB
6708 }
6709
6710 /* Mailbox cmd <cmd> issue */
ed957684 6711 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 6712 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
92d7f7b0 6713 "x%x\n",
e8b62011 6714 pmbox->vport ? pmbox->vport->vpi : 0,
92d7f7b0
JS
6715 mb->mbxCommand, phba->pport->port_state,
6716 psli->sli_flag, flag);
dea3101e 6717
858c9f6c
JS
6718 if (mb->mbxCommand != MBX_HEARTBEAT) {
6719 if (pmbox->vport) {
6720 lpfc_debugfs_disc_trc(pmbox->vport,
6721 LPFC_DISC_TRC_MBOX_VPORT,
6722 "MBOX Send vport: cmd:x%x mb:x%x x%x",
6723 (uint32_t)mb->mbxCommand,
6724 mb->un.varWords[0], mb->un.varWords[1]);
6725 }
6726 else {
6727 lpfc_debugfs_disc_trc(phba->pport,
6728 LPFC_DISC_TRC_MBOX,
6729 "MBOX Send: cmd:x%x mb:x%x x%x",
6730 (uint32_t)mb->mbxCommand,
6731 mb->un.varWords[0], mb->un.varWords[1]);
6732 }
6733 }
6734
dea3101e
JB
6735 psli->slistat.mbox_cmd++;
6736 evtctr = psli->slistat.mbox_event;
6737
6738 /* next set own bit for the adapter and copy over command word */
6739 mb->mbxOwner = OWN_CHIP;
6740
3772a991 6741 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7a470277
JS
6742 /* Populate mbox extension offset word. */
6743 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
6744 *(((uint32_t *)mb) + pmbox->mbox_offset_word)
6745 = (uint8_t *)phba->mbox_ext
6746 - (uint8_t *)phba->mbox;
6747 }
6748
6749 /* Copy the mailbox extension data */
6750 if (pmbox->in_ext_byte_len && pmbox->context2) {
6751 lpfc_sli_pcimem_bcopy(pmbox->context2,
6752 (uint8_t *)phba->mbox_ext,
6753 pmbox->in_ext_byte_len);
6754 }
6755 /* Copy command data to host SLIM area */
34b02dcd 6756 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e 6757 } else {
7a470277
JS
6758 /* Populate mbox extension offset word. */
6759 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
6760 *(((uint32_t *)mb) + pmbox->mbox_offset_word)
6761 = MAILBOX_HBA_EXT_OFFSET;
6762
6763 /* Copy the mailbox extension data */
6764 if (pmbox->in_ext_byte_len && pmbox->context2) {
6765 lpfc_memcpy_to_slim(phba->MBslimaddr +
6766 MAILBOX_HBA_EXT_OFFSET,
6767 pmbox->context2, pmbox->in_ext_byte_len);
6768
6769 }
9290831f 6770 if (mb->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 6771 /* copy command data into host mbox for cmpl */
34b02dcd 6772 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e
JB
6773 }
6774
6775 /* First copy mbox command data to HBA SLIM, skip past first
6776 word */
6777 to_slim = phba->MBslimaddr + sizeof (uint32_t);
6778 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
6779 MAILBOX_CMD_SIZE - sizeof (uint32_t));
6780
6781 /* Next copy over first word, with mbxOwner set */
34b02dcd 6782 ldata = *((uint32_t *)mb);
dea3101e
JB
6783 to_slim = phba->MBslimaddr;
6784 writel(ldata, to_slim);
6785 readl(to_slim); /* flush */
6786
6787 if (mb->mbxCommand == MBX_CONFIG_PORT) {
6788 /* switch over to host mailbox */
3772a991 6789 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea3101e
JB
6790 }
6791 }
6792
6793 wmb();
dea3101e
JB
6794
6795 switch (flag) {
6796 case MBX_NOWAIT:
09372820 6797 /* Set up reference to mailbox command */
dea3101e 6798 psli->mbox_active = pmbox;
09372820
JS
6799 /* Interrupt board to do it */
6800 writel(CA_MBATT, phba->CAregaddr);
6801 readl(phba->CAregaddr); /* flush */
6802 /* Don't wait for it to finish, just return */
dea3101e
JB
6803 break;
6804
6805 case MBX_POLL:
09372820 6806 /* Set up null reference to mailbox command */
dea3101e 6807 psli->mbox_active = NULL;
09372820
JS
6808 /* Interrupt board to do it */
6809 writel(CA_MBATT, phba->CAregaddr);
6810 readl(phba->CAregaddr); /* flush */
6811
3772a991 6812 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 6813 /* First read mbox status word */
34b02dcd 6814 word0 = *((uint32_t *)phba->mbox);
dea3101e
JB
6815 word0 = le32_to_cpu(word0);
6816 } else {
6817 /* First read mbox status word */
9940b97b
JS
6818 if (lpfc_readl(phba->MBslimaddr, &word0)) {
6819 spin_unlock_irqrestore(&phba->hbalock,
6820 drvr_flag);
6821 goto out_not_finished;
6822 }
dea3101e
JB
6823 }
6824
6825 /* Read the HBA Host Attention Register */
9940b97b
JS
6826 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6827 spin_unlock_irqrestore(&phba->hbalock,
6828 drvr_flag);
6829 goto out_not_finished;
6830 }
a183a15f
JS
6831 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6832 1000) + jiffies;
09372820 6833 i = 0;
dea3101e 6834 /* Wait for command to complete */
41415862
JW
6835 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
6836 (!(ha_copy & HA_MBATT) &&
2e0fef85 6837 (phba->link_state > LPFC_WARM_START))) {
09372820 6838 if (time_after(jiffies, timeout)) {
dea3101e 6839 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 6840 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 6841 drvr_flag);
58da1ffb 6842 goto out_not_finished;
dea3101e
JB
6843 }
6844
6845 /* Check if we took a mbox interrupt while we were
6846 polling */
6847 if (((word0 & OWN_CHIP) != OWN_CHIP)
6848 && (evtctr != psli->slistat.mbox_event))
6849 break;
6850
09372820
JS
6851 if (i++ > 10) {
6852 spin_unlock_irqrestore(&phba->hbalock,
6853 drvr_flag);
6854 msleep(1);
6855 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6856 }
dea3101e 6857
3772a991 6858 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 6859 /* First copy command data */
34b02dcd 6860 word0 = *((uint32_t *)phba->mbox);
dea3101e
JB
6861 word0 = le32_to_cpu(word0);
6862 if (mb->mbxCommand == MBX_CONFIG_PORT) {
6863 MAILBOX_t *slimmb;
34b02dcd 6864 uint32_t slimword0;
dea3101e
JB
6865 /* Check real SLIM for any errors */
6866 slimword0 = readl(phba->MBslimaddr);
6867 slimmb = (MAILBOX_t *) & slimword0;
6868 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
6869 && slimmb->mbxStatus) {
6870 psli->sli_flag &=
3772a991 6871 ~LPFC_SLI_ACTIVE;
dea3101e
JB
6872 word0 = slimword0;
6873 }
6874 }
6875 } else {
6876 /* First copy command data */
6877 word0 = readl(phba->MBslimaddr);
6878 }
6879 /* Read the HBA Host Attention Register */
9940b97b
JS
6880 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6881 spin_unlock_irqrestore(&phba->hbalock,
6882 drvr_flag);
6883 goto out_not_finished;
6884 }
dea3101e
JB
6885 }
6886
3772a991 6887 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 6888 /* copy results back to user */
34b02dcd 6889 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
7a470277
JS
6890 /* Copy the mailbox extension data */
6891 if (pmbox->out_ext_byte_len && pmbox->context2) {
6892 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
6893 pmbox->context2,
6894 pmbox->out_ext_byte_len);
6895 }
dea3101e
JB
6896 } else {
6897 /* First copy command data */
6898 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
6899 MAILBOX_CMD_SIZE);
7a470277
JS
6900 /* Copy the mailbox extension data */
6901 if (pmbox->out_ext_byte_len && pmbox->context2) {
6902 lpfc_memcpy_from_slim(pmbox->context2,
6903 phba->MBslimaddr +
6904 MAILBOX_HBA_EXT_OFFSET,
6905 pmbox->out_ext_byte_len);
dea3101e
JB
6906 }
6907 }
6908
6909 writel(HA_MBATT, phba->HAregaddr);
6910 readl(phba->HAregaddr); /* flush */
6911
6912 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6913 status = mb->mbxStatus;
6914 }
6915
2e0fef85
JS
6916 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6917 return status;
58da1ffb
JS
6918
6919out_not_finished:
6920 if (processing_queue) {
da0436e9 6921 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
58da1ffb
JS
6922 lpfc_mbox_cmpl_put(phba, pmbox);
6923 }
6924 return MBX_NOT_FINISHED;
dea3101e
JB
6925}
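
/*
 * Hedged usage sketch illustrating the ownership rule documented above:
 * the SLI layer keeps the mailbox only when MBX_BUSY or MBX_SUCCESS is
 * returned for a queued command; a polled command is back in the
 * caller's hands on return and must be freed by the caller.
 * lpfc_example_poll_heartbeat() is illustrative; lpfc_heart_beat() is
 * assumed here only as a convenient mailbox prep routine.
 */
static int lpfc_example_poll_heartbeat(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_heart_beat(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	/* Polling never returns MBX_BUSY, so the caller owns mboxq again */
	mempool_free(mboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}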
6926
f1126688
JS
6927/**
6928 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
6929 * @phba: Pointer to HBA context object.
6930 *
6931 * The function blocks the posting of SLI4 asynchronous mailbox commands from
6932 * the driver's internal pending mailbox queue. It will then try to wait out
6933 * any outstanding mailbox command before returning.
6934 *
6935 * Returns:
6936 * 0 - the outstanding mailbox command completed.
6937 * 1 - the wait for the outstanding mailbox command timed out.
6938 **/
6939static int
6940lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
6941{
6942 struct lpfc_sli *psli = &phba->sli;
f1126688 6943 int rc = 0;
a183a15f 6944 unsigned long timeout = 0;
f1126688
JS
6945
6946 /* Mark the asynchronous mailbox command posting as blocked */
6947 spin_lock_irq(&phba->hbalock);
6948 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
f1126688
JS
6949 /* Determine how long we might wait for the active mailbox
6950 * command to be gracefully completed by firmware.
6951 */
a183a15f
JS
6952 if (phba->sli.mbox_active)
6953 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
6954 phba->sli.mbox_active) *
6955 1000) + jiffies;
6956 spin_unlock_irq(&phba->hbalock);
6957
f1126688
JS
6958 /* Wait for the outstanding mailbox command to complete */
6959 while (phba->sli.mbox_active) {
6960 /* Check active mailbox complete status every 2ms */
6961 msleep(2);
6962 if (time_after(jiffies, timeout)) {
6963 /* Timeout, marked the outstanding cmd not complete */
6964 rc = 1;
6965 break;
6966 }
6967 }
6968
6969 /* Cannot cleanly block the async mailbox command; fail it */
6970 if (rc) {
6971 spin_lock_irq(&phba->hbalock);
6972 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6973 spin_unlock_irq(&phba->hbalock);
6974 }
6975 return rc;
6976}
6977
6978/**
6979 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
6980 * @phba: Pointer to HBA context object.
6981 *
6982 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
6983 * commands from the driver internal pending mailbox queue. It makes sure
6984 * that there is no outstanding mailbox command before resuming posting
6985 * asynchronous mailbox commands. If, for any reason, there is an outstanding
6986 * mailbox command, it will try to wait it out before resuming asynchronous
6987 * mailbox command posting.
6988 **/
6989static void
6990lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
6991{
6992 struct lpfc_sli *psli = &phba->sli;
6993
6994 spin_lock_irq(&phba->hbalock);
6995 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6996 /* Asynchronous mailbox posting is not blocked, do nothing */
6997 spin_unlock_irq(&phba->hbalock);
6998 return;
6999 }
7000
7001 /* The outstanding synchronous mailbox command is guaranteed to be
7002 * done, whether it succeeded or timed out; after a timeout the
7003 * outstanding command is always removed, so just unblock posting
7004 * async mailbox commands and resume
7005 */
7006 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7007 spin_unlock_irq(&phba->hbalock);
7008
7009 /* wake up worker thread to post asynchronous mailbox command */
7010 lpfc_worker_wake_up(phba);
7011}
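
/*
 * Hedged pattern sketch: the block/unblock pair above is meant to
 * bracket a synchronous bootstrap post, exactly as
 * lpfc_sli_issue_mbox_s4() does below for MBX_POLL with interrupts
 * enabled. Distilled (lpfc_example_* is illustrative, and
 * lpfc_sli4_post_sync_mbox() is defined later in this file):
 */
static int lpfc_example_sync_post(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	if (lpfc_sli4_async_mbox_block(phba))
		return MBXERR_ERROR;	/* active command never drained */
	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
	lpfc_sli4_async_mbox_unblock(phba);
	return rc;
}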
7012
da0436e9
JS
7013/**
7014 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7015 * @phba: Pointer to HBA context object.
7016 * @mboxq: Pointer to mailbox object.
7017 *
7018 * The function posts a mailbox to the port. The mailbox is expected
7019 * to be completely filled in and ready for the port to operate on it.
7020 * This routine executes a synchronous completion operation on the
7021 * mailbox by polling for its completion.
7022 *
7023 * The caller must not be holding any locks when calling this routine.
7024 *
7025 * Returns:
7026 * MBX_SUCCESS - mailbox posted successfully
7027 * Any of the MBX error values.
7028 **/
7029static int
7030lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7031{
7032 int rc = MBX_SUCCESS;
7033 unsigned long iflag;
7034 uint32_t db_ready;
7035 uint32_t mcqe_status;
7036 uint32_t mbx_cmnd;
7037 unsigned long timeout;
7038 struct lpfc_sli *psli = &phba->sli;
7039 struct lpfc_mqe *mb = &mboxq->u.mqe;
7040 struct lpfc_bmbx_create *mbox_rgn;
7041 struct dma_address *dma_address;
7042 struct lpfc_register bmbx_reg;
7043
7044 /*
7045 * Only one mailbox can be active to the bootstrap mailbox region
7046 * at a time and there is no queueing provided.
7047 */
7048 spin_lock_irqsave(&phba->hbalock, iflag);
7049 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7050 spin_unlock_irqrestore(&phba->hbalock, iflag);
7051 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7052 "(%d):2532 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
7053 "cannot issue Data: x%x x%x\n",
7054 mboxq->vport ? mboxq->vport->vpi : 0,
7055 mboxq->u.mb.mbxCommand,
a183a15f
JS
7056 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7057 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
7058 psli->sli_flag, MBX_POLL);
7059 return MBXERR_ERROR;
7060 }
7061 /* The server grabs the token and owns it until release */
7062 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7063 phba->sli.mbox_active = mboxq;
7064 spin_unlock_irqrestore(&phba->hbalock, iflag);
7065
7066 /*
7067 * Initialize the bootstrap memory region to avoid stale data areas
7068 * in the mailbox post. Then copy the caller's mailbox contents to
7069 * the bmbx mailbox region.
7070 */
7071 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7072 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7073 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7074 sizeof(struct lpfc_mqe));
7075
7076 /* Post the high mailbox dma address to the port and wait for ready. */
7077 dma_address = &phba->sli4_hba.bmbx.dma_address;
7078 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7079
a183a15f 7080 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
da0436e9
JS
7081 * 1000) + jiffies;
7082 do {
7083 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7084 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7085 if (!db_ready)
7086 msleep(2);
7087
7088 if (time_after(jiffies, timeout)) {
7089 rc = MBXERR_ERROR;
7090 goto exit;
7091 }
7092 } while (!db_ready);
7093
7094 /* Post the low mailbox dma address to the port. */
7095 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
a183a15f 7096 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
da0436e9
JS
7097 * 1000) + jiffies;
7098 do {
7099 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7100 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7101 if (!db_ready)
7102 msleep(2);
7103
7104 if (time_after(jiffies, timeout)) {
7105 rc = MBXERR_ERROR;
7106 goto exit;
7107 }
7108 } while (!db_ready);
7109
7110 /*
7111 * Read the CQ to ensure the mailbox has completed.
7112 * If so, update the mailbox status so that the upper layers
7113 * can complete the request normally.
7114 */
7115 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7116 sizeof(struct lpfc_mqe));
7117 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7118 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7119 sizeof(struct lpfc_mcqe));
7120 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
0558056c
JS
7121 /*
7122 * When the CQE status indicates a failure and the mailbox status
7123 * indicates success then copy the CQE status into the mailbox status
7124 * (and prefix it with x4000).
7125 */
da0436e9 7126 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
0558056c
JS
7127 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7128 bf_set(lpfc_mqe_status, mb,
7129 (LPFC_MBX_ERROR_RANGE | mcqe_status));
da0436e9 7130 rc = MBXERR_ERROR;
d7c47992
JS
7131 } else
7132 lpfc_sli4_swap_str(phba, mboxq);
da0436e9
JS
7133
7134 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 7135 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
da0436e9
JS
7136 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7137 " x%x x%x CQ: x%x x%x x%x x%x\n",
a183a15f
JS
7138 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7139 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7140 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
7141 bf_get(lpfc_mqe_status, mb),
7142 mb->un.mb_words[0], mb->un.mb_words[1],
7143 mb->un.mb_words[2], mb->un.mb_words[3],
7144 mb->un.mb_words[4], mb->un.mb_words[5],
7145 mb->un.mb_words[6], mb->un.mb_words[7],
7146 mb->un.mb_words[8], mb->un.mb_words[9],
7147 mb->un.mb_words[10], mb->un.mb_words[11],
7148 mb->un.mb_words[12], mboxq->mcqe.word0,
7149 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
7150 mboxq->mcqe.trailer);
7151exit:
7152 /* We are holding the token; no lock needed for the release */
7153 spin_lock_irqsave(&phba->hbalock, iflag);
7154 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7155 phba->sli.mbox_active = NULL;
7156 spin_unlock_irqrestore(&phba->hbalock, iflag);
7157 return rc;
7158}
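
/*
 * Hedged refactoring sketch: the two doorbell-ready polling loops in
 * lpfc_sli4_post_sync_mbox() above are identical and could be factored
 * as below. The lpfc_example_* name is illustrative; the register and
 * bit accessors are the ones used above.
 */
static int lpfc_example_wait_bmbx_ready(struct lpfc_hba *phba,
					unsigned long timeout)
{
	struct lpfc_register bmbx_reg;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		if (bf_get(lpfc_bmbx_rdy, &bmbx_reg))
			return MBX_SUCCESS;
		msleep(2);
	} while (!time_after(jiffies, timeout));
	return MBXERR_ERROR;	/* port never signalled ready */
}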
7159
7160/**
7161 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
7162 * @phba: Pointer to HBA context object.
7163 * @mboxq: Pointer to mailbox object.
7164 * @flag: Flag indicating how the mailbox needs to be processed.
7165 *
7166 * This function is called by discovery code and HBA management code to submit
7167 * a mailbox command to firmware with SLI-4 interface spec.
7168 *
7169 * Returns an MBX_* code; the caller owns the mailbox command after the
7170 * return of the function.
7171 **/
7172static int
7173lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7174 uint32_t flag)
7175{
7176 struct lpfc_sli *psli = &phba->sli;
7177 unsigned long iflags;
7178 int rc;
7179
b76f2dc9
JS
7180 /* dump from issue mailbox command if setup */
7181 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7182
8fa38513
JS
7183 rc = lpfc_mbox_dev_check(phba);
7184 if (unlikely(rc)) {
7185 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7186 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8fa38513
JS
7187 "cannot issue Data: x%x x%x\n",
7188 mboxq->vport ? mboxq->vport->vpi : 0,
7189 mboxq->u.mb.mbxCommand,
a183a15f
JS
7190 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7191 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8fa38513
JS
7192 psli->sli_flag, flag);
7193 goto out_not_finished;
7194 }
7195
da0436e9
JS
7196 /* Detect polling mode and jump to a handler */
7197 if (!phba->sli4_hba.intr_enable) {
7198 if (flag == MBX_POLL)
7199 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7200 else
7201 rc = -EIO;
7202 if (rc != MBX_SUCCESS)
0558056c 7203 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
da0436e9 7204 "(%d):2541 Mailbox command x%x "
a183a15f
JS
7205 "(x%x/x%x) cannot issue Data: "
7206 "x%x x%x\n",
da0436e9
JS
7207 mboxq->vport ? mboxq->vport->vpi : 0,
7208 mboxq->u.mb.mbxCommand,
a183a15f
JS
7209 lpfc_sli_config_mbox_subsys_get(phba,
7210 mboxq),
7211 lpfc_sli_config_mbox_opcode_get(phba,
7212 mboxq),
da0436e9
JS
7213 psli->sli_flag, flag);
7214 return rc;
7215 } else if (flag == MBX_POLL) {
f1126688
JS
7216 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7217 "(%d):2542 Try to issue mailbox command "
a183a15f 7218 "x%x (x%x/x%x) synchronously ahead of async"
f1126688 7219 "mailbox command queue: x%x x%x\n",
da0436e9
JS
7220 mboxq->vport ? mboxq->vport->vpi : 0,
7221 mboxq->u.mb.mbxCommand,
a183a15f
JS
7222 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7223 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9 7224 psli->sli_flag, flag);
f1126688
JS
7225 /* Try to block the asynchronous mailbox posting */
7226 rc = lpfc_sli4_async_mbox_block(phba);
7227 if (!rc) {
7228 /* Successfully blocked, now issue sync mbox cmd */
7229 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7230 if (rc != MBX_SUCCESS)
7231 lpfc_printf_log(phba, KERN_ERR,
a183a15f
JS
7232 LOG_MBOX | LOG_SLI,
7233 "(%d):2597 Mailbox command "
7234 "x%x (x%x/x%x) cannot issue "
7235 "Data: x%x x%x\n",
7236 mboxq->vport ?
7237 mboxq->vport->vpi : 0,
7238 mboxq->u.mb.mbxCommand,
7239 lpfc_sli_config_mbox_subsys_get(phba,
7240 mboxq),
7241 lpfc_sli_config_mbox_opcode_get(phba,
7242 mboxq),
7243 psli->sli_flag, flag);
f1126688
JS
7244 /* Unblock the async mailbox posting afterward */
7245 lpfc_sli4_async_mbox_unblock(phba);
7246 }
7247 return rc;
da0436e9
JS
7248 }
7249
7250 /* Now, interrupt mode asynchronous mailbox command */
7251 rc = lpfc_mbox_cmd_check(phba, mboxq);
7252 if (rc) {
7253 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7254 "(%d):2543 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
7255 "cannot issue Data: x%x x%x\n",
7256 mboxq->vport ? mboxq->vport->vpi : 0,
7257 mboxq->u.mb.mbxCommand,
a183a15f
JS
7258 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7259 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
7260 psli->sli_flag, flag);
7261 goto out_not_finished;
7262 }
da0436e9
JS
7263
7264 /* Put the mailbox command to the driver internal FIFO */
7265 psli->slistat.mbox_busy++;
7266 spin_lock_irqsave(&phba->hbalock, iflags);
7267 lpfc_mbox_put(phba, mboxq);
7268 spin_unlock_irqrestore(&phba->hbalock, iflags);
7269 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7270 "(%d):0354 Mbox cmd issue - Enqueue Data: "
a183a15f 7271 "x%x (x%x/x%x) x%x x%x x%x\n",
da0436e9
JS
7272 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7273 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
a183a15f
JS
7274 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7275 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
7276 phba->pport->port_state,
7277 psli->sli_flag, MBX_NOWAIT);
7278 /* Wake up worker thread to transport mailbox command from head */
7279 lpfc_worker_wake_up(phba);
7280
7281 return MBX_BUSY;
7282
7283out_not_finished:
7284 return MBX_NOT_FINISHED;
7285}
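
/*
 * Hedged usage sketch: an interrupt-mode (MBX_NOWAIT) submission sets a
 * completion handler and lets the worker thread post and complete the
 * command. Both lpfc_example_* functions are illustrative; ownership
 * returns to the caller only on MBX_NOT_FINISHED.
 */
static void lpfc_example_nowait_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	/* Completion context: inspect pmb->u.mb.mbxStatus, then free */
	mempool_free(pmb, phba->mbox_mem_pool);
}

static int lpfc_example_issue_nowait(struct lpfc_hba *phba,
				     LPFC_MBOXQ_t *mboxq)
{
	int rc;

	mboxq->mbox_cmpl = lpfc_example_nowait_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}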
7286
7287/**
7288 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
7289 * @phba: Pointer to HBA context object.
7290 *
7291 * This function is called by the worker thread to send a mailbox command to
7292 * SLI4 HBA firmware.
7293 *
7294 **/
7295int
7296lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7297{
7298 struct lpfc_sli *psli = &phba->sli;
7299 LPFC_MBOXQ_t *mboxq;
7300 int rc = MBX_SUCCESS;
7301 unsigned long iflags;
7302 struct lpfc_mqe *mqe;
7303 uint32_t mbx_cmnd;
7304
7305 /* Check interrupt mode before posting an async mailbox command */
7306 if (unlikely(!phba->sli4_hba.intr_enable))
7307 return MBX_NOT_FINISHED;
7308
7309 /* Check for mailbox command service token */
7310 spin_lock_irqsave(&phba->hbalock, iflags);
7311 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7312 spin_unlock_irqrestore(&phba->hbalock, iflags);
7313 return MBX_NOT_FINISHED;
7314 }
7315 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7316 spin_unlock_irqrestore(&phba->hbalock, iflags);
7317 return MBX_NOT_FINISHED;
7318 }
7319 if (unlikely(phba->sli.mbox_active)) {
7320 spin_unlock_irqrestore(&phba->hbalock, iflags);
7321 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7322 "0384 There is pending active mailbox cmd\n");
7323 return MBX_NOT_FINISHED;
7324 }
7325 /* Take the mailbox command service token */
7326 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7327
7328 /* Get the next mailbox command from head of queue */
7329 mboxq = lpfc_mbox_get(phba);
7330
7331 /* If no more mailbox commands are waiting to be posted, we're done */
7332 if (!mboxq) {
7333 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7334 spin_unlock_irqrestore(&phba->hbalock, iflags);
7335 return MBX_SUCCESS;
7336 }
7337 phba->sli.mbox_active = mboxq;
7338 spin_unlock_irqrestore(&phba->hbalock, iflags);
7339
7340 /* Check device readiness for posting mailbox command */
7341 rc = lpfc_mbox_dev_check(phba);
7342 if (unlikely(rc))
7343 /* Driver clean routine will clean up pending mailbox */
7344 goto out_not_finished;
7345
7346 /* Prepare the mbox command to be posted */
7347 mqe = &mboxq->u.mqe;
7348 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
7349
7350 /* Start timer for the mbox_tmo and log some mailbox post messages */
7351 mod_timer(&psli->mbox_tmo, (jiffies +
a183a15f 7352 (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
da0436e9
JS
7353
7354 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 7355 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
da0436e9
JS
7356 "x%x x%x\n",
7357 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
a183a15f
JS
7358 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7359 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
7360 phba->pport->port_state, psli->sli_flag);
7361
7362 if (mbx_cmnd != MBX_HEARTBEAT) {
7363 if (mboxq->vport) {
7364 lpfc_debugfs_disc_trc(mboxq->vport,
7365 LPFC_DISC_TRC_MBOX_VPORT,
7366 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7367 mbx_cmnd, mqe->un.mb_words[0],
7368 mqe->un.mb_words[1]);
7369 } else {
7370 lpfc_debugfs_disc_trc(phba->pport,
7371 LPFC_DISC_TRC_MBOX,
7372 "MBOX Send: cmd:x%x mb:x%x x%x",
7373 mbx_cmnd, mqe->un.mb_words[0],
7374 mqe->un.mb_words[1]);
7375 }
7376 }
7377 psli->slistat.mbox_cmd++;
7378
7379 /* Post the mailbox command to the port */
7380 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
7381 if (rc != MBX_SUCCESS) {
7382 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7383 "(%d):2533 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
7384 "cannot issue Data: x%x x%x\n",
7385 mboxq->vport ? mboxq->vport->vpi : 0,
7386 mboxq->u.mb.mbxCommand,
a183a15f
JS
7387 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7388 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
7389 psli->sli_flag, MBX_NOWAIT);
7390 goto out_not_finished;
7391 }
7392
7393 return rc;
7394
7395out_not_finished:
7396 spin_lock_irqsave(&phba->hbalock, iflags);
d7069f09
JS
7397 if (phba->sli.mbox_active) {
7398 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7399 __lpfc_mbox_cmpl_put(phba, mboxq);
7400 /* Release the token */
7401 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7402 phba->sli.mbox_active = NULL;
7403 }
da0436e9
JS
7404 spin_unlock_irqrestore(&phba->hbalock, iflags);
7405
7406 return MBX_NOT_FINISHED;
7407}
7408
7409/**
7410 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
7411 * @phba: Pointer to HBA context object.
7412 * @pmbox: Pointer to mailbox object.
7413 * @flag: Flag indicating how the mailbox needs to be processed.
7414 *
7415 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
7416 * the API jump table function pointer from the lpfc_hba struct.
7417 *
7418 * Returns an MBX_* code; the caller owns the mailbox command after the
7419 * return of the function.
7420 **/
7421int
7422lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
7423{
7424 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
7425}
7426
7427/**
25985edc 7428 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
da0436e9
JS
7429 * @phba: The hba struct for which this call is being executed.
7430 * @dev_grp: The HBA PCI-Device group number.
7431 *
7432 * This routine sets up the mbox interface API function jump table in @phba
7433 * struct.
7434 * Returns: 0 - success, -ENODEV - failure.
7435 **/
7436int
7437lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7438{
7439
7440 switch (dev_grp) {
7441 case LPFC_PCI_DEV_LP:
7442 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
7443 phba->lpfc_sli_handle_slow_ring_event =
7444 lpfc_sli_handle_slow_ring_event_s3;
7445 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
7446 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
7447 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
7448 break;
7449 case LPFC_PCI_DEV_OC:
7450 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
7451 phba->lpfc_sli_handle_slow_ring_event =
7452 lpfc_sli_handle_slow_ring_event_s4;
7453 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
7454 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
7455 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
7456 break;
7457 default:
7458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7459 "1420 Invalid HBA PCI-device group: 0x%x\n",
7460 dev_grp);
7461 return -ENODEV;
7462 break;
7463 }
7464 return 0;
7465}
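
/*
 * Hedged usage sketch: a probe-time caller selects the API set from the
 * PCI device group before any mailbox traffic; the dev_grp value and
 * the lpfc_example_* name are illustrative.
 */
static int lpfc_example_select_api(struct lpfc_hba *phba)
{
	/* SLI-4 (PCI dev group OC) jump table, else fail the probe */
	return lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC);
}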
7466
e59058c4 7467/**
3621a710 7468 * __lpfc_sli_ringtx_put - Add an iocb to the txq
e59058c4
JS
7469 * @phba: Pointer to HBA context object.
7470 * @pring: Pointer to driver SLI ring object.
7471 * @piocb: Pointer to address of newly added command iocb.
7472 *
7473 * This function is called with hbalock held to add a command
7474 * iocb to the txq when SLI layer cannot submit the command iocb
7475 * to the ring.
7476 **/
2a9bf3d0 7477void
92d7f7b0 7478__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 7479 struct lpfc_iocbq *piocb)
dea3101e
JB
7480{
7481 /* Insert the caller's iocb in the txq tail for later processing. */
7482 list_add_tail(&piocb->list, &pring->txq);
7483 pring->txq_cnt++;
dea3101e
JB
7484}
7485
e59058c4 7486/**
3621a710 7487 * lpfc_sli_next_iocb - Get the next iocb in the txq
e59058c4
JS
7488 * @phba: Pointer to HBA context object.
7489 * @pring: Pointer to driver SLI ring object.
7490 * @piocb: Pointer to address of newly added command iocb.
7491 *
7492 * This function is called with hbalock held before a new
7493 * iocb is submitted to the firmware. It checks the
7494 * txq so that iocbs pending there are flushed to the firmware
7495 * before new iocbs are submitted.
7496 * If there are iocbs in the txq which need to be submitted
7497 * to firmware, lpfc_sli_next_iocb returns the first element
7498 * of the txq after dequeuing it from the txq.
7499 * If the txq is empty, the function returns the original
7500 * *piocb and sets *piocb to NULL. The caller needs to check
7501 * *piocb to find out if there are more commands in the txq.
7502 **/
dea3101e
JB
7503static struct lpfc_iocbq *
7504lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 7505 struct lpfc_iocbq **piocb)
dea3101e
JB
7506{
7507 struct lpfc_iocbq * nextiocb;
7508
7509 nextiocb = lpfc_sli_ringtx_get(phba, pring);
7510 if (!nextiocb) {
7511 nextiocb = *piocb;
7512 *piocb = NULL;
7513 }
7514
7515 return nextiocb;
7516}
7517
e59058c4 7518/**
3772a991 7519 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
e59058c4 7520 * @phba: Pointer to HBA context object.
3772a991 7521 * @ring_number: SLI ring number to issue iocb on.
e59058c4
JS
7522 * @piocb: Pointer to command iocb.
7523 * @flag: Flag indicating if this command can be put into txq.
7524 *
3772a991
JS
7525 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
7526 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
7527 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
7528 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
7529 * this function allows only iocbs for posting buffers. This function finds
7530 * next available slot in the command ring and posts the command to the
7531 * available slot and writes the port attention register to request HBA start
7532 * processing new iocb. If there is no slot available in the ring and
7533 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
7534 * the function returns IOCB_BUSY.
e59058c4 7535 *
3772a991
JS
7536 * This function is called with hbalock held. The function will return success
7537 * after it successfully submits the iocb to firmware or after adding it to the
7538 * txq.
e59058c4 7539 **/
98c9ea5c 7540static int
3772a991 7541__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea3101e
JB
7542 struct lpfc_iocbq *piocb, uint32_t flag)
7543{
7544 struct lpfc_iocbq *nextiocb;
7545 IOCB_t *iocb;
3772a991 7546 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
dea3101e 7547
92d7f7b0
JS
7548 if (piocb->iocb_cmpl && (!piocb->vport) &&
7549 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
7550 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
7551 lpfc_printf_log(phba, KERN_ERR,
7552 LOG_SLI | LOG_VPORT,
e8b62011 7553 "1807 IOCB x%x failed. No vport\n",
92d7f7b0
JS
7554 piocb->iocb.ulpCommand);
7555 dump_stack();
7556 return IOCB_ERROR;
7557 }
7558
7559
8d63f375
LV
7560 /* If the PCI channel is in offline state, do not post iocbs. */
7561 if (unlikely(pci_channel_offline(phba->pcidev)))
7562 return IOCB_ERROR;
7563
a257bf90
JS
7564 /* If HBA has a deferred error attention, fail the iocb. */
7565 if (unlikely(phba->hba_flag & DEFER_ERATT))
7566 return IOCB_ERROR;
7567
dea3101e
JB
7568 /*
7569 * We should never get an IOCB if we are in a < LINK_DOWN state
7570 */
2e0fef85 7571 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e
JB
7572 return IOCB_ERROR;
7573
7574 /*
7575 * Check to see if we are blocking IOCB processing because of an
0b727fea 7576 * outstanding event.
dea3101e 7577 */
0b727fea 7578 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea3101e
JB
7579 goto iocb_busy;
7580
2e0fef85 7581 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea3101e 7582 /*
2680eeaa 7583 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea3101e
JB
7584 * can be issued if the link is not up.
7585 */
7586 switch (piocb->iocb.ulpCommand) {
84774a4d
JS
7587 case CMD_GEN_REQUEST64_CR:
7588 case CMD_GEN_REQUEST64_CX:
7589 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
7590 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
6a9c52cf 7591 FC_RCTL_DD_UNSOL_CMD) ||
84774a4d
JS
7592 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
7593 MENLO_TRANSPORT_TYPE))
7594
7595 goto iocb_busy;
7596 break;
dea3101e
JB
7597 case CMD_QUE_RING_BUF_CN:
7598 case CMD_QUE_RING_BUF64_CN:
dea3101e
JB
7599 /*
7600 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
7601 * completion, iocb_cmpl MUST be 0.
7602 */
7603 if (piocb->iocb_cmpl)
7604 piocb->iocb_cmpl = NULL;
7605 /*FALLTHROUGH*/
7606 case CMD_CREATE_XRI_CR:
2680eeaa
JS
7607 case CMD_CLOSE_XRI_CN:
7608 case CMD_CLOSE_XRI_CX:
dea3101e
JB
7609 break;
7610 default:
7611 goto iocb_busy;
7612 }
7613
7614 /*
7615 * For FCP commands, we must be in a state where we can process link
7616 * attention events.
7617 */
7618 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
92d7f7b0 7619 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea3101e 7620 goto iocb_busy;
92d7f7b0 7621 }
dea3101e 7622
dea3101e
JB
7623 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
7624 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
7625 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
7626
7627 if (iocb)
7628 lpfc_sli_update_ring(phba, pring);
7629 else
7630 lpfc_sli_update_full_ring(phba, pring);
7631
7632 if (!piocb)
7633 return IOCB_SUCCESS;
7634
7635 goto out_busy;
7636
7637 iocb_busy:
7638 pring->stats.iocb_cmd_delay++;
7639
7640 out_busy:
7641
7642 if (!(flag & SLI_IOCB_RET_IOCB)) {
92d7f7b0 7643 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea3101e
JB
7644 return IOCB_SUCCESS;
7645 }
7646
7647 return IOCB_BUSY;
7648}
7649
3772a991 7650/**
4f774513
JS
7651 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
7652 * @phba: Pointer to HBA context object.
7653 * @piocbq: Pointer to command iocb.
7654 * @sglq: Pointer to the scatter gather queue object.
7655 *
7656 * This routine converts the bpl or bde that is in the IOCB
7657 * to a sgl list for the sli4 hardware. The physical address
7658 * of the bpl/bde is converted back to a virtual address.
7659 * If the IOCB contains a BPL then the list of BDE's is
7660 * converted to sli4_sge's. If the IOCB contains a single
7661 * BDE then it is converted to a single sli4_sge.
7662 * The IOCB is still in cpu endianness so the contents of
7663 * the bpl can be used without byte swapping.
7664 *
7665 * Returns valid XRI = Success, NO_XRI = Failure.
7666**/
7667static uint16_t
7668lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7669 struct lpfc_sglq *sglq)
3772a991 7670{
4f774513
JS
7671 uint16_t xritag = NO_XRI;
7672 struct ulp_bde64 *bpl = NULL;
7673 struct ulp_bde64 bde;
7674 struct sli4_sge *sgl = NULL;
1b51197d 7675 struct lpfc_dmabuf *dmabuf;
4f774513
JS
7676 IOCB_t *icmd;
7677 int numBdes = 0;
7678 int i = 0;
63e801ce
JS
7679 uint32_t offset = 0; /* accumulated offset in the sg request list */
7680 int inbound = 0; /* number of sg reply entries inbound from firmware */
3772a991 7681
4f774513
JS
7682 if (!piocbq || !sglq)
7683 return xritag;
7684
7685 sgl = (struct sli4_sge *)sglq->sgl;
7686 icmd = &piocbq->iocb;
6b5151fd
JS
7687 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
7688 return sglq->sli4_xritag;
4f774513
JS
7689 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7690 numBdes = icmd->un.genreq64.bdl.bdeSize /
7691 sizeof(struct ulp_bde64);
7692 /* The addrHigh and addrLow fields within the IOCB
7693 * have not been byteswapped yet so there is no
7694 * need to swap them back.
7695 */
1b51197d
JS
7696 if (piocbq->context3)
7697 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
7698 else
7699 return xritag;
4f774513 7700
1b51197d 7701 bpl = (struct ulp_bde64 *)dmabuf->virt;
4f774513
JS
7702 if (!bpl)
7703 return xritag;
7704
7705 for (i = 0; i < numBdes; i++) {
7706 /* Should already be byte swapped. */
28baac74
JS
7707 sgl->addr_hi = bpl->addrHigh;
7708 sgl->addr_lo = bpl->addrLow;
7709
0558056c 7710 sgl->word2 = le32_to_cpu(sgl->word2);
4f774513
JS
7711 if ((i+1) == numBdes)
7712 bf_set(lpfc_sli4_sge_last, sgl, 1);
7713 else
7714 bf_set(lpfc_sli4_sge_last, sgl, 0);
28baac74
JS
7715 /* swap the size field back to the cpu so we
7716 * can assign it to the sgl.
7717 */
7718 bde.tus.w = le32_to_cpu(bpl->tus.w);
7719 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
63e801ce
JS
7720 /* The offsets in the sgl need to be accumulated
7721 * separately for the request and reply lists.
7722 * The request is always first, the reply follows.
7723 */
7724 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
7725 /* add up the reply sg entries */
7726 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
7727 inbound++;
7728 /* first inbound? reset the offset */
7729 if (inbound == 1)
7730 offset = 0;
7731 bf_set(lpfc_sli4_sge_offset, sgl, offset);
f9bb2da1
JS
7732 bf_set(lpfc_sli4_sge_type, sgl,
7733 LPFC_SGE_TYPE_DATA);
63e801ce
JS
7734 offset += bde.tus.f.bdeSize;
7735 }
546fc854 7736 sgl->word2 = cpu_to_le32(sgl->word2);
4f774513
JS
7737 bpl++;
7738 sgl++;
7739 }
7740 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
7741 /* The addrHigh and addrLow fields of the BDE have not
7742 * been byteswapped yet so they need to be swapped
7743 * before putting them in the sgl.
7744 */
7745 sgl->addr_hi =
7746 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
7747 sgl->addr_lo =
7748 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
0558056c 7749 sgl->word2 = le32_to_cpu(sgl->word2);
4f774513
JS
7750 bf_set(lpfc_sli4_sge_last, sgl, 1);
7751 sgl->word2 = cpu_to_le32(sgl->word2);
28baac74
JS
7752 sgl->sge_len =
7753 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
4f774513
JS
7754 }
7755 return sglq->sli4_xritag;
3772a991 7756}
92d7f7b0 7757
e59058c4 7758/**
4f774513 7759 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
e59058c4 7760 * @phba: Pointer to HBA context object.
e59058c4 7761 *
a93ff37a 7762 * This routine distributes SCSI commands to the SLI4 FCP WQ indexes in
8fa38513
JS
7763 * round-robin fashion. It is called by __lpfc_sli_issue_iocb_s4() with the
7764 * hbalock held.
4f774513
JS
7765 *
7766 * Return: index into the SLI4 fast-path FCP queue array.
e59058c4 7767 **/
4f774513 7768static uint32_t
8fa38513 7769lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
92d7f7b0 7770{
8fa38513
JS
7771 ++phba->fcp_qidx;
7772 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
7773 phba->fcp_qidx = 0;
92d7f7b0 7774
8fa38513 7775 return phba->fcp_qidx;
92d7f7b0
JS
7776}
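
/*
 * Hedged equivalence sketch: the increment-and-wrap above is the
 * classic unlocked round-robin; the modulo form below is arithmetically
 * identical and relies on the same hbalock serialization by the caller.
 * The lpfc_example_* name is illustrative.
 */
static inline uint32_t lpfc_example_next_wqidx(struct lpfc_hba *phba)
{
	phba->fcp_qidx = (phba->fcp_qidx + 1) % phba->cfg_fcp_wq_count;
	return phba->fcp_qidx;
}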
7777
e59058c4 7778/**
4f774513 7779 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
e59058c4 7780 * @phba: Pointer to HBA context object.
4f774513
JS
7781 * @iocbq: Pointer to command iocb.
7782 * @wqe: Pointer to the work queue entry.
e59058c4 7783 *
4f774513
JS
7784 * This routine converts the iocb command to its Work Queue Entry
7785 * equivalent. The wqe pointer should not have any fields set when
7786 * this routine is called because it will memcpy over them.
7787 * This routine does not set the CQ_ID or the WQEC bits in the
7788 * wqe.
e59058c4 7789 *
4f774513 7790 * Returns: 0 = Success, IOCB_ERROR = Failure.
e59058c4 7791 **/
cf5bf97e 7792static int
4f774513
JS
7793lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7794 union lpfc_wqe *wqe)
cf5bf97e 7795{
5ffc266e 7796 uint32_t xmit_len = 0, total_len = 0;
4f774513
JS
7797 uint8_t ct = 0;
7798 uint32_t fip;
7799 uint32_t abort_tag;
7800 uint8_t command_type = ELS_COMMAND_NON_FIP;
7801 uint8_t cmnd;
7802 uint16_t xritag;
dcf2a4e0
JS
7803 uint16_t abrt_iotag;
7804 struct lpfc_iocbq *abrtiocbq;
4f774513 7805 struct ulp_bde64 *bpl = NULL;
f0d9bccc 7806 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
5ffc266e
JS
7807 int numBdes, i;
7808 struct ulp_bde64 bde;
c31098ce 7809 struct lpfc_nodelist *ndlp;
ff78d8f9 7810 uint32_t *pcmd;
1b51197d 7811 uint32_t if_type;
4f774513 7812
45ed1190 7813 fip = phba->hba_flag & HBA_FIP_SUPPORT;
4f774513 7814 /* The fcp commands will set command type */
0c287589 7815 if (iocbq->iocb_flag & LPFC_IO_FCP)
4f774513 7816 command_type = FCP_COMMAND;
c868595d 7817 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
0c287589
JS
7818 command_type = ELS_COMMAND_FIP;
7819 else
7820 command_type = ELS_COMMAND_NON_FIP;
7821
4f774513
JS
7822 /* Some of the fields are in the right position already */
7823 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
7824 abort_tag = (uint32_t) iocbq->iotag;
7825 xritag = iocbq->sli4_xritag;
f0d9bccc 7826 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
4f774513
JS
7827 /* words0-2 bpl convert bde */
7828 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5ffc266e
JS
7829 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
7830 sizeof(struct ulp_bde64);
4f774513
JS
7831 bpl = (struct ulp_bde64 *)
7832 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
7833 if (!bpl)
7834 return IOCB_ERROR;
cf5bf97e 7835
4f774513
JS
7836 /* Should already be byte swapped. */
7837 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
7838 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
7839 /* swap the size field back to the cpu so we
7840 * can assign it to the sgl.
7841 */
7842 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5ffc266e
JS
7843 xmit_len = wqe->generic.bde.tus.f.bdeSize;
7844 total_len = 0;
7845 for (i = 0; i < numBdes; i++) {
7846 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
7847 total_len += bde.tus.f.bdeSize;
7848 }
4f774513 7849 } else
5ffc266e 7850 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
cf5bf97e 7851
4f774513
JS
7852 iocbq->iocb.ulpIoTag = iocbq->iotag;
7853 cmnd = iocbq->iocb.ulpCommand;
a4bc3379 7854
4f774513
JS
7855 switch (iocbq->iocb.ulpCommand) {
7856 case CMD_ELS_REQUEST64_CR:
c31098ce 7857 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513
JS
7858 if (!iocbq->iocb.ulpLe) {
7859 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7860 "2007 Only Limited Edition cmd Format"
7861 " supported 0x%x\n",
7862 iocbq->iocb.ulpCommand);
7863 return IOCB_ERROR;
7864 }
ff78d8f9 7865
5ffc266e 7866 wqe->els_req.payload_len = xmit_len;
4f774513
JS
7867 /* Els_request64 has a TMO */
7868 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
7869 iocbq->iocb.ulpTimeout);
7870 /* Need a VF for word 4 set the vf bit*/
7871 bf_set(els_req64_vf, &wqe->els_req, 0);
7872 /* And a VFID for word 12 */
7873 bf_set(els_req64_vfid, &wqe->els_req, 0);
4f774513 7874 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
f0d9bccc
JS
7875 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7876 iocbq->iocb.ulpContext);
7877 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
7878 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
4f774513 7879 /* CCP CCPE PV PRI in word10 were set in the memcpy */
ff78d8f9 7880 if (command_type == ELS_COMMAND_FIP)
c868595d
JS
7881 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
7882 >> LPFC_FIP_ELS_ID_SHIFT);
ff78d8f9
JS
7883 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
7884 iocbq->context2)->virt);
1b51197d
JS
7885 if_type = bf_get(lpfc_sli_intf_if_type,
7886 &phba->sli4_hba.sli_intf);
7887 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
ff78d8f9 7888 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
cb69f7de 7889 *pcmd == ELS_CMD_SCR ||
6b5151fd 7890 *pcmd == ELS_CMD_FDISC ||
bdcd2b92 7891 *pcmd == ELS_CMD_LOGO ||
ff78d8f9
JS
7892 *pcmd == ELS_CMD_PLOGI)) {
7893 bf_set(els_req64_sp, &wqe->els_req, 1);
7894 bf_set(els_req64_sid, &wqe->els_req,
7895 iocbq->vport->fc_myDID);
7896 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
7897 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7898 phba->vpi_ids[phba->pport->vpi]);
3ef6d24c 7899 } else if (pcmd && iocbq->context1) {
ff78d8f9
JS
7900 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
7901 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7902 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
7903 }
c868595d 7904 }
6d368e53
JS
7905 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
7906 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
f0d9bccc
JS
7907 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
7908 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
7909 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
7910 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
7911 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7912 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
7851fe2c 7913 break;
5ffc266e 7914 case CMD_XMIT_SEQUENCE64_CX:
f0d9bccc
JS
7915 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
7916 iocbq->iocb.un.ulpWord[3]);
7917 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7851fe2c 7918 iocbq->iocb.unsli3.rcvsli3.ox_id);
5ffc266e
JS
7919 /* The entire sequence is transmitted for this IOCB */
7920 xmit_len = total_len;
7921 cmnd = CMD_XMIT_SEQUENCE64_CR;
1b51197d
JS
7922 if (phba->link_flag & LS_LOOPBACK_MODE)
7923 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
4f774513 7924 case CMD_XMIT_SEQUENCE64_CR:
f0d9bccc
JS
7925 /* word3 iocb=io_tag32 wqe=reserved */
7926 wqe->xmit_sequence.rsvd3 = 0;
4f774513
JS
7927 /* word4 relative_offset memcpy */
7928 /* word5 r_ctl/df_ctl memcpy */
f0d9bccc
JS
7929 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
7930 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
7931 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
7932 LPFC_WQE_IOD_WRITE);
7933 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
7934 LPFC_WQE_LENLOC_WORD12);
7935 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
5ffc266e
JS
7936 wqe->xmit_sequence.xmit_len = xmit_len;
7937 command_type = OTHER_COMMAND;
7851fe2c 7938 break;
4f774513 7939 case CMD_XMIT_BCAST64_CN:
f0d9bccc
JS
7940 /* word3 iocb=iotag32 wqe=seq_payload_len */
7941 wqe->xmit_bcast64.seq_payload_len = xmit_len;
4f774513
JS
7942 /* word4 iocb=rsvd wqe=rsvd */
7943 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
7944 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
f0d9bccc 7945 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
4f774513 7946 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
f0d9bccc
JS
7947 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
7948 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
7949 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
7950 LPFC_WQE_LENLOC_WORD3);
7951 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7851fe2c 7952 break;
4f774513
JS
7953 case CMD_FCP_IWRITE64_CR:
7954 command_type = FCP_COMMAND_DATA_OUT;
f0d9bccc
JS
7955 /* word3 iocb=iotag wqe=payload_offset_len */
7956 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
7957 wqe->fcp_iwrite.payload_offset_len =
7958 xmit_len + sizeof(struct fcp_rsp);
7959 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
7960 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
7961 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
7962 iocbq->iocb.ulpFCP2Rcvy);
7963 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
7964 /* Always open the exchange */
7965 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
f0d9bccc
JS
7966 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
7967 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
7968 LPFC_WQE_LENLOC_WORD4);
7969 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
7970 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
acd6859b
JS
7971 if (iocbq->iocb_flag & LPFC_IO_DIF) {
7972 iocbq->iocb_flag &= ~LPFC_IO_DIF;
7973 bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
7974 }
7975 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
7851fe2c 7976 break;
4f774513 7977 case CMD_FCP_IREAD64_CR:
f0d9bccc
JS
7978 /* word3 iocb=iotag wqe=payload_offset_len */
7979 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
7980 wqe->fcp_iread.payload_offset_len =
5ffc266e 7981 xmit_len + sizeof(struct fcp_rsp);
f0d9bccc
JS
7982 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
7983 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
7984 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
7985 iocbq->iocb.ulpFCP2Rcvy);
7986 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
f1126688
JS
7987 /* Always open the exchange */
7988 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
f0d9bccc
JS
7989 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
7990 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
7991 LPFC_WQE_LENLOC_WORD4);
7992 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
7993 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
acd6859b
JS
7994 if (iocbq->iocb_flag & LPFC_IO_DIF) {
7995 iocbq->iocb_flag &= ~LPFC_IO_DIF;
7996 bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
7997 }
7998 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
7851fe2c 7999 break;
4f774513 8000 case CMD_FCP_ICMND64_CR:
f0d9bccc
JS
8001 /* word3 iocb=IO_TAG wqe=reserved */
8002 wqe->fcp_icmd.rsrvd3 = 0;
8003 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
4f774513 8004 /* Always open the exchange */
f0d9bccc
JS
8005 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
8006 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8007 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8008 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8009 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8010 LPFC_WQE_LENLOC_NONE);
8011 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
7851fe2c 8012 break;
4f774513 8013 case CMD_GEN_REQUEST64_CR:
63e801ce
JS
8014 /* For this command calculate the xmit length of the
8015 * request bde.
8016 */
8017 xmit_len = 0;
8018 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8019 sizeof(struct ulp_bde64);
8020 for (i = 0; i < numBdes; i++) {
63e801ce 8021 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
546fc854
JS
8022 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8023 break;
63e801ce
JS
8024 xmit_len += bde.tus.f.bdeSize;
8025 }
f0d9bccc
JS
8026 /* word3 iocb=IO_TAG wqe=request_payload_len */
8027 wqe->gen_req.request_payload_len = xmit_len;
8028 /* word4 iocb=parameter wqe=relative_offset memcpy */
8029 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
4f774513
JS
8030 /* word6 context tag copied in memcpy */
8031 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
8032 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8033 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8034 "2015 Invalid CT %x command 0x%x\n",
8035 ct, iocbq->iocb.ulpCommand);
8036 return IOCB_ERROR;
8037 }
f0d9bccc
JS
8038 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8039 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8040 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8041 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8042 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8043 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8044 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8045 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
4f774513 8046 command_type = OTHER_COMMAND;
7851fe2c 8047 break;
4f774513 8048 case CMD_XMIT_ELS_RSP64_CX:
c31098ce 8049 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513 8050 /* words0-2 BDE memcpy */
f0d9bccc
JS
8051 /* word3 iocb=iotag32 wqe=response_payload_len */
8052 wqe->xmit_els_rsp.response_payload_len = xmit_len;
4f774513 8053 /* word4 iocb=did wqe=rsvd. */
f0d9bccc 8054 wqe->xmit_els_rsp.rsvd4 = 0;
4f774513
JS
 8055 /* word5 iocb=rsvd wqe=did */
8056 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
8057 iocbq->iocb.un.elsreq64.remoteID);
f0d9bccc
JS
8058 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8059 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8060 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8061 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7851fe2c 8062 iocbq->iocb.unsli3.rcvsli3.ox_id);
4f774513 8063 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
f0d9bccc 8064 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6d368e53 8065 phba->vpi_ids[iocbq->vport->vpi]);
f0d9bccc
JS
8066 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
8067 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
8068 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
8069 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
8070 LPFC_WQE_LENLOC_WORD3);
8071 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6d368e53
JS
8072 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
8073 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
ff78d8f9
JS
8074 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8075 iocbq->context2)->virt);
8076 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8077 bf_set(els_req64_sp, &wqe->els_req, 1);
8078 bf_set(els_req64_sid, &wqe->els_req,
8079 iocbq->vport->fc_myDID);
8080 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8081 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8082 phba->vpi_ids[phba->pport->vpi]);
8083 }
4f774513 8084 command_type = OTHER_COMMAND;
7851fe2c 8085 break;
4f774513
JS
8086 case CMD_CLOSE_XRI_CN:
8087 case CMD_ABORT_XRI_CN:
8088 case CMD_ABORT_XRI_CX:
 8089 /* words 0-2 memcpy should be 0 (reserved) */
 8090 /* port will send the ABTS */
dcf2a4e0
JS
8091 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
8092 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
8093 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
8094 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
8095 } else
8096 fip = 0;
8097
8098 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
4f774513 8099 /*
dcf2a4e0
JS
 8100 * The link is down, or the command was ELS_FIP,
 8101 * so the firmware does not need to send an ABTS
4f774513
JS
8102 * on the wire.
8103 */
8104 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
8105 else
8106 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
8107 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
f0d9bccc
JS
8108 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
8109 wqe->abort_cmd.rsrvd5 = 0;
8110 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
4f774513
JS
8111 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8112 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
4f774513
JS
8113 /*
8114 * The abort handler will send us CMD_ABORT_XRI_CN or
8115 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
8116 */
f0d9bccc
JS
8117 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
8118 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
8119 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
8120 LPFC_WQE_LENLOC_NONE);
4f774513
JS
8121 cmnd = CMD_ABORT_XRI_CX;
8122 command_type = OTHER_COMMAND;
8123 xritag = 0;
7851fe2c 8124 break;
6669f9bb 8125 case CMD_XMIT_BLS_RSP64_CX:
6b5151fd 8126 ndlp = (struct lpfc_nodelist *)iocbq->context1;
546fc854 8127 /* As BLS ABTS RSP WQE is very different from other WQEs,
6669f9bb
JS
 8128 * we reconstruct this WQE here from scratch, based on
 8129 * the information carried in the iocbq.
8130 */
8131 memset(wqe, 0, sizeof(union lpfc_wqe));
5ffc266e 8132 /* OX_ID is the same regardless of who sent the ABTS to the CT exchange */
6669f9bb 8133 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
546fc854
JS
8134 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
8135 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
5ffc266e
JS
8136 LPFC_ABTS_UNSOL_INT) {
8137 /* ABTS sent by initiator to CT exchange, the
8138 * RX_ID field will be filled with the newly
8139 * allocated responder XRI.
8140 */
8141 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8142 iocbq->sli4_xritag);
8143 } else {
8144 /* ABTS sent by responder to CT exchange, the
8145 * RX_ID field will be filled with the responder
8146 * RX_ID from ABTS.
8147 */
8148 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
546fc854 8149 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
5ffc266e 8150 }
6669f9bb
JS
8151 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
8152 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6b5151fd
JS
8153
8154 /* Use CT=VPI */
8155 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
8156 ndlp->nlp_DID);
8157 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
8158 iocbq->iocb.ulpContext);
8159 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
6669f9bb 8160 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6b5151fd 8161 phba->vpi_ids[phba->pport->vpi]);
f0d9bccc
JS
8162 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
8163 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
8164 LPFC_WQE_LENLOC_NONE);
6669f9bb
JS
 8165 /* Overwrite the pre-set command type with OTHER_COMMAND */
8166 command_type = OTHER_COMMAND;
546fc854
JS
8167 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
8168 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
8169 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
8170 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
8171 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
8172 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
8173 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
8174 }
8175
7851fe2c 8176 break;
4f774513
JS
8177 case CMD_XRI_ABORTED_CX:
8178 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
4f774513
JS
8179 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
8180 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
8181 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
8182 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
8183 default:
8184 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8185 "2014 Invalid command 0x%x\n",
8186 iocbq->iocb.ulpCommand);
8187 return IOCB_ERROR;
7851fe2c 8188 break;
4f774513 8189 }
6d368e53 8190
f0d9bccc
JS
8191 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
8192 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
8193 wqe->generic.wqe_com.abort_tag = abort_tag;
8194 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
8195 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
8196 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
8197 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
4f774513
JS
8198 return 0;
8199}
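/* The converter above is built almost entirely from bf_set() calls. Below
 * is a minimal standalone sketch of the mask/shift pattern behind such
 * bitfield accessors, mirroring the name##_SHIFT / name##_MASK /
 * name##_WORD convention of lpfc_hw4.h; every demo_* name is illustrative,
 * not a real driver definition.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_wqe_com { uint32_t word7; };

/* Hypothetical field occupying bits 5..6 of word 7 */
#define demo_iod_SHIFT	5
#define demo_iod_MASK	0x3
#define demo_iod_WORD	word7

#define demo_bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))

#define demo_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

int main(void)
{
	struct demo_wqe_com com = { 0 };

	demo_bf_set(demo_iod, &com, 1);		/* set a 2-bit field */
	printf("word7=0x%08x iod=%u\n", (unsigned)com.word7,
	       (unsigned)demo_bf_get(demo_iod, &com));
	/* prints word7=0x00000020 iod=1 */
	return 0;
}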
8200
8201/**
8202 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
8203 * @phba: Pointer to HBA context object.
8204 * @ring_number: SLI ring number to issue iocb on.
8205 * @piocb: Pointer to command iocb.
8206 * @flag: Flag indicating if this command can be put into txq.
8207 *
8208 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
8209 * an iocb command to an HBA with SLI-4 interface spec.
8210 *
8211 * This function is called with hbalock held. The function will return success
 8212 * after it successfully submits the iocb to the firmware or after adding
 8213 * it to the txq.
8214 **/
8215static int
8216__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8217 struct lpfc_iocbq *piocb, uint32_t flag)
8218{
8219 struct lpfc_sglq *sglq;
4f774513
JS
8220 union lpfc_wqe wqe;
8221 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
4f774513
JS
8222
8223 if (piocb->sli4_xritag == NO_XRI) {
8224 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6b5151fd 8225 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
4f774513
JS
8226 sglq = NULL;
8227 else {
2a9bf3d0
JS
8228 if (pring->txq_cnt) {
8229 if (!(flag & SLI_IOCB_RET_IOCB)) {
8230 __lpfc_sli_ringtx_put(phba,
8231 pring, piocb);
8232 return IOCB_SUCCESS;
8233 } else {
8234 return IOCB_BUSY;
8235 }
8236 } else {
6d368e53 8237 sglq = __lpfc_sli_get_sglq(phba, piocb);
2a9bf3d0
JS
8238 if (!sglq) {
8239 if (!(flag & SLI_IOCB_RET_IOCB)) {
8240 __lpfc_sli_ringtx_put(phba,
8241 pring,
8242 piocb);
8243 return IOCB_SUCCESS;
8244 } else
8245 return IOCB_BUSY;
8246 }
8247 }
4f774513
JS
8248 }
8249 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6d368e53
JS
 8250 /* These IOs already have an XRI and a mapped sgl. */
8251 sglq = NULL;
4f774513 8252 } else {
6d368e53
JS
8253 /*
 8254 * This is a continuation of a command (CX), so this
4f774513
JS
8255 * sglq is on the active list
8256 */
8257 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
8258 if (!sglq)
8259 return IOCB_ERROR;
8260 }
8261
8262 if (sglq) {
6d368e53 8263 piocb->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0 8264 piocb->sli4_xritag = sglq->sli4_xritag;
2a9bf3d0 8265 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
4f774513
JS
8266 return IOCB_ERROR;
8267 }
8268
8269 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
8270 return IOCB_ERROR;
8271
341af102
JS
8272 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8273 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
5ffc266e
JS
8274 /*
8275 * For FCP command IOCB, get a new WQ index to distribute
 8276 * the WQE across the WQs. An abort IOCB, on the other
 8277 * hand, carries the same WQ index as the original
 8278 * command IOCB.
8279 */
341af102 8280 if (piocb->iocb_flag & LPFC_IO_FCP)
5ffc266e 8281 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
2e90f4b5
JS
8282 if (unlikely(!phba->sli4_hba.fcp_wq))
8283 return IOCB_ERROR;
5ffc266e
JS
8284 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8285 &wqe))
4f774513
JS
8286 return IOCB_ERROR;
8287 } else {
8288 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8289 return IOCB_ERROR;
8290 }
8291 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8292
8293 return 0;
8294}
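/* Hypothetical caller of the issue path above: push an ELS iocb and ask
 * for it back (SLI_IOCB_RET_IOCB) instead of letting it park on the txq.
 * lpfc_demo_send() is illustrative only; the ring number, flag, and
 * return codes are the driver's real ones.
 */
static int lpfc_demo_send(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	int rc;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
				 SLI_IOCB_RET_IOCB);
	switch (rc) {
	case IOCB_SUCCESS:	/* submitted to the WQ (or queued) */
		return 0;
	case IOCB_BUSY:		/* no room; the iocb came back to us */
		return -EBUSY;
	default:		/* IOCB_ERROR: no sglq/WQ or bad command */
		return -EIO;
	}
}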
8295
8296/**
8297 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
8298 *
 8299 * This routine wraps the actual lockless IOCB issue routine, invoking it
 8300 * through the function pointer stored in the lpfc_hba struct.
8301 *
8302 * Return codes:
8303 * IOCB_ERROR - Error
8304 * IOCB_SUCCESS - Success
8305 * IOCB_BUSY - Busy
8306 **/
2a9bf3d0 8307int
4f774513
JS
8308__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8309 struct lpfc_iocbq *piocb, uint32_t flag)
8310{
8311 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8312}
8313
8314/**
25985edc 8315 * lpfc_sli_api_table_setup - Set up sli api function jump table
4f774513
JS
8316 * @phba: The hba struct for which this call is being executed.
8317 * @dev_grp: The HBA PCI-Device group number.
8318 *
8319 * This routine sets up the SLI interface API function jump table in @phba
8320 * struct.
8321 * Returns: 0 - success, -ENODEV - failure.
8322 **/
8323int
8324lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8325{
8326
8327 switch (dev_grp) {
8328 case LPFC_PCI_DEV_LP:
8329 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8330 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8331 break;
8332 case LPFC_PCI_DEV_OC:
8333 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8334 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8335 break;
8336 default:
8337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8338 "1419 Invalid HBA PCI-device group: 0x%x\n",
8339 dev_grp);
8340 return -ENODEV;
8341 break;
8342 }
8343 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8344 return 0;
8345}
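/* The setup above is a plain function-pointer jump table: the SLI-3 or
 * SLI-4 implementation is chosen once at attach time so the hot path is a
 * single indirect call. A standalone sketch of the same pattern follows;
 * every demo_* name is invented.
 */
#include <stdio.h>

struct demo_hba;
typedef int (*demo_issue_fn)(struct demo_hba *);
struct demo_hba { demo_issue_fn issue; };

static int demo_issue_s3(struct demo_hba *h) { (void)h; return 3; }
static int demo_issue_s4(struct demo_hba *h) { (void)h; return 4; }

static int demo_api_setup(struct demo_hba *h, int dev_grp)
{
	switch (dev_grp) {
	case 0: h->issue = demo_issue_s3; break;  /* e.g. LPFC_PCI_DEV_LP */
	case 1: h->issue = demo_issue_s4; break;  /* e.g. LPFC_PCI_DEV_OC */
	default: return -1;                       /* unknown group */
	}
	return 0;
}

int main(void)
{
	struct demo_hba h;

	if (demo_api_setup(&h, 1) == 0)
		printf("issued via SLI-%d path\n", h.issue(&h));
	return 0;
}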
8346
8347/**
8348 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
8349 * @phba: Pointer to HBA context object.
 8350 * @ring_number: SLI ring number to issue iocb on.
8351 * @piocb: Pointer to command iocb.
8352 * @flag: Flag indicating if this command can be put into txq.
8353 *
8354 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
8355 * function. This function gets the hbalock and calls
8356 * __lpfc_sli_issue_iocb function and will return the error returned
8357 * by __lpfc_sli_issue_iocb function. This wrapper is used by
8358 * functions which do not hold hbalock.
8359 **/
8360int
8361lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8362 struct lpfc_iocbq *piocb, uint32_t flag)
8363{
8364 unsigned long iflags;
8365 int rc;
8366
8367 spin_lock_irqsave(&phba->hbalock, iflags);
8368 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8369 spin_unlock_irqrestore(&phba->hbalock, iflags);
8370
8371 return rc;
8372}
8373
8374/**
8375 * lpfc_extra_ring_setup - Extra ring setup function
8376 * @phba: Pointer to HBA context object.
8377 *
 8378 * This function is called while the driver attaches to the
 8379 * HBA to set up the extra ring. The extra ring is used
 8380 * only when the driver needs to support target mode
 8381 * functionality or IP over FC.
8382 *
8383 * This function is called with no lock held.
8384 **/
8385static int
 8386 lpfc_extra_ring_setup(struct lpfc_hba *phba)
8387{
8388 struct lpfc_sli *psli;
8389 struct lpfc_sli_ring *pring;
8390
8391 psli = &phba->sli;
8392
8393 /* Adjust cmd/rsp ring iocb entries more evenly */
8394
8395 /* Take some away from the FCP ring */
8396 pring = &psli->ring[psli->fcp_ring];
8397 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8398 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
cf5bf97e
JW
8399 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8400 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8401
a4bc3379
JS
8402 /* and give them to the extra ring */
8403 pring = &psli->ring[psli->extra_ring];
8404
cf5bf97e
JW
8405 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8406 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8407 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8408 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8409
8410 /* Setup default profile for this ring */
8411 pring->iotag_max = 4096;
8412 pring->num_mask = 1;
8413 pring->prt[0].profile = 0; /* Mask 0 */
a4bc3379
JS
8414 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
8415 pring->prt[0].type = phba->cfg_multi_ring_type;
cf5bf97e
JW
8416 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
8417 return 0;
8418}
8419
cb69f7de
JS
 8420/**
 * lpfc_sli_abts_recover_port - Recover a port that failed an ABTS.
8421 * @vport: pointer to virtual port object.
8422 * @ndlp: nodelist pointer for the impacted rport.
8423 *
 8424 * The driver calls this routine in response to an XRI ABORT CQE
8425 * event from the port. In this event, the driver is required to
8426 * recover its login to the rport even though its login may be valid
8427 * from the driver's perspective. The failed ABTS notice from the
8428 * port indicates the rport is not responding.
8429 */
8430static void
8431lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
8432 struct lpfc_nodelist *ndlp)
8433{
8434 struct Scsi_Host *shost;
8435 struct lpfc_hba *phba;
8436 unsigned long flags = 0;
8437
8438 shost = lpfc_shost_from_vport(vport);
8439 phba = vport->phba;
8440 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
8441 lpfc_printf_log(phba, KERN_INFO,
8442 LOG_SLI, "3093 No rport recovery needed. "
8443 "rport in state 0x%x\n",
8444 ndlp->nlp_state);
8445 return;
8446 }
8447 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8448 "3094 Start rport recovery on shost id 0x%x "
8449 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
8450 "flags 0x%x\n",
8451 shost->host_no, ndlp->nlp_DID,
8452 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
8453 ndlp->nlp_flag);
8454 /*
8455 * The rport is not responding. Don't attempt ADISC recovery.
8456 * Remove the FCP-2 flag to force a PLOGI.
8457 */
8458 spin_lock_irqsave(shost->host_lock, flags);
8459 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
8460 spin_unlock_irqrestore(shost->host_lock, flags);
8461 lpfc_disc_state_machine(vport, ndlp, NULL,
8462 NLP_EVT_DEVICE_RECOVERY);
8463 lpfc_cancel_retry_delay_tmo(vport, ndlp);
8464 spin_lock_irqsave(shost->host_lock, flags);
8465 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
8466 spin_unlock_irqrestore(shost->host_lock, flags);
8467 lpfc_disc_start(vport);
8468}
8469
 8470/**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
8471 * @phba: Pointer to HBA context object.
8472 * @iocbq: Pointer to iocb object.
8473 *
8474 * The async_event handler calls this routine when it receives
8475 * an ASYNC_STATUS_CN event from the port. The port generates
8476 * this event when an Abort Sequence request to an rport fails
8477 * twice in succession. The abort could be originated by the
8478 * driver or by the port. The ABTS could have been for an ELS
8479 * or FCP IO. The port only generates this event when an ABTS
8480 * fails to complete after one retry.
8481 */
8482static void
8483lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
8484 struct lpfc_iocbq *iocbq)
8485{
8486 struct lpfc_nodelist *ndlp = NULL;
8487 uint16_t rpi = 0, vpi = 0;
8488 struct lpfc_vport *vport = NULL;
8489
8490 /* The rpi in the ulpContext is vport-sensitive. */
8491 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
8492 rpi = iocbq->iocb.ulpContext;
8493
8494 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8495 "3092 Port generated ABTS async event "
8496 "on vpi %d rpi %d status 0x%x\n",
8497 vpi, rpi, iocbq->iocb.ulpStatus);
8498
8499 vport = lpfc_find_vport_by_vpid(phba, vpi);
8500 if (!vport)
8501 goto err_exit;
8502 ndlp = lpfc_findnode_rpi(vport, rpi);
8503 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
8504 goto err_exit;
8505
8506 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
8507 lpfc_sli_abts_recover_port(vport, ndlp);
8508 return;
8509
8510 err_exit:
8511 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8512 "3095 Event Context not found, no "
8513 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
 8514 vpi, rpi, iocbq->iocb.ulpStatus,
 8515 iocbq->iocb.ulpContext);
8516}
8517
 8518/**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
8519 * @phba: pointer to HBA context object.
8520 * @ndlp: nodelist pointer for the impacted rport.
8521 * @axri: pointer to the wcqe containing the failed exchange.
8522 *
8523 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
8524 * port. The port generates this event when an abort exchange request to an
8525 * rport fails twice in succession with no reply. The abort could be originated
8526 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
8527 */
8528void
8529lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8530 struct lpfc_nodelist *ndlp,
8531 struct sli4_wcqe_xri_aborted *axri)
8532{
8533 struct lpfc_vport *vport;
5c1db2ac 8534 uint32_t ext_status = 0;
cb69f7de 8535
6b5151fd 8536 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
cb69f7de
JS
8537 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8538 "3115 Node Context not found, driver "
8539 "ignoring abts err event\n");
6b5151fd
JS
8540 return;
8541 }
8542
cb69f7de
JS
8543 vport = ndlp->vport;
8544 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8545 "3116 Port generated FCP XRI ABORT event on "
5c1db2ac 8546 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
cb69f7de
JS
8547 ndlp->vport->vpi, ndlp->nlp_rpi,
8548 bf_get(lpfc_wcqe_xa_xri, axri),
5c1db2ac
JS
8549 bf_get(lpfc_wcqe_xa_status, axri),
8550 axri->parameter);
cb69f7de 8551
5c1db2ac
JS
8552 /*
8553 * Catch the ABTS protocol failure case. Older OCe FW releases returned
8554 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
8555 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
8556 */
8557 ext_status = axri->parameter & WCQE_PARAM_MASK;
8558 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
8559 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
cb69f7de
JS
8560 lpfc_sli_abts_recover_port(vport, ndlp);
8561}
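/* A standalone sketch of the extended-status check above: mask the WCQE
 * parameter down to its low byte and accept either firmware convention.
 * The DEMO_* values are illustrative stand-ins for the driver's
 * IOSTAT_LOCAL_REJECT / IOERR_SEQUENCE_TIMEOUT / WCQE_PARAM_MASK.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_IOSTAT_LOCAL_REJECT	0x03
#define DEMO_IOERR_SEQUENCE_TIMEOUT	0x0b
#define DEMO_WCQE_PARAM_MASK		0xff

static int demo_abts_failed(uint32_t status, uint32_t parameter)
{
	uint32_t ext_status = parameter & DEMO_WCQE_PARAM_MASK;

	/* older FW: LOCAL_REJECT + 0; newer FW: LOCAL_REJECT + timeout */
	return status == DEMO_IOSTAT_LOCAL_REJECT &&
	       (ext_status == DEMO_IOERR_SEQUENCE_TIMEOUT ||
		ext_status == 0);
}

int main(void)
{
	printf("%d %d\n",
	       demo_abts_failed(DEMO_IOSTAT_LOCAL_REJECT, 0x0b), /* 1 */
	       demo_abts_failed(0x00, 0x0b));                    /* 0 */
	return 0;
}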
8562
e59058c4 8563/**
3621a710 8564 * lpfc_sli_async_event_handler - ASYNC iocb handler function
e59058c4
JS
8565 * @phba: Pointer to HBA context object.
8566 * @pring: Pointer to driver SLI ring object.
8567 * @iocbq: Pointer to iocb object.
8568 *
8569 * This function is called by the slow ring event handler
8570 * function when there is an ASYNC event iocb in the ring.
8571 * This function is called with no lock held.
8572 * Currently this function handles only temperature related
8573 * ASYNC events. The function decodes the temperature sensor
8574 * event message and posts events for the management applications.
8575 **/
98c9ea5c 8576static void
57127f15
JS
 8577 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
 8578 struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
8579{
8580 IOCB_t *icmd;
8581 uint16_t evt_code;
57127f15
JS
8582 struct temp_event temp_event_data;
8583 struct Scsi_Host *shost;
a257bf90 8584 uint32_t *iocb_w;
57127f15
JS
8585
8586 icmd = &iocbq->iocb;
8587 evt_code = icmd->un.asyncstat.evt_code;
57127f15 8588
cb69f7de
JS
8589 switch (evt_code) {
8590 case ASYNC_TEMP_WARN:
8591 case ASYNC_TEMP_SAFE:
8592 temp_event_data.data = (uint32_t) icmd->ulpContext;
8593 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
8594 if (evt_code == ASYNC_TEMP_WARN) {
8595 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
8596 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8597 "0347 Adapter is very hot, please take "
8598 "corrective action. temperature : %d Celsius\n",
8599 (uint32_t) icmd->ulpContext);
8600 } else {
8601 temp_event_data.event_code = LPFC_NORMAL_TEMP;
8602 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8603 "0340 Adapter temperature is OK now. "
8604 "temperature : %d Celsius\n",
8605 (uint32_t) icmd->ulpContext);
8606 }
8607
8608 /* Send temperature change event to applications */
8609 shost = lpfc_shost_from_vport(phba->pport);
8610 fc_host_post_vendor_event(shost, fc_get_event_number(),
8611 sizeof(temp_event_data), (char *) &temp_event_data,
8612 LPFC_NL_VENDOR_ID);
8613 break;
8614 case ASYNC_STATUS_CN:
8615 lpfc_sli_abts_err_handler(phba, iocbq);
8616 break;
8617 default:
a257bf90 8618 iocb_w = (uint32_t *) icmd;
cb69f7de 8619 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
76bb24ef 8620 "0346 Ring %d handler: unexpected ASYNC_STATUS"
e4e74273 8621 " evt_code 0x%x\n"
a257bf90
JS
8622 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
8623 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
8624 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
8625 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
cb69f7de 8626 pring->ringno, icmd->un.asyncstat.evt_code,
a257bf90
JS
8627 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
8628 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
8629 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
8630 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
8631
cb69f7de 8632 break;
57127f15 8633 }
57127f15
JS
8634}
8635
8636
e59058c4 8637/**
3621a710 8638 * lpfc_sli_setup - SLI ring setup function
e59058c4
JS
8639 * @phba: Pointer to HBA context object.
8640 *
8641 * lpfc_sli_setup sets up rings of the SLI interface with
 8642 * the number of iocbs per ring and the iotags. This function is
 8643 * called while the driver attaches to the HBA, before the
 8644 * interrupts are enabled, so there is no need for locking.
8645 *
8646 * This function always returns 0.
8647 **/
dea3101e
JB
8648int
8649lpfc_sli_setup(struct lpfc_hba *phba)
8650{
ed957684 8651 int i, totiocbsize = 0;
dea3101e
JB
8652 struct lpfc_sli *psli = &phba->sli;
8653 struct lpfc_sli_ring *pring;
8654
8655 psli->num_rings = MAX_CONFIGURED_RINGS;
8656 psli->sli_flag = 0;
8657 psli->fcp_ring = LPFC_FCP_RING;
8658 psli->next_ring = LPFC_FCP_NEXT_RING;
a4bc3379 8659 psli->extra_ring = LPFC_EXTRA_RING;
dea3101e 8660
604a3e30
JB
8661 psli->iocbq_lookup = NULL;
8662 psli->iocbq_lookup_len = 0;
8663 psli->last_iotag = 0;
8664
dea3101e
JB
8665 for (i = 0; i < psli->num_rings; i++) {
8666 pring = &psli->ring[i];
8667 switch (i) {
8668 case LPFC_FCP_RING: /* ring 0 - FCP */
8669 /* numCiocb and numRiocb are used in config_port */
8670 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
8671 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
8672 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8673 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8674 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8675 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
ed957684 8676 pring->sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8677 SLI3_IOCB_CMD_SIZE :
8678 SLI2_IOCB_CMD_SIZE;
ed957684 8679 pring->sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8680 SLI3_IOCB_RSP_SIZE :
8681 SLI2_IOCB_RSP_SIZE;
dea3101e
JB
8682 pring->iotag_ctr = 0;
8683 pring->iotag_max =
92d7f7b0 8684 (phba->cfg_hba_queue_depth * 2);
dea3101e
JB
8685 pring->fast_iotag = pring->iotag_max;
8686 pring->num_mask = 0;
8687 break;
a4bc3379 8688 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea3101e
JB
8689 /* numCiocb and numRiocb are used in config_port */
8690 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
8691 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
ed957684 8692 pring->sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8693 SLI3_IOCB_CMD_SIZE :
8694 SLI2_IOCB_CMD_SIZE;
ed957684 8695 pring->sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8696 SLI3_IOCB_RSP_SIZE :
8697 SLI2_IOCB_RSP_SIZE;
2e0fef85 8698 pring->iotag_max = phba->cfg_hba_queue_depth;
dea3101e
JB
8699 pring->num_mask = 0;
8700 break;
8701 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
8702 /* numCiocb and numRiocb are used in config_port */
8703 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
8704 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
ed957684 8705 pring->sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8706 SLI3_IOCB_CMD_SIZE :
8707 SLI2_IOCB_CMD_SIZE;
ed957684 8708 pring->sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8709 SLI3_IOCB_RSP_SIZE :
8710 SLI2_IOCB_RSP_SIZE;
dea3101e
JB
8711 pring->fast_iotag = 0;
8712 pring->iotag_ctr = 0;
8713 pring->iotag_max = 4096;
57127f15
JS
8714 pring->lpfc_sli_rcv_async_status =
8715 lpfc_sli_async_event_handler;
6669f9bb 8716 pring->num_mask = LPFC_MAX_RING_MASK;
dea3101e 8717 pring->prt[0].profile = 0; /* Mask 0 */
6a9c52cf
JS
8718 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
8719 pring->prt[0].type = FC_TYPE_ELS;
dea3101e 8720 pring->prt[0].lpfc_sli_rcv_unsol_event =
92d7f7b0 8721 lpfc_els_unsol_event;
dea3101e 8722 pring->prt[1].profile = 0; /* Mask 1 */
6a9c52cf
JS
8723 pring->prt[1].rctl = FC_RCTL_ELS_REP;
8724 pring->prt[1].type = FC_TYPE_ELS;
dea3101e 8725 pring->prt[1].lpfc_sli_rcv_unsol_event =
92d7f7b0 8726 lpfc_els_unsol_event;
dea3101e
JB
8727 pring->prt[2].profile = 0; /* Mask 2 */
8728 /* NameServer Inquiry */
6a9c52cf 8729 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
dea3101e 8730 /* NameServer */
6a9c52cf 8731 pring->prt[2].type = FC_TYPE_CT;
dea3101e 8732 pring->prt[2].lpfc_sli_rcv_unsol_event =
92d7f7b0 8733 lpfc_ct_unsol_event;
dea3101e
JB
8734 pring->prt[3].profile = 0; /* Mask 3 */
8735 /* NameServer response */
6a9c52cf 8736 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
dea3101e 8737 /* NameServer */
6a9c52cf 8738 pring->prt[3].type = FC_TYPE_CT;
dea3101e 8739 pring->prt[3].lpfc_sli_rcv_unsol_event =
92d7f7b0 8740 lpfc_ct_unsol_event;
6669f9bb
JS
8741 /* abort unsolicited sequence */
8742 pring->prt[4].profile = 0; /* Mask 4 */
8743 pring->prt[4].rctl = FC_RCTL_BA_ABTS;
8744 pring->prt[4].type = FC_TYPE_BLS;
8745 pring->prt[4].lpfc_sli_rcv_unsol_event =
8746 lpfc_sli4_ct_abort_unsol_event;
dea3101e
JB
8747 break;
8748 }
ed957684 8749 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
92d7f7b0 8750 (pring->numRiocb * pring->sizeRiocb);
dea3101e 8751 }
ed957684 8752 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea3101e 8753 /* Too many cmd / rsp ring entries in SLI2 SLIM */
e8b62011
JS
8754 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
8755 "SLI2 SLIM Data: x%x x%lx\n",
8756 phba->brd_no, totiocbsize,
8757 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea3101e 8758 }
cf5bf97e
JW
8759 if (phba->cfg_multi_ring_support == 2)
8760 lpfc_extra_ring_setup(phba);
dea3101e
JB
8761
8762 return 0;
8763}
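/* A worked example of the SLIM sizing check at the end of lpfc_sli_setup().
 * The per-ring entry counts and IOCB sizes below are hypothetical (the
 * real SLI2_IOCB_*_ENTRIES and IOCB sizes come from lpfc_hw.h); only the
 * accumulation is the point.
 */
#include <stdio.h>

int main(void)
{
	struct { int numCiocb, sizeCiocb, numRiocb, sizeRiocb; } ring[3] = {
		{ 304, 128, 304, 64 },	/* FCP ring (made-up numbers) */
		{  32, 128,  32, 64 },	/* extra ring */
		{ 112, 128, 112, 64 },	/* ELS/CT ring */
	};
	int i, totiocbsize = 0;

	for (i = 0; i < 3; i++)
		totiocbsize += ring[i].numCiocb * ring[i].sizeCiocb +
			       ring[i].numRiocb * ring[i].sizeRiocb;

	/* lpfc_sli_setup() warns if this exceeds MAX_SLIM_IOCB_SIZE */
	printf("total SLIM iocb bytes: %d\n", totiocbsize);	/* 86016 */
	return 0;
}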
8764
e59058c4 8765/**
3621a710 8766 * lpfc_sli_queue_setup - Queue initialization function
e59058c4
JS
8767 * @phba: Pointer to HBA context object.
8768 *
8769 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
8770 * ring. This function also initializes ring indices of each ring.
8771 * This function is called during the initialization of the SLI
8772 * interface of an HBA.
8773 * This function is called with no lock held and always returns
8774 * 1.
8775 **/
dea3101e 8776int
2e0fef85 8777lpfc_sli_queue_setup(struct lpfc_hba *phba)
dea3101e
JB
8778{
8779 struct lpfc_sli *psli;
8780 struct lpfc_sli_ring *pring;
604a3e30 8781 int i;
dea3101e
JB
8782
8783 psli = &phba->sli;
2e0fef85 8784 spin_lock_irq(&phba->hbalock);
dea3101e 8785 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 8786 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e
JB
8787 /* Initialize list headers for txq and txcmplq as double linked lists */
8788 for (i = 0; i < psli->num_rings; i++) {
8789 pring = &psli->ring[i];
8790 pring->ringno = i;
8791 pring->next_cmdidx = 0;
8792 pring->local_getidx = 0;
8793 pring->cmdidx = 0;
8794 INIT_LIST_HEAD(&pring->txq);
8795 INIT_LIST_HEAD(&pring->txcmplq);
8796 INIT_LIST_HEAD(&pring->iocb_continueq);
9c2face6 8797 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea3101e 8798 INIT_LIST_HEAD(&pring->postbufq);
dea3101e 8799 }
2e0fef85
JS
8800 spin_unlock_irq(&phba->hbalock);
8801 return 1;
dea3101e
JB
8802}
8803
04c68496
JS
8804/**
8805 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
8806 * @phba: Pointer to HBA context object.
8807 *
8808 * This routine flushes the mailbox command subsystem. It will unconditionally
8809 * flush all the mailbox commands in the three possible stages in the mailbox
8810 * command sub-system: pending mailbox command queue; the outstanding mailbox
 8811 * command; and the completed mailbox command queue. It is the caller's responsibility
8812 * to make sure that the driver is in the proper state to flush the mailbox
8813 * command sub-system. Namely, the posting of mailbox commands into the
8814 * pending mailbox command queue from the various clients must be stopped;
 8815 * either the HBA is in a state in which it will never work on the outstanding
8816 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
8817 * mailbox command has been completed.
8818 **/
8819static void
8820lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
8821{
8822 LIST_HEAD(completions);
8823 struct lpfc_sli *psli = &phba->sli;
8824 LPFC_MBOXQ_t *pmb;
8825 unsigned long iflag;
8826
8827 /* Flush all the mailbox commands in the mbox system */
8828 spin_lock_irqsave(&phba->hbalock, iflag);
8829 /* The pending mailbox command queue */
8830 list_splice_init(&phba->sli.mboxq, &completions);
8831 /* The outstanding active mailbox command */
8832 if (psli->mbox_active) {
8833 list_add_tail(&psli->mbox_active->list, &completions);
8834 psli->mbox_active = NULL;
8835 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8836 }
8837 /* The completed mailbox command queue */
8838 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
8839 spin_unlock_irqrestore(&phba->hbalock, iflag);
8840
8841 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
8842 while (!list_empty(&completions)) {
8843 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
8844 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
8845 if (pmb->mbox_cmpl)
8846 pmb->mbox_cmpl(phba, pmb);
8847 }
8848}
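/* The flush above uses a pattern that recurs throughout this file: splice
 * the whole queue off under the lock, then run completions with the lock
 * dropped. A standalone userspace approximation with a singly linked list
 * and a pthread mutex; all demo_* names are invented.
 */
#include <pthread.h>
#include <stdio.h>

struct demo_cmd { struct demo_cmd *next; int tag; };

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_cmd *demo_pending;

static void demo_flush(void)
{
	struct demo_cmd *list, *cmd;

	pthread_mutex_lock(&demo_lock);
	list = demo_pending;		/* "list_splice_init": steal queue */
	demo_pending = NULL;
	pthread_mutex_unlock(&demo_lock);

	while ((cmd = list) != NULL) {	/* complete outside the lock */
		list = cmd->next;
		printf("cancelled cmd %d\n", cmd->tag);
	}
}

int main(void)
{
	struct demo_cmd a = { NULL, 1 }, b = { &a, 2 };

	demo_pending = &b;
	demo_flush();			/* cancels cmd 2, then cmd 1 */
	return 0;
}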
8849
e59058c4 8850/**
3621a710 8851 * lpfc_sli_host_down - Vport cleanup function
e59058c4
JS
8852 * @vport: Pointer to virtual port object.
8853 *
8854 * lpfc_sli_host_down is called to clean up the resources
8855 * associated with a vport before destroying virtual
8856 * port data structures.
 8857 * This function does the following operations:
8858 * - Free discovery resources associated with this virtual
8859 * port.
8860 * - Free iocbs associated with this virtual port in
8861 * the txq.
8862 * - Send abort for all iocb commands associated with this
8863 * vport in txcmplq.
8864 *
8865 * This function is called with no lock held and always returns 1.
8866 **/
92d7f7b0
JS
8867int
8868lpfc_sli_host_down(struct lpfc_vport *vport)
8869{
858c9f6c 8870 LIST_HEAD(completions);
92d7f7b0
JS
8871 struct lpfc_hba *phba = vport->phba;
8872 struct lpfc_sli *psli = &phba->sli;
8873 struct lpfc_sli_ring *pring;
8874 struct lpfc_iocbq *iocb, *next_iocb;
92d7f7b0
JS
8875 int i;
8876 unsigned long flags = 0;
8877 uint16_t prev_pring_flag;
8878
8879 lpfc_cleanup_discovery_resources(vport);
8880
8881 spin_lock_irqsave(&phba->hbalock, flags);
92d7f7b0
JS
8882 for (i = 0; i < psli->num_rings; i++) {
8883 pring = &psli->ring[i];
8884 prev_pring_flag = pring->flag;
5e9d9b82
JS
8885 /* Only slow rings */
8886 if (pring->ringno == LPFC_ELS_RING) {
858c9f6c 8887 pring->flag |= LPFC_DEFERRED_RING_EVENT;
5e9d9b82
JS
8888 /* Set the lpfc data pending flag */
8889 set_bit(LPFC_DATA_READY, &phba->data_flags);
8890 }
92d7f7b0
JS
8891 /*
8892 * Error everything on the txq since these iocbs have not been
8893 * given to the FW yet.
8894 */
92d7f7b0
JS
8895 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
8896 if (iocb->vport != vport)
8897 continue;
858c9f6c 8898 list_move_tail(&iocb->list, &completions);
92d7f7b0 8899 pring->txq_cnt--;
92d7f7b0
JS
8900 }
8901
8902 /* Next issue ABTS for everything on the txcmplq */
8903 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
8904 list) {
8905 if (iocb->vport != vport)
8906 continue;
8907 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
8908 }
8909
8910 pring->flag = prev_pring_flag;
8911 }
8912
8913 spin_unlock_irqrestore(&phba->hbalock, flags);
8914
a257bf90
JS
8915 /* Cancel all the IOCBs from the completions list */
8916 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
8917 IOERR_SLI_DOWN);
92d7f7b0
JS
8918 return 1;
8919}
8920
e59058c4 8921/**
3621a710 8922 * lpfc_sli_hba_down - Resource cleanup function for the HBA
e59058c4
JS
8923 * @phba: Pointer to HBA context object.
8924 *
8925 * This function cleans up all iocb, buffers, mailbox commands
8926 * while shutting down the HBA. This function is called with no
8927 * lock held and always returns 1.
8928 * This function does the following to cleanup driver resources:
8929 * - Free discovery resources for each virtual port
8930 * - Cleanup any pending fabric iocbs
8931 * - Iterate through the iocb txq and free each entry
8932 * in the list.
8933 * - Free up any buffer posted to the HBA
8934 * - Free mailbox commands in the mailbox queue.
8935 **/
dea3101e 8936int
2e0fef85 8937lpfc_sli_hba_down(struct lpfc_hba *phba)
dea3101e 8938{
2534ba75 8939 LIST_HEAD(completions);
2e0fef85 8940 struct lpfc_sli *psli = &phba->sli;
dea3101e 8941 struct lpfc_sli_ring *pring;
0ff10d46 8942 struct lpfc_dmabuf *buf_ptr;
dea3101e 8943 unsigned long flags = 0;
04c68496
JS
8944 int i;
8945
8946 /* Shutdown the mailbox command sub-system */
8947 lpfc_sli_mbox_sys_shutdown(phba);
dea3101e 8948
dea3101e
JB
8949 lpfc_hba_down_prep(phba);
8950
92d7f7b0
JS
8951 lpfc_fabric_abort_hba(phba);
8952
2e0fef85 8953 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e
JB
8954 for (i = 0; i < psli->num_rings; i++) {
8955 pring = &psli->ring[i];
5e9d9b82
JS
8956 /* Only slow rings */
8957 if (pring->ringno == LPFC_ELS_RING) {
858c9f6c 8958 pring->flag |= LPFC_DEFERRED_RING_EVENT;
5e9d9b82
JS
8959 /* Set the lpfc data pending flag */
8960 set_bit(LPFC_DATA_READY, &phba->data_flags);
8961 }
dea3101e
JB
8962
8963 /*
8964 * Error everything on the txq since these iocbs have not been
8965 * given to the FW yet.
8966 */
2534ba75 8967 list_splice_init(&pring->txq, &completions);
dea3101e
JB
8968 pring->txq_cnt = 0;
8969
2534ba75 8970 }
2e0fef85 8971 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 8972
a257bf90
JS
8973 /* Cancel all the IOCBs from the completions list */
8974 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
8975 IOERR_SLI_DOWN);
dea3101e 8976
0ff10d46
JS
8977 spin_lock_irqsave(&phba->hbalock, flags);
8978 list_splice_init(&phba->elsbuf, &completions);
8979 phba->elsbuf_cnt = 0;
8980 phba->elsbuf_prev_cnt = 0;
8981 spin_unlock_irqrestore(&phba->hbalock, flags);
8982
8983 while (!list_empty(&completions)) {
8984 list_remove_head(&completions, buf_ptr,
8985 struct lpfc_dmabuf, list);
8986 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
8987 kfree(buf_ptr);
8988 }
8989
dea3101e
JB
8990 /* Return any active mbox cmds */
8991 del_timer_sync(&psli->mbox_tmo);
2e0fef85 8992
da0436e9 8993 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
2e0fef85 8994 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
da0436e9 8995 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
2e0fef85 8996
da0436e9
JS
8997 return 1;
8998}
8999
e59058c4 9000/**
3621a710 9001 * lpfc_sli_pcimem_bcopy - SLI memory copy function
e59058c4
JS
9002 * @srcp: Source memory pointer.
9003 * @destp: Destination memory pointer.
9004 * @cnt: Number of words required to be copied.
9005 *
9006 * This function is used for copying data between driver memory
9007 * and the SLI memory. This function also changes the endianness
9008 * of each word if native endianness is different from SLI
9009 * endianness. This function can be called with or without
9010 * lock.
9011 **/
dea3101e
JB
9012void
9013lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
9014{
9015 uint32_t *src = srcp;
9016 uint32_t *dest = destp;
9017 uint32_t ldata;
9018 int i;
9019
9020 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
9021 ldata = *src;
9022 ldata = le32_to_cpu(ldata);
9023 *dest = ldata;
9024 src++;
9025 dest++;
9026 }
9027}
9028
e59058c4 9029
a0c87cbd
JS
9030/**
9031 * lpfc_sli_bemem_bcopy - SLI memory copy function
9032 * @srcp: Source memory pointer.
9033 * @destp: Destination memory pointer.
9034 * @cnt: Number of words required to be copied.
9035 *
 9036 * This function is used for copying data from a data structure
 9037 * in big endian representation to the local endianness.
9038 * This function can be called with or without lock.
9039 **/
9040void
9041lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
9042{
9043 uint32_t *src = srcp;
9044 uint32_t *dest = destp;
9045 uint32_t ldata;
9046 int i;
9047
9048 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9049 ldata = *src;
9050 ldata = be32_to_cpu(ldata);
9051 *dest = ldata;
9052 src++;
9053 dest++;
9054 }
9055}
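/* A standalone demonstration of the word-by-word copy-with-swab that the
 * two bcopy helpers above perform; demo_be32_to_cpu() stands in for the
 * kernel's be32_to_cpu() and assumes a little-endian host. Note that cnt
 * in the helpers is a byte count, hence their i += sizeof(uint32_t)
 * stride.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_be32_to_cpu(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

int main(void)
{
	uint32_t src[2] = { 0x11223344, 0xaabbccdd };
	uint32_t dst[2];
	int i;

	for (i = 0; i < 2; i++)		/* per-word, like the helpers */
		dst[i] = demo_be32_to_cpu(src[i]);

	printf("0x%08x 0x%08x\n", (unsigned)dst[0], (unsigned)dst[1]);
	/* prints 0x44332211 0xddccbbaa */
	return 0;
}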
9056
e59058c4 9057/**
3621a710 9058 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
e59058c4
JS
9059 * @phba: Pointer to HBA context object.
9060 * @pring: Pointer to driver SLI ring object.
9061 * @mp: Pointer to driver buffer object.
9062 *
9063 * This function is called with no lock held.
9064 * It always return zero after adding the buffer to the postbufq
9065 * buffer list.
9066 **/
dea3101e 9067int
2e0fef85
JS
9068lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9069 struct lpfc_dmabuf *mp)
dea3101e
JB
9070{
9071 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
9072 later */
2e0fef85 9073 spin_lock_irq(&phba->hbalock);
dea3101e 9074 list_add_tail(&mp->list, &pring->postbufq);
dea3101e 9075 pring->postbufq_cnt++;
2e0fef85 9076 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
9077 return 0;
9078}
9079
e59058c4 9080/**
3621a710 9081 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
e59058c4
JS
9082 * @phba: Pointer to HBA context object.
9083 *
9084 * When HBQ is enabled, buffers are searched based on tags. This function
9085 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
9086 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
9087 * does not conflict with tags of buffer posted for unsolicited events.
9088 * The function returns the allocated tag. The function is called with
9089 * no locks held.
9090 **/
76bb24ef
JS
9091uint32_t
9092lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
9093{
9094 spin_lock_irq(&phba->hbalock);
9095 phba->buffer_tag_count++;
9096 /*
9097 * Always set the QUE_BUFTAG_BIT to distiguish between
9098 * a tag assigned by HBQ.
9099 */
9100 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
9101 spin_unlock_irq(&phba->hbalock);
9102 return phba->buffer_tag_count;
9103}
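/* A standalone sketch of the tag-space partitioning above: or-ing a
 * reserved high bit into every driver-assigned tag keeps it from ever
 * colliding with an HBQ-assigned tag. DEMO_QUE_BUFTAG_BIT is illustrative;
 * the real QUE_BUFTAG_BIT is defined in lpfc_sli.h.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_QUE_BUFTAG_BIT	(1u << 31)

static uint32_t demo_tag_count;

static uint32_t demo_get_buffer_tag(void)
{
	demo_tag_count++;
	demo_tag_count |= DEMO_QUE_BUFTAG_BIT;	/* mark as driver tag */
	return demo_tag_count;
}

int main(void)
{
	uint32_t t1 = demo_get_buffer_tag();
	uint32_t t2 = demo_get_buffer_tag();

	printf("0x%08x 0x%08x\n", (unsigned)t1, (unsigned)t2);
	/* prints 0x80000001 0x80000002 */
	return 0;
}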
9104
e59058c4 9105/**
3621a710 9106 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
e59058c4
JS
9107 * @phba: Pointer to HBA context object.
9108 * @pring: Pointer to driver SLI ring object.
9109 * @tag: Buffer tag.
9110 *
9111 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
 9112 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
9113 * iocb is posted to the response ring with the tag of the buffer.
9114 * This function searches the pring->postbufq list using the tag
9115 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
9116 * iocb. If the buffer is found then lpfc_dmabuf object of the
9117 * buffer is returned to the caller else NULL is returned.
9118 * This function is called with no lock held.
9119 **/
76bb24ef
JS
9120struct lpfc_dmabuf *
9121lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9122 uint32_t tag)
9123{
9124 struct lpfc_dmabuf *mp, *next_mp;
9125 struct list_head *slp = &pring->postbufq;
9126
25985edc 9127 /* Search postbufq, from the beginning, looking for a match on tag */
76bb24ef
JS
9128 spin_lock_irq(&phba->hbalock);
9129 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9130 if (mp->buffer_tag == tag) {
9131 list_del_init(&mp->list);
9132 pring->postbufq_cnt--;
9133 spin_unlock_irq(&phba->hbalock);
9134 return mp;
9135 }
9136 }
9137
9138 spin_unlock_irq(&phba->hbalock);
9139 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d7c255b2 9140 "0402 Cannot find virtual addr for buffer tag on "
76bb24ef
JS
9141 "ring %d Data x%lx x%p x%p x%x\n",
9142 pring->ringno, (unsigned long) tag,
9143 slp->next, slp->prev, pring->postbufq_cnt);
9144
9145 return NULL;
9146}
dea3101e 9147
e59058c4 9148/**
3621a710 9149 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
e59058c4
JS
9150 * @phba: Pointer to HBA context object.
9151 * @pring: Pointer to driver SLI ring object.
9152 * @phys: DMA address of the buffer.
9153 *
9154 * This function searches the buffer list using the dma_address
9155 * of unsolicited event to find the driver's lpfc_dmabuf object
9156 * corresponding to the dma_address. The function returns the
9157 * lpfc_dmabuf object if a buffer is found else it returns NULL.
9158 * This function is called by the ct and els unsolicited event
9159 * handlers to get the buffer associated with the unsolicited
9160 * event.
9161 *
9162 * This function is called with no lock held.
9163 **/
dea3101e
JB
9164struct lpfc_dmabuf *
9165lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9166 dma_addr_t phys)
9167{
9168 struct lpfc_dmabuf *mp, *next_mp;
9169 struct list_head *slp = &pring->postbufq;
9170
25985edc 9171 /* Search postbufq, from the beginning, looking for a match on phys */
2e0fef85 9172 spin_lock_irq(&phba->hbalock);
dea3101e
JB
9173 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9174 if (mp->phys == phys) {
9175 list_del_init(&mp->list);
9176 pring->postbufq_cnt--;
2e0fef85 9177 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
9178 return mp;
9179 }
9180 }
9181
2e0fef85 9182 spin_unlock_irq(&phba->hbalock);
dea3101e 9183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 9184 "0410 Cannot find virtual addr for mapped buf on "
dea3101e 9185 "ring %d Data x%llx x%p x%p x%x\n",
e8b62011 9186 pring->ringno, (unsigned long long)phys,
dea3101e
JB
9187 slp->next, slp->prev, pring->postbufq_cnt);
9188 return NULL;
9189}
9190
e59058c4 9191/**
3621a710 9192 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
e59058c4
JS
9193 * @phba: Pointer to HBA context object.
9194 * @cmdiocb: Pointer to driver command iocb object.
9195 * @rspiocb: Pointer to driver response iocb object.
9196 *
9197 * This function is the completion handler for the abort iocbs for
9198 * ELS commands. This function is called from the ELS ring event
9199 * handler with no lock held. This function frees memory resources
9200 * associated with the abort iocb.
9201 **/
dea3101e 9202static void
2e0fef85
JS
9203lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9204 struct lpfc_iocbq *rspiocb)
dea3101e 9205{
2e0fef85 9206 IOCB_t *irsp = &rspiocb->iocb;
2680eeaa 9207 uint16_t abort_iotag, abort_context;
ff78d8f9 9208 struct lpfc_iocbq *abort_iocb = NULL;
2680eeaa
JS
9209
9210 if (irsp->ulpStatus) {
ff78d8f9
JS
9211
9212 /*
 9213 * Assume that the port has already completed and returned
 9214 * the iocb, or will return it. Just log the message.
9215 */
2680eeaa
JS
9216 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
9217 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
9218
2e0fef85 9219 spin_lock_irq(&phba->hbalock);
45ed1190
JS
9220 if (phba->sli_rev < LPFC_SLI_REV4) {
9221 if (abort_iotag != 0 &&
9222 abort_iotag <= phba->sli.last_iotag)
9223 abort_iocb =
9224 phba->sli.iocbq_lookup[abort_iotag];
9225 } else
9226 /* For sli4 the abort_tag is the XRI,
9227 * so the abort routine puts the iotag of the iocb
9228 * being aborted in the context field of the abort
9229 * IOCB.
9230 */
9231 abort_iocb = phba->sli.iocbq_lookup[abort_context];
2680eeaa 9232
2a9bf3d0
JS
9233 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
9234 "0327 Cannot abort els iocb %p "
9235 "with tag %x context %x, abort status %x, "
9236 "abort code %x\n",
9237 abort_iocb, abort_iotag, abort_context,
9238 irsp->ulpStatus, irsp->un.ulpWord[4]);
341af102 9239
ff78d8f9 9240 spin_unlock_irq(&phba->hbalock);
2680eeaa 9241 }
604a3e30 9242 lpfc_sli_release_iocbq(phba, cmdiocb);
dea3101e
JB
9243 return;
9244}
9245
e59058c4 9246/**
3621a710 9247 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
e59058c4
JS
9248 * @phba: Pointer to HBA context object.
9249 * @cmdiocb: Pointer to driver command iocb object.
9250 * @rspiocb: Pointer to driver response iocb object.
9251 *
9252 * The function is called from SLI ring event handler with no
9253 * lock held. This function is the completion handler for ELS commands
9254 * which are aborted. The function frees memory resources used for
9255 * the aborted ELS commands.
9256 **/
92d7f7b0
JS
9257static void
9258lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9259 struct lpfc_iocbq *rspiocb)
9260{
9261 IOCB_t *irsp = &rspiocb->iocb;
9262
9263 /* ELS cmd tag <ulpIoTag> completes */
9264 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
d7c255b2 9265 "0139 Ignoring ELS cmd tag x%x completion Data: "
92d7f7b0 9266 "x%x x%x x%x\n",
e8b62011 9267 irsp->ulpIoTag, irsp->ulpStatus,
92d7f7b0 9268 irsp->un.ulpWord[4], irsp->ulpTimeout);
858c9f6c
JS
9269 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
9270 lpfc_ct_free_iocb(phba, cmdiocb);
9271 else
9272 lpfc_els_free_iocb(phba, cmdiocb);
92d7f7b0
JS
9273 return;
9274}
9275
e59058c4 9276/**
5af5eee7 9277 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
e59058c4
JS
9278 * @phba: Pointer to HBA context object.
9279 * @pring: Pointer to driver SLI ring object.
9280 * @cmdiocb: Pointer to driver command iocb object.
9281 *
5af5eee7
JS
9282 * This function issues an abort iocb for the provided command iocb down to
 9283 * the port. Unless the outstanding command iocb is itself an abort
 9284 * request, this function issues the abort unconditionally. This function is
9285 * called with hbalock held. The function returns 0 when it fails due to
9286 * memory allocation failure or when the command iocb is an abort request.
e59058c4 9287 **/
5af5eee7
JS
9288static int
9289lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 9290 struct lpfc_iocbq *cmdiocb)
dea3101e 9291{
2e0fef85 9292 struct lpfc_vport *vport = cmdiocb->vport;
0bd4ca25 9293 struct lpfc_iocbq *abtsiocbp;
dea3101e
JB
9294 IOCB_t *icmd = NULL;
9295 IOCB_t *iabt = NULL;
5af5eee7 9296 int retval;
07951076 9297
92d7f7b0
JS
9298 /*
9299 * There are certain command types we don't want to abort. And we
9300 * don't want to abort commands that are already in the process of
9301 * being aborted.
07951076
JS
9302 */
9303 icmd = &cmdiocb->iocb;
2e0fef85 9304 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
92d7f7b0
JS
9305 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9306 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
07951076
JS
9307 return 0;
9308
dea3101e 9309 /* issue ABTS for this IOCB based on iotag */
92d7f7b0 9310 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e
JB
9311 if (abtsiocbp == NULL)
9312 return 0;
dea3101e 9313
07951076 9314 /* This signals the response to set the correct status
341af102 9315 * before calling the completion handler
07951076
JS
9316 */
9317 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
9318
dea3101e 9319 iabt = &abtsiocbp->iocb;
07951076
JS
9320 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
9321 iabt->un.acxri.abortContextTag = icmd->ulpContext;
45ed1190 9322 if (phba->sli_rev == LPFC_SLI_REV4) {
da0436e9 9323 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
45ed1190
JS
9324 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
9325 }
da0436e9
JS
9326 else
9327 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
07951076
JS
9328 iabt->ulpLe = 1;
9329 iabt->ulpClass = icmd->ulpClass;
dea3101e 9330
5ffc266e
JS
9331 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9332 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
341af102
JS
9333 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
9334 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
5ffc266e 9335
2e0fef85 9336 if (phba->link_state >= LPFC_LINK_UP)
07951076
JS
9337 iabt->ulpCommand = CMD_ABORT_XRI_CN;
9338 else
9339 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 9340
07951076 9341 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
5b8bd0c9 9342
e8b62011
JS
9343 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
9344 "0339 Abort xri x%x, original iotag x%x, "
9345 "abort cmd iotag x%x\n",
2a9bf3d0 9346 iabt->un.acxri.abortIoTag,
e8b62011 9347 iabt->un.acxri.abortContextTag,
2a9bf3d0 9348 abtsiocbp->iotag);
da0436e9 9349 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
dea3101e 9350
d7c255b2
JS
9351 if (retval)
9352 __lpfc_sli_release_iocbq(phba, abtsiocbp);
5af5eee7
JS
9353
9354 /*
9355 * Caller to this routine should check for IOCB_ERROR
9356 * and handle it properly. This routine no longer removes
9357 * iocb off txcmplq and call compl in case of IOCB_ERROR.
9358 */
9359 return retval;
9360}
9361
9362/**
9363 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
9364 * @phba: Pointer to HBA context object.
9365 * @pring: Pointer to driver SLI ring object.
9366 * @cmdiocb: Pointer to driver command iocb object.
9367 *
9368 * This function issues an abort iocb for the provided command iocb. In case
9369 * of unloading, the abort iocb will not be issued to commands on the ELS
 9370 * ring. Instead, the completion callback of those commands is changed
 9371 * so that nothing happens when they finish. This function is called with
9372 * hbalock held. The function returns 0 when the command iocb is an abort
9373 * request.
9374 **/
9375int
9376lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9377 struct lpfc_iocbq *cmdiocb)
9378{
9379 struct lpfc_vport *vport = cmdiocb->vport;
9380 int retval = IOCB_ERROR;
9381 IOCB_t *icmd = NULL;
9382
9383 /*
9384 * There are certain command types we don't want to abort. And we
9385 * don't want to abort commands that are already in the process of
9386 * being aborted.
9387 */
9388 icmd = &cmdiocb->iocb;
9389 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9390 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9391 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9392 return 0;
9393
9394 /*
9395 * If we're unloading, don't abort iocb on the ELS ring, but change
9396 * the callback so that nothing happens when it finishes.
9397 */
9398 if ((vport->load_flag & FC_UNLOADING) &&
9399 (pring->ringno == LPFC_ELS_RING)) {
9400 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
9401 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
9402 else
9403 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
9404 goto abort_iotag_exit;
9405 }
9406
9407 /* Now, we try to issue the abort to the cmdiocb out */
9408 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
9409
07951076 9410abort_iotag_exit:
2e0fef85
JS
9411 /*
9412 * Caller to this routine should check for IOCB_ERROR
9413 * and handle it properly. This routine no longer removes
9414 * iocb off txcmplq and call compl in case of IOCB_ERROR.
07951076 9415 */
2e0fef85 9416 return retval;
dea3101e
JB
9417}
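/* Hypothetical caller of the abort path above, following the "caller must
 * check IOCB_ERROR" note: take hbalock, issue the abort, and treat
 * IOCB_ERROR only as "the ABTS was not issued" -- the original iocb still
 * completes through its own handler. lpfc_demo_abort_one() is a sketch,
 * not driver code.
 */
static void lpfc_demo_abort_one(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring,
				struct lpfc_iocbq *iocb)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);	/* hbalock required */
	rc = lpfc_sli_issue_abort_iotag(phba, pring, iocb);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	if (rc == IOCB_ERROR)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"abort not issued for iotag x%x\n",
				iocb->iotag);
}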
9418
5af5eee7
JS
9419/**
9420 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
9421 * @phba: Pointer to HBA context object.
9422 * @pring: Pointer to driver SLI ring object.
9423 *
9424 * This function aborts all iocbs in the given ring and frees all the iocb
9425 * objects in txq. This function issues abort iocbs unconditionally for all
 9426 * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
9427 * to complete before the return of this function. The caller is not required
9428 * to hold any locks.
9429 **/
9430static void
9431lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
9432{
9433 LIST_HEAD(completions);
9434 struct lpfc_iocbq *iocb, *next_iocb;
9435
9436 if (pring->ringno == LPFC_ELS_RING)
9437 lpfc_fabric_abort_hba(phba);
9438
9439 spin_lock_irq(&phba->hbalock);
9440
9441 /* Take off all the iocbs on txq for cancelling */
9442 list_splice_init(&pring->txq, &completions);
9443 pring->txq_cnt = 0;
9444
9445 /* Next issue ABTS for everything on the txcmplq */
9446 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
9447 lpfc_sli_abort_iotag_issue(phba, pring, iocb);
9448
9449 spin_unlock_irq(&phba->hbalock);
9450
9451 /* Cancel all the IOCBs from the completions list */
9452 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9453 IOERR_SLI_ABORTED);
9454}
9455
9456/**
9457 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
9458 * @phba: pointer to lpfc HBA data structure.
9459 *
9460 * This routine will abort all pending and outstanding iocbs to an HBA.
9461 **/
9462void
9463lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
9464{
9465 struct lpfc_sli *psli = &phba->sli;
9466 struct lpfc_sli_ring *pring;
9467 int i;
9468
9469 for (i = 0; i < psli->num_rings; i++) {
9470 pring = &psli->ring[i];
9471 lpfc_sli_iocb_ring_abort(phba, pring);
9472 }
9473}
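/*
 * Hedged call-site sketch (illustrative only): an error-recovery path
 * could flush all outstanding I/O before resetting the board, e.g.:
 *
 *	lpfc_sli_hba_iocb_abort(phba);
 *	lpfc_sli_brdreset(phba);
 *
 * Pairing this with lpfc_sli_brdreset() is an assumption made for the
 * example, not a requirement of this routine.
 */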
9474
e59058c4 9475/**
3621a710 9476 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
9477 * @iocbq: Pointer to driver iocb object.
9478 * @vport: Pointer to driver virtual port object.
9479 * @tgt_id: SCSI ID of the target.
9480 * @lun_id: LUN ID of the scsi device.
9481 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
9482 *
3621a710 9483 * This function acts as an iocb filter for functions which abort or count
9484 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
9485 * 0 if the filtering criteria are met for the given iocb and will return
9486 * 1 if the filtering criteria are not met.
9487 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
9488 * given iocb is for the SCSI device specified by the vport, tgt_id and
9489 * lun_id parameters.
9490 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
9491 * given iocb is for the SCSI target specified by vport and tgt_id
9492 * parameters.
9493 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
9494 * given iocb is for the SCSI host associated with the given vport.
9495 * This function is called with no locks held.
9496 **/
dea3101e 9497static int
9498lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
9499 uint16_t tgt_id, uint64_t lun_id,
0bd4ca25 9500 lpfc_ctx_cmd ctx_cmd)
dea3101e 9501{
0bd4ca25 9502 struct lpfc_scsi_buf *lpfc_cmd;
9503 int rc = 1;
9504
9505 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
9506 return rc;
9507
9508 if (iocbq->vport != vport)
9509 return rc;
9510
0bd4ca25 9511 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
0bd4ca25 9512
495a714c 9513 if (lpfc_cmd->pCmd == NULL)
9514 return rc;
9515
9516 switch (ctx_cmd) {
9517 case LPFC_CTX_LUN:
9518 if ((lpfc_cmd->rdata->pnode) &&
9519 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
9520 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
9521 rc = 0;
9522 break;
9523 case LPFC_CTX_TGT:
9524 if ((lpfc_cmd->rdata->pnode) &&
9525 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
9526 rc = 0;
9527 break;
9528 case LPFC_CTX_HOST:
9529 rc = 0;
9530 break;
9531 default:
9532 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
cadbd4a5 9533 __func__, ctx_cmd);
9534 break;
9535 }
9536
9537 return rc;
9538}
9539
e59058c4 9540/**
3621a710 9541 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
9542 * @vport: Pointer to virtual port.
9543 * @tgt_id: SCSI ID of the target.
9544 * @lun_id: LUN ID of the scsi device.
9545 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9546 *
9547 * This function returns number of FCP commands pending for the vport.
9548 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
9549 * commands pending on the vport associated with SCSI device specified
9550 * by tgt_id and lun_id parameters.
9551 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
9552 * commands pending on the vport associated with SCSI target specified
9553 * by tgt_id parameter.
9554 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
9555 * commands pending on the vport.
9556 * This function returns the number of iocbs which satisfy the filter.
9557 * This function is called without any lock held.
9558 **/
dea3101e 9559int
9560lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
9561 lpfc_ctx_cmd ctx_cmd)
dea3101e 9562{
51ef4c26 9563 struct lpfc_hba *phba = vport->phba;
9564 struct lpfc_iocbq *iocbq;
9565 int sum, i;
dea3101e 9566
9567 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
9568 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 9569
9570 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
9571 ctx_cmd) == 0)
0bd4ca25 9572 sum++;
dea3101e 9573 }
0bd4ca25 9574
9575 return sum;
9576}
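/*
 * Usage sketch (illustrative, not from this file): a SCSI reset handler
 * might poll for commands still outstanding against one target. With
 * LPFC_CTX_TGT the lun_id argument is ignored, so 0 is only a placeholder:
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT))
 *		msleep(10);
 *
 * A real caller would bound this loop with a timeout.
 */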
9577
e59058c4 9578/**
3621a710 9579 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
9580 * @phba: Pointer to HBA context object
9581 * @cmdiocb: Pointer to command iocb object.
9582 * @rspiocb: Pointer to response iocb object.
9583 *
9584 * This function is called when an aborted FCP iocb completes. This
9585 * function is called by the ring event handler with no lock held.
9586 * This function frees the iocb.
9587 **/
5eb95af0 9588void
9589lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9590 struct lpfc_iocbq *rspiocb)
5eb95af0 9591{
9592 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9593 "3096 ABORT_XRI_CN completing on xri x%x "
9594 "original iotag x%x, abort cmd iotag x%x "
9595 "status 0x%x, reason 0x%x\n",
9596 cmdiocb->iocb.un.acxri.abortContextTag,
9597 cmdiocb->iocb.un.acxri.abortIoTag,
9598 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
9599 rspiocb->iocb.un.ulpWord[4]);
604a3e30 9600 lpfc_sli_release_iocbq(phba, cmdiocb);
9601 return;
9602}
9603
e59058c4 9604/**
3621a710 9605 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
9606 * @vport: Pointer to virtual port.
9607 * @pring: Pointer to driver SLI ring object.
9608 * @tgt_id: SCSI ID of the target.
9609 * @lun_id: LUN ID of the scsi device.
9610 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9611 *
9612 * This function sends an abort command for every SCSI command
9613 * associated with the given virtual port pending on the ring
9614 * filtered by lpfc_sli_validate_fcp_iocb function.
9615 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
9616 * FCP iocbs associated with lun specified by tgt_id and lun_id
9617 * parameters
9618 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
9619 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
9620 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
9621 * FCP iocbs associated with virtual port.
9622 * This function returns number of iocbs it failed to abort.
9623 * This function is called with no locks held.
9624 **/
dea3101e 9625int
9626lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
9627 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
dea3101e 9628{
51ef4c26 9629 struct lpfc_hba *phba = vport->phba;
9630 struct lpfc_iocbq *iocbq;
9631 struct lpfc_iocbq *abtsiocb;
dea3101e 9632 IOCB_t *cmd = NULL;
dea3101e 9633 int errcnt = 0, ret_val = 0;
0bd4ca25 9634 int i;
dea3101e 9635
9636 for (i = 1; i <= phba->sli.last_iotag; i++) {
9637 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 9638
51ef4c26 9639 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
2e0fef85 9640 abort_cmd) != 0)
9641 continue;
9642
9643 /* issue ABTS for this IOCB based on iotag */
0bd4ca25 9644 abtsiocb = lpfc_sli_get_iocbq(phba);
9645 if (abtsiocb == NULL) {
9646 errcnt++;
9647 continue;
9648 }
dea3101e 9649
0bd4ca25 9650 cmd = &iocbq->iocb;
9651 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
9652 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
9653 if (phba->sli_rev == LPFC_SLI_REV4)
9654 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
9655 else
9656 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
9657 abtsiocb->iocb.ulpLe = 1;
9658 abtsiocb->iocb.ulpClass = cmd->ulpClass;
2e0fef85 9659 abtsiocb->vport = phba->pport;
dea3101e 9660
9661 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9662 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
9663 if (iocbq->iocb_flag & LPFC_IO_FCP)
9664 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
5ffc266e 9665
2e0fef85 9666 if (lpfc_is_link_up(phba))
9667 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
9668 else
9669 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
9670
9671 /* Setup callback routine and issue the command. */
9672 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
9673 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
9674 abtsiocb, 0);
dea3101e 9675 if (ret_val == IOCB_ERROR) {
604a3e30 9676 lpfc_sli_release_iocbq(phba, abtsiocb);
9677 errcnt++;
9678 continue;
9679 }
9680 }
9681
9682 return errcnt;
9683}
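/*
 * Usage sketch (illustrative): a LUN-reset handler might abort everything
 * queued to a single LUN and treat a non-zero return as partial failure.
 * The ring lookup through phba->sli.fcp_ring and the log text are
 * assumptions made for the example:
 *
 *	cnt = lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
 *				  tgt_id, lun_id, LPFC_CTX_LUN);
 *	if (cnt)
 *		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 *				 "failed to abort %d iocbs\n", cnt);
 */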
9684
e59058c4 9685/**
3621a710 9686 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
9687 * @phba: Pointer to HBA context object.
9688 * @cmdiocbq: Pointer to command iocb.
9689 * @rspiocbq: Pointer to response iocb.
9690 *
9691 * This function is the completion handler for iocbs issued using
9692 * lpfc_sli_issue_iocb_wait function. This function is called by the
9693 * ring event handler function without any lock held. This function
9694 * can be called from both worker thread context and interrupt
9695 * context. This function can also be called from any other thread which
9696 * cleans up the SLI layer objects.
9697 * This function copies the contents of the response iocb to the
9698 * response iocb memory object provided by the caller of
9699 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
9700 * sleeps waiting for the iocb completion.
9701 **/
9702static void
9703lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
9704 struct lpfc_iocbq *cmdiocbq,
9705 struct lpfc_iocbq *rspiocbq)
dea3101e 9706{
9707 wait_queue_head_t *pdone_q;
9708 unsigned long iflags;
0f65ff68 9709 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 9710
2e0fef85 9711 spin_lock_irqsave(&phba->hbalock, iflags);
9712 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
9713 if (cmdiocbq->context2 && rspiocbq)
9714 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
9715 &rspiocbq->iocb, sizeof(IOCB_t));
9716
9717 /* Set the exchange busy flag for task management commands */
9718 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
9719 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
9720 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
9721 cur_iocbq);
9722 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
9723 }
9724
68876920 9725 pdone_q = cmdiocbq->context_un.wait_queue;
9726 if (pdone_q)
9727 wake_up(pdone_q);
858c9f6c 9728 spin_unlock_irqrestore(&phba->hbalock, iflags);
9729 return;
9730}
9731
9732/**
9733 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
9734 * @phba: Pointer to HBA context object.
9735 * @piocbq: Pointer to command iocb.
9736 * @flag: Flag to test.
9737 *
9738 * This routine grabs the hbalock and then tests the iocb_flag to
9739 * see if the passed in flag is set.
9740 * Returns:
9741 * 1 if flag is set.
9742 * 0 if flag is not set.
9743 **/
9744static int
9745lpfc_chk_iocb_flg(struct lpfc_hba *phba,
9746 struct lpfc_iocbq *piocbq, uint32_t flag)
9747{
9748 unsigned long iflags;
9749 int ret;
9750
9751 spin_lock_irqsave(&phba->hbalock, iflags);
9752 ret = piocbq->iocb_flag & flag;
9753 spin_unlock_irqrestore(&phba->hbalock, iflags);
9754 return ret;
9755
9756}
9757
e59058c4 9758/**
3621a710 9759 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
9760 * @phba: Pointer to HBA context object.
9761 * @ring_number: SLI ring number on which to issue the iocb.
9762 * @piocb: Pointer to command iocb.
9763 * @prspiocbq: Pointer to response iocb.
9764 * @timeout: Timeout in number of seconds.
9765 *
9766 * This function issues the iocb to firmware and waits for the
9767 * iocb to complete. If the iocb command is not
9768 * completed within timeout seconds, it returns IOCB_TIMEDOUT.
9769 * Caller should not free the iocb resources if this function
9770 * returns IOCB_TIMEDOUT.
9771 * The function waits for the iocb completion using a
9772 * non-interruptible wait.
9773 * This function will sleep while waiting for iocb completion.
9774 * So, this function should not be called from any context which
9775 * does not allow sleeping. For the same reason, this function
9776 * cannot be called with interrupts disabled.
9777 * This function assumes that the iocb completions occur while
9778 * this function sleeps. So, this function cannot be called from
9779 * the thread which processes iocb completion for this ring.
9780 * This function clears the iocb_flag of the iocb object before
9781 * issuing the iocb and the iocb completion handler sets this
9782 * flag and wakes this thread when the iocb completes.
9783 * The contents of the response iocb will be copied to prspiocbq
9784 * by the completion handler when the command completes.
9785 * This function returns IOCB_SUCCESS on success.
9786 * This function is called with no lock held.
9787 **/
dea3101e 9788int
2e0fef85 9789lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
da0436e9 9790 uint32_t ring_number,
9791 struct lpfc_iocbq *piocb,
9792 struct lpfc_iocbq *prspiocbq,
68876920 9793 uint32_t timeout)
dea3101e 9794{
7259f0d0 9795 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
9796 long timeleft, timeout_req = 0;
9797 int retval = IOCB_SUCCESS;
875fbdfe 9798 uint32_t creg_val;
2a9bf3d0 9799 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
dea3101e 9800 /*
9801 * If the caller has provided a response iocbq buffer, then context2
9802 * must be NULL or it is an error.
dea3101e 9803 */
9804 if (prspiocbq) {
9805 if (piocb->context2)
9806 return IOCB_ERROR;
9807 piocb->context2 = prspiocbq;
9808 }
9809
9810 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
9811 piocb->context_un.wait_queue = &done_q;
9812 piocb->iocb_flag &= ~LPFC_IO_WAKE;
dea3101e 9813
875fbdfe 9814 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9815 if (lpfc_readl(phba->HCregaddr, &creg_val))
9816 return IOCB_ERROR;
9817 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
9818 writel(creg_val, phba->HCregaddr);
9819 readl(phba->HCregaddr); /* flush */
9820 }
9821
9822 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
9823 SLI_IOCB_RET_IOCB);
9824 if (retval == IOCB_SUCCESS) {
9825 timeout_req = timeout * HZ;
68876920 9826 timeleft = wait_event_timeout(done_q,
d11e31dd 9827 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
68876920 9828 timeout_req);
dea3101e 9829
9830 if (piocb->iocb_flag & LPFC_IO_WAKE) {
9831 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 9832 "0331 IOCB wake signaled\n");
7054a606 9833 } else if (timeleft == 0) {
68876920 9834 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9835 "0338 IOCB wait timeout error - no "
9836 "wake response Data x%x\n", timeout);
68876920 9837 retval = IOCB_TIMEDOUT;
7054a606 9838 } else {
68876920 9839 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9840 "0330 IOCB wake NOT set, "
9841 "Data x%x x%lx\n",
9842 timeout, (timeleft / jiffies));
9843 retval = IOCB_TIMEDOUT;
dea3101e 9844 }
9845 } else if (retval == IOCB_BUSY) {
9846 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9847 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
9848 phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
9849 return retval;
9850 } else {
9851 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
d7c255b2 9852 "0332 IOCB wait issue failed, Data x%x\n",
e8b62011 9853 retval);
68876920 9854 retval = IOCB_ERROR;
9855 }
9856
875fbdfe 9857 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9858 if (lpfc_readl(phba->HCregaddr, &creg_val))
9859 return IOCB_ERROR;
9860 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
9861 writel(creg_val, phba->HCregaddr);
9862 readl(phba->HCregaddr); /* flush */
9863 }
9864
9865 if (prspiocbq)
9866 piocb->context2 = NULL;
9867
9868 piocb->context_un.wait_queue = NULL;
9869 piocb->iocb_cmpl = NULL;
9870 return retval;
9871}
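/*
 * Usage sketch (illustrative): issuing an ELS iocb synchronously with a
 * 30 second limit; the timeout value here is a placeholder, real callers
 * derive it from the command being sent:
 *
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb, prsp, 30);
 *
 * On IOCB_TIMEDOUT the iocb can still complete later, so the caller must
 * not free piocb or its resources; other return codes hand ownership back.
 */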
68876920 9872
e59058c4 9873/**
3621a710 9874 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
9875 * @phba: Pointer to HBA context object.
9876 * @pmboxq: Pointer to driver mailbox object.
9877 * @timeout: Timeout in number of seconds.
9878 *
9879 * This function issues the mailbox to firmware and waits for the
9880 * mailbox command to complete. If the mailbox command is not
9881 * completed within timeout seconds, it returns MBX_TIMEOUT.
9882 * The function waits for the mailbox completion using an
9883 * interruptible wait. If the thread is woken up due to a
9884 * signal, MBX_TIMEOUT error is returned to the caller. Caller
9885 * should not free the mailbox resources, if this function returns
9886 * MBX_TIMEOUT.
9887 * This function will sleep while waiting for mailbox completion.
9888 * So, this function should not be called from any context which
9889 * does not allow sleeping. For the same reason, this function
9890 * cannot be called with interrupts disabled.
9891 * This function assumes that the mailbox completion occurs while
9892 * this function sleeps. So, this function cannot be called from
9893 * the worker thread which processes mailbox completion.
9894 * This function is called in the context of HBA management
9895 * applications.
9896 * This function returns MBX_SUCCESS when successful.
9897 * This function is called with no lock held.
9898 **/
dea3101e 9899int
2e0fef85 9900lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
9901 uint32_t timeout)
9902{
7259f0d0 9903 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
dea3101e 9904 int retval;
858c9f6c 9905 unsigned long flag;
9906
9907 /* The caller must leave context1 empty. */
98c9ea5c 9908 if (pmboxq->context1)
2e0fef85 9909 return MBX_NOT_FINISHED;
dea3101e 9910
495a714c 9911 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
9912 /* setup wake call as IOCB callback */
9913 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
9914 /* setup context field to pass wait_queue pointer to wake function */
9915 pmboxq->context1 = &done_q;
9916
9917 /* now issue the command */
9918 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
dea3101e 9919 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
9920 wait_event_interruptible_timeout(done_q,
9921 pmboxq->mbox_flag & LPFC_MBX_WAKE,
9922 timeout * HZ);
9923
858c9f6c 9924 spin_lock_irqsave(&phba->hbalock, flag);
dea3101e 9925 pmboxq->context1 = NULL;
9926 /*
9927 * if LPFC_MBX_WAKE flag is set the mailbox is completed
9928 * else do not free the resources.
9929 */
d7c47992 9930 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
dea3101e 9931 retval = MBX_SUCCESS;
9932 lpfc_sli4_swap_str(phba, pmboxq);
9933 } else {
7054a606 9934 retval = MBX_TIMEOUT;
9935 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9936 }
9937 spin_unlock_irqrestore(&phba->hbalock, flag);
9938 }
9939
9940 return retval;
9941}
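/*
 * Usage sketch (illustrative): issuing a mailbox command synchronously.
 * The lpfc_read_lnk_stat() setup and the timeout are assumptions made
 * for the example:
 *
 *	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmboxq)
 *		return;
 *	lpfc_read_lnk_stat(phba, pmboxq);
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);
 *
 * Note the asymmetry: on MBX_TIMEOUT the mailbox is still owned by the
 * completion path and must not be freed by the caller.
 */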
9942
e59058c4 9943/**
3772a991 9944 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
9945 * @phba: Pointer to HBA context.
9946 *
9947 * This function is called to shutdown the driver's mailbox sub-system.
9948 * It first marks the mailbox sub-system as blocked to prevent
9949 * asynchronous mailbox commands from being issued off the pending mailbox
9950 * command queue. If the mailbox command sub-system shutdown is due to
9951 * HBA error conditions such as EEH or ERATT, this routine shall invoke
9952 * the mailbox sub-system flush routine to forcefully bring down the
9953 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
9954 * as offline or HBA function reset), this routine will wait for the
9955 * outstanding mailbox command to complete before invoking the mailbox
9956 * sub-system flush routine to gracefully bring down the mailbox sub-system.
e59058c4 9957 **/
9958void
9959lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
b4c02652 9960{
3772a991 9961 struct lpfc_sli *psli = &phba->sli;
3772a991 9962 unsigned long timeout;
b4c02652 9963
a183a15f 9964 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
d7069f09 9965
9966 spin_lock_irq(&phba->hbalock);
9967 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
b4c02652 9968
3772a991 9969 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9970 /* Determine how long we might wait for the active mailbox
9971 * command to be gracefully completed by firmware.
9972 */
9973 if (phba->sli.mbox_active)
9974 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9975 phba->sli.mbox_active) *
9976 1000) + jiffies;
9977 spin_unlock_irq(&phba->hbalock);
9978
9979 while (phba->sli.mbox_active) {
9980 /* Check active mailbox complete status every 2ms */
9981 msleep(2);
9982 if (time_after(jiffies, timeout))
9983 			/* Timeout, let the mailbox flush routine
9984 			 * forcefully release the active mailbox command
9985 			 */
9986 break;
9987 }
9988 } else
9989 spin_unlock_irq(&phba->hbalock);
9990
9991 lpfc_sli_mbox_sys_flush(phba);
9992}
ed957684 9993
9994/**
9995 * lpfc_sli_eratt_read - read sli-3 error attention events
9996 * @phba: Pointer to HBA context.
9997 *
9998 * This function is called to read the SLI3 device error attention registers
9999 * for possible error attention events. The caller must hold the hostlock
10000 * with spin_lock_irq().
10001 *
25985edc 10002 * This function returns 1 when there is Error Attention in the Host Attention
10003 * Register and returns 0 otherwise.
10004 **/
10005static int
10006lpfc_sli_eratt_read(struct lpfc_hba *phba)
10007{
10008 uint32_t ha_copy;
b4c02652 10009
3772a991 10010 /* Read chip Host Attention (HA) register */
10011 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10012 goto unplug_err;
10013
10014 if (ha_copy & HA_ERATT) {
10015 /* Read host status register to retrieve error event */
10016 if (lpfc_sli_read_hs(phba))
10017 goto unplug_err;
b4c02652 10018
10019 		/* Check if a deferred error condition is active */
10020 if ((HS_FFER1 & phba->work_hs) &&
10021 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0 10022 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
3772a991 10023 phba->hba_flag |= DEFER_ERATT;
10024 /* Clear all interrupt enable conditions */
10025 writel(0, phba->HCregaddr);
10026 readl(phba->HCregaddr);
10027 }
10028
10029 /* Set the driver HA work bitmap */
10030 phba->work_ha |= HA_ERATT;
10031 /* Indicate polling handles this ERATT */
10032 phba->hba_flag |= HBA_ERATT_HANDLED;
10033 return 1;
10034 }
10035 return 0;
10036
10037unplug_err:
10038 /* Set the driver HS work bitmap */
10039 phba->work_hs |= UNPLUG_ERR;
10040 /* Set the driver HA work bitmap */
10041 phba->work_ha |= HA_ERATT;
10042 /* Indicate polling handles this ERATT */
10043 phba->hba_flag |= HBA_ERATT_HANDLED;
10044 return 1;
10045}
10046
10047/**
10048 * lpfc_sli4_eratt_read - read sli-4 error attention events
10049 * @phba: Pointer to HBA context.
10050 *
10051 * This function is called to read the SLI4 device error attention registers
10052 * for possible error attention events. The caller must hold the hostlock
10053 * with spin_lock_irq().
10054 *
25985edc 10055 * This function returns 1 when there is Error Attention in the Host Attention
10056 * Register and returns 0 otherwise.
10057 **/
10058static int
10059lpfc_sli4_eratt_read(struct lpfc_hba *phba)
10060{
10061 uint32_t uerr_sta_hi, uerr_sta_lo;
10062 uint32_t if_type, portsmphr;
10063 struct lpfc_register portstat_reg;
da0436e9 10064
10065 /*
10066 * For now, use the SLI4 device internal unrecoverable error
10067 * registers for error attention. This can be changed later.
10068 */
10069 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10070 switch (if_type) {
10071 case LPFC_SLI_INTF_IF_TYPE_0:
10072 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
10073 &uerr_sta_lo) ||
10074 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
10075 &uerr_sta_hi)) {
10076 phba->work_hs |= UNPLUG_ERR;
10077 phba->work_ha |= HA_ERATT;
10078 phba->hba_flag |= HBA_ERATT_HANDLED;
10079 return 1;
10080 }
10081 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
10082 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
10083 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10084 "1423 HBA Unrecoverable error: "
10085 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
10086 "ue_mask_lo_reg=0x%x, "
10087 "ue_mask_hi_reg=0x%x\n",
10088 uerr_sta_lo, uerr_sta_hi,
10089 phba->sli4_hba.ue_mask_lo,
10090 phba->sli4_hba.ue_mask_hi);
10091 phba->work_status[0] = uerr_sta_lo;
10092 phba->work_status[1] = uerr_sta_hi;
10093 phba->work_ha |= HA_ERATT;
10094 phba->hba_flag |= HBA_ERATT_HANDLED;
10095 return 1;
10096 }
10097 break;
10098 case LPFC_SLI_INTF_IF_TYPE_2:
10099 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
10100 &portstat_reg.word0) ||
10101 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
10102 &portsmphr)){
10103 phba->work_hs |= UNPLUG_ERR;
10104 phba->work_ha |= HA_ERATT;
10105 phba->hba_flag |= HBA_ERATT_HANDLED;
10106 return 1;
10107 }
10108 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
10109 phba->work_status[0] =
10110 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
10111 phba->work_status[1] =
10112 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
10113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2e90f4b5 10114 "2885 Port Status Event: "
10115 "port status reg 0x%x, "
10116 "port smphr reg 0x%x, "
10117 "error 1=0x%x, error 2=0x%x\n",
10118 portstat_reg.word0,
10119 portsmphr,
10120 phba->work_status[0],
10121 phba->work_status[1]);
10122 phba->work_ha |= HA_ERATT;
10123 phba->hba_flag |= HBA_ERATT_HANDLED;
10124 return 1;
10125 }
10126 break;
10127 case LPFC_SLI_INTF_IF_TYPE_1:
10128 default:
a747c9ce 10129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10130 "2886 HBA Error Attention on unsupported "
10131 "if type %d.", if_type);
a747c9ce 10132 return 1;
da0436e9 10133 }
2fcee4bf 10134
10135 return 0;
10136}
10137
e59058c4 10138/**
3621a710 10139 * lpfc_sli_check_eratt - check error attention events
10140 * @phba: Pointer to HBA context.
10141 *
3772a991 10142 * This function is called from timer soft interrupt context to check HBA's
10143 * error attention register bit for error attention events.
10144 *
25985edc 10145 * This function returns 1 when there is Error Attention in the Host Attention
10146 * Register and returns 0 otherwise.
10147 **/
10148int
10149lpfc_sli_check_eratt(struct lpfc_hba *phba)
10150{
10151 uint32_t ha_copy;
10152
10153 /* If somebody is waiting to handle an eratt, don't process it
10154 * here. The brdkill function will do this.
10155 */
10156 if (phba->link_flag & LS_IGNORE_ERATT)
10157 return 0;
10158
10159 /* Check if interrupt handler handles this ERATT */
10160 spin_lock_irq(&phba->hbalock);
10161 if (phba->hba_flag & HBA_ERATT_HANDLED) {
10162 /* Interrupt handler has handled ERATT */
10163 spin_unlock_irq(&phba->hbalock);
10164 return 0;
10165 }
10166
10167 /*
10168 * If there is deferred error attention, do not check for error
10169 * attention
10170 */
10171 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10172 spin_unlock_irq(&phba->hbalock);
10173 return 0;
10174 }
10175
10176 /* If PCI channel is offline, don't process it */
10177 if (unlikely(pci_channel_offline(phba->pcidev))) {
9399627f 10178 spin_unlock_irq(&phba->hbalock);
10179 return 0;
10180 }
10181
10182 switch (phba->sli_rev) {
10183 case LPFC_SLI_REV2:
10184 case LPFC_SLI_REV3:
10185 /* Read chip Host Attention (HA) register */
10186 ha_copy = lpfc_sli_eratt_read(phba);
10187 break;
da0436e9 10188 case LPFC_SLI_REV4:
2fcee4bf 10189 		/* Read device Unrecoverable Error (UERR) registers */
10190 ha_copy = lpfc_sli4_eratt_read(phba);
10191 break;
10192 default:
10193 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10194 "0299 Invalid SLI revision (%d)\n",
10195 phba->sli_rev);
10196 ha_copy = 0;
10197 break;
10198 }
10199 spin_unlock_irq(&phba->hbalock);
10200
10201 return ha_copy;
10202}
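/*
 * Usage sketch (illustrative): the error-attention poll timer calls this
 * from timer context and defers the real work to the worker thread:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 *
 * The wake-up pairing mirrors the driver's eratt polling handler but is
 * reproduced here only as an assumption for illustration.
 */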
10203
10204/**
10205 * lpfc_intr_state_check - Check device state for interrupt handling
10206 * @phba: Pointer to HBA context.
10207 *
10208 * This inline routine checks whether a device or its PCI slot is in a state
10209 * in which the interrupt should be handled.
10210 *
10211 * This function returns 0 if the device or the PCI slot is in a state where
10212 * the interrupt should be handled, otherwise -EIO.
10213 */
10214static inline int
10215lpfc_intr_state_check(struct lpfc_hba *phba)
10216{
10217 /* If the pci channel is offline, ignore all the interrupts */
10218 if (unlikely(pci_channel_offline(phba->pcidev)))
10219 return -EIO;
10220
10221 /* Update device level interrupt statistics */
10222 phba->sli.slistat.sli_intr++;
10223
10224 /* Ignore all interrupts during initialization. */
10225 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10226 return -EIO;
10227
10228 return 0;
10229}
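/*
 * Typical use at the top of an interrupt handler, mirroring the callers
 * below (sketch only):
 *
 *	if (lpfc_intr_state_check(phba))
 *		return IRQ_NONE;
 */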
10230
10231/**
3772a991 10232 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
10233 * @irq: Interrupt number.
10234 * @dev_id: The device context pointer.
10235 *
9399627f 10236 * This function is directly called from the PCI layer as an interrupt
10237 * service routine when device with SLI-3 interface spec is enabled with
10238 * MSI-X multi-message interrupt mode and there are slow-path events in
10239 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
10240 * interrupt mode, this function is called as part of the device-level
10241 * interrupt handler. When the PCI slot is in error recovery or the HBA
10242 * is undergoing initialization, the interrupt handler will not process
10243 * the interrupt. The link attention and ELS ring attention events are
10244 * handled by the worker thread. The interrupt handler signals the worker
10245 * thread and returns for these events. This function is called without
10246 * any lock held. It gets the hbalock to access and update SLI data
10247 * structures.
10248 *
10249 * This function returns IRQ_HANDLED when interrupt is handled else it
10250 * returns IRQ_NONE.
e59058c4 10251 **/
dea3101e 10252irqreturn_t
3772a991 10253lpfc_sli_sp_intr_handler(int irq, void *dev_id)
dea3101e 10254{
2e0fef85 10255 struct lpfc_hba *phba;
a747c9ce 10256 uint32_t ha_copy, hc_copy;
10257 uint32_t work_ha_copy;
10258 unsigned long status;
5b75da2f 10259 unsigned long iflag;
10260 uint32_t control;
10261
92d7f7b0 10262 MAILBOX_t *mbox, *pmbox;
10263 struct lpfc_vport *vport;
10264 struct lpfc_nodelist *ndlp;
10265 struct lpfc_dmabuf *mp;
10266 LPFC_MBOXQ_t *pmb;
10267 int rc;
10268
10269 /*
10270 * Get the driver's phba structure from the dev_id and
10271 * assume the HBA is not interrupting.
10272 */
9399627f 10273 phba = (struct lpfc_hba *)dev_id;
10274
10275 if (unlikely(!phba))
10276 return IRQ_NONE;
10277
dea3101e 10278 /*
10279 	 * Stuff needs to be attended to when this function is invoked as an
10280 * individual interrupt handler in MSI-X multi-message interrupt mode
dea3101e 10281 */
9399627f 10282 if (phba->intr_type == MSIX) {
10283 /* Check device state for handling interrupt */
10284 if (lpfc_intr_state_check(phba))
10285 return IRQ_NONE;
10286 /* Need to read HA REG for slow-path events */
5b75da2f 10287 spin_lock_irqsave(&phba->hbalock, iflag);
10288 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10289 goto unplug_error;
10290 /* If somebody is waiting to handle an eratt don't process it
10291 * here. The brdkill function will do this.
10292 */
10293 if (phba->link_flag & LS_IGNORE_ERATT)
10294 ha_copy &= ~HA_ERATT;
10295 /* Check the need for handling ERATT in interrupt handler */
10296 if (ha_copy & HA_ERATT) {
10297 if (phba->hba_flag & HBA_ERATT_HANDLED)
10298 /* ERATT polling has handled ERATT */
10299 ha_copy &= ~HA_ERATT;
10300 else
10301 /* Indicate interrupt handler handles ERATT */
10302 phba->hba_flag |= HBA_ERATT_HANDLED;
10303 }
10304
10305 /*
10306 * If there is deferred error attention, do not check for any
10307 * interrupt.
10308 */
10309 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 10310 spin_unlock_irqrestore(&phba->hbalock, iflag);
10311 return IRQ_NONE;
10312 }
10313
9399627f 10314 /* Clear up only attention source related to slow-path */
10315 if (lpfc_readl(phba->HCregaddr, &hc_copy))
10316 goto unplug_error;
10317
10318 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
10319 HC_LAINT_ENA | HC_ERINT_ENA),
10320 phba->HCregaddr);
10321 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
10322 phba->HAregaddr);
a747c9ce 10323 writel(hc_copy, phba->HCregaddr);
9399627f 10324 readl(phba->HAregaddr); /* flush */
5b75da2f 10325 spin_unlock_irqrestore(&phba->hbalock, iflag);
10326 } else
10327 ha_copy = phba->ha_copy;
dea3101e 10328
10329 work_ha_copy = ha_copy & phba->work_ha_mask;
10330
9399627f 10331 if (work_ha_copy) {
10332 if (work_ha_copy & HA_LATT) {
10333 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
10334 /*
10335 * Turn off Link Attention interrupts
10336 * until CLEAR_LA done
10337 */
5b75da2f 10338 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 10339 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
10340 if (lpfc_readl(phba->HCregaddr, &control))
10341 goto unplug_error;
10342 control &= ~HC_LAINT_ENA;
10343 writel(control, phba->HCregaddr);
10344 readl(phba->HCregaddr); /* flush */
5b75da2f 10345 spin_unlock_irqrestore(&phba->hbalock, iflag);
10346 }
10347 else
10348 work_ha_copy &= ~HA_LATT;
10349 }
10350
9399627f 10351 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
10352 /*
10353 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
10354 * the only slow ring.
10355 */
10356 status = (work_ha_copy &
10357 (HA_RXMASK << (4*LPFC_ELS_RING)));
10358 status >>= (4*LPFC_ELS_RING);
10359 if (status & HA_RXMASK) {
5b75da2f 10360 spin_lock_irqsave(&phba->hbalock, iflag);
10361 if (lpfc_readl(phba->HCregaddr, &control))
10362 goto unplug_error;
10363
10364 lpfc_debugfs_slow_ring_trc(phba,
10365 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
10366 control, status,
10367 (uint32_t)phba->sli.slistat.sli_intr);
10368
858c9f6c 10369 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
10370 lpfc_debugfs_slow_ring_trc(phba,
10371 "ISR Disable ring:"
10372 "pwork:x%x hawork:x%x wait:x%x",
10373 phba->work_ha, work_ha_copy,
10374 (uint32_t)((unsigned long)
5e9d9b82 10375 &phba->work_waitq));
a58cbd52 10376
10377 control &=
10378 ~(HC_R0INT_ENA << LPFC_ELS_RING);
10379 writel(control, phba->HCregaddr);
10380 readl(phba->HCregaddr); /* flush */
dea3101e 10381 }
10382 else {
10383 lpfc_debugfs_slow_ring_trc(phba,
10384 "ISR slow ring: pwork:"
10385 "x%x hawork:x%x wait:x%x",
10386 phba->work_ha, work_ha_copy,
10387 (uint32_t)((unsigned long)
5e9d9b82 10388 &phba->work_waitq));
a58cbd52 10389 }
5b75da2f 10390 spin_unlock_irqrestore(&phba->hbalock, iflag);
10391 }
10392 }
5b75da2f 10393 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90 10394 if (work_ha_copy & HA_ERATT) {
10395 if (lpfc_sli_read_hs(phba))
10396 goto unplug_error;
10397 /*
10398 			 * Check if a deferred error condition
10399 			 * is active
10400 */
10401 if ((HS_FFER1 & phba->work_hs) &&
10402 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
10403 HS_FFER6 | HS_FFER7 | HS_FFER8) &
10404 phba->work_hs)) {
10405 phba->hba_flag |= DEFER_ERATT;
10406 /* Clear all interrupt enable conditions */
10407 writel(0, phba->HCregaddr);
10408 readl(phba->HCregaddr);
10409 }
10410 }
10411
9399627f 10412 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
92d7f7b0 10413 pmb = phba->sli.mbox_active;
04c68496 10414 pmbox = &pmb->u.mb;
34b02dcd 10415 mbox = phba->mbox;
858c9f6c 10416 vport = pmb->vport;
10417
10418 /* First check out the status word */
10419 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
10420 if (pmbox->mbxOwner != OWN_HOST) {
5b75da2f 10421 spin_unlock_irqrestore(&phba->hbalock, iflag);
10422 /*
10423 * Stray Mailbox Interrupt, mbxCommand <cmd>
10424 * mbxStatus <status>
10425 */
09372820 10426 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
92d7f7b0 10427 LOG_SLI,
e8b62011 10428 "(%d):0304 Stray Mailbox "
10429 "Interrupt mbxCommand x%x "
10430 "mbxStatus x%x\n",
e8b62011 10431 (vport ? vport->vpi : 0),
10432 pmbox->mbxCommand,
10433 pmbox->mbxStatus);
10434 /* clear mailbox attention bit */
10435 work_ha_copy &= ~HA_MBATT;
10436 } else {
97eab634 10437 phba->sli.mbox_active = NULL;
5b75da2f 10438 spin_unlock_irqrestore(&phba->hbalock, iflag);
10439 phba->last_completion_time = jiffies;
10440 del_timer(&phba->sli.mbox_tmo);
10441 if (pmb->mbox_cmpl) {
10442 lpfc_sli_pcimem_bcopy(mbox, pmbox,
10443 MAILBOX_CMD_SIZE);
10444 if (pmb->out_ext_byte_len &&
10445 pmb->context2)
10446 lpfc_sli_pcimem_bcopy(
10447 phba->mbox_ext,
10448 pmb->context2,
10449 pmb->out_ext_byte_len);
10450 }
10451 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
10452 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
10453
10454 lpfc_debugfs_disc_trc(vport,
10455 LPFC_DISC_TRC_MBOX_VPORT,
10456 "MBOX dflt rpi: : "
10457 "status:x%x rpi:x%x",
10458 (uint32_t)pmbox->mbxStatus,
10459 pmbox->un.varWords[0], 0);
10460
10461 if (!pmbox->mbxStatus) {
10462 mp = (struct lpfc_dmabuf *)
10463 (pmb->context1);
10464 ndlp = (struct lpfc_nodelist *)
10465 pmb->context2;
10466
10467 /* Reg_LOGIN of dflt RPI was
10468 					 * successful. Now let's get
10469 * rid of the RPI using the
10470 * same mbox buffer.
10471 */
10472 lpfc_unreg_login(phba,
10473 vport->vpi,
10474 pmbox->un.varWords[0],
10475 pmb);
10476 pmb->mbox_cmpl =
10477 lpfc_mbx_cmpl_dflt_rpi;
10478 pmb->context1 = mp;
10479 pmb->context2 = ndlp;
10480 pmb->vport = vport;
10481 rc = lpfc_sli_issue_mbox(phba,
10482 pmb,
10483 MBX_NOWAIT);
10484 if (rc != MBX_BUSY)
10485 lpfc_printf_log(phba,
10486 KERN_ERR,
10487 LOG_MBOX | LOG_SLI,
d7c255b2 10488 						"0350 rc should have "
6a9c52cf 10489 "been MBX_BUSY\n");
10490 if (rc != MBX_NOT_FINISHED)
10491 goto send_current_mbox;
09372820 10492 }
858c9f6c 10493 }
10494 spin_lock_irqsave(
10495 &phba->pport->work_port_lock,
10496 iflag);
10497 phba->pport->work_port_events &=
10498 ~WORKER_MBOX_TMO;
10499 spin_unlock_irqrestore(
10500 &phba->pport->work_port_lock,
10501 iflag);
09372820 10502 lpfc_mbox_cmpl_put(phba, pmb);
858c9f6c 10503 }
97eab634 10504 } else
5b75da2f 10505 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f 10506
10507 if ((work_ha_copy & HA_MBATT) &&
10508 (phba->sli.mbox_active == NULL)) {
858c9f6c 10509send_current_mbox:
92d7f7b0 10510 /* Process next mailbox command if there is one */
10511 do {
10512 rc = lpfc_sli_issue_mbox(phba, NULL,
10513 MBX_NOWAIT);
10514 } while (rc == MBX_NOT_FINISHED);
10515 if (rc != MBX_SUCCESS)
10516 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10517 LOG_SLI, "0349 rc should be "
6a9c52cf 10518 "MBX_SUCCESS\n");
10519 }
10520
5b75da2f 10521 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 10522 phba->work_ha |= work_ha_copy;
5b75da2f 10523 spin_unlock_irqrestore(&phba->hbalock, iflag);
5e9d9b82 10524 lpfc_worker_wake_up(phba);
dea3101e 10525 }
9399627f 10526 return IRQ_HANDLED;
10527unplug_error:
10528 spin_unlock_irqrestore(&phba->hbalock, iflag);
10529 return IRQ_HANDLED;
dea3101e 10530
3772a991 10531} /* lpfc_sli_sp_intr_handler */
10532
10533/**
3772a991 10534 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
10535 * @irq: Interrupt number.
10536 * @dev_id: The device context pointer.
10537 *
10538 * This function is directly called from the PCI layer as an interrupt
10539 * service routine when device with SLI-3 interface spec is enabled with
10540 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
10541 * ring event in the HBA. However, when the device is enabled with either
10542 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
10543 * device-level interrupt handler. When the PCI slot is in error recovery
10544 * or the HBA is undergoing initialization, the interrupt handler will not
10545 * process the interrupt. The SCSI FCP fast-path ring events are handled in
10546 * the interrupt context. This function is called without any lock held.
10547 * It gets the hbalock to access and update SLI data structures.
10548 *
10549 * This function returns IRQ_HANDLED when interrupt is handled else it
10550 * returns IRQ_NONE.
10551 **/
10552irqreturn_t
3772a991 10553lpfc_sli_fp_intr_handler(int irq, void *dev_id)
10554{
10555 struct lpfc_hba *phba;
10556 uint32_t ha_copy;
10557 unsigned long status;
5b75da2f 10558 unsigned long iflag;
10559
10560 /* Get the driver's phba structure from the dev_id and
10561 * assume the HBA is not interrupting.
10562 */
10563 phba = (struct lpfc_hba *) dev_id;
10564
10565 if (unlikely(!phba))
10566 return IRQ_NONE;
10567
10568 /*
10569 	 * Stuff needs to be attended to when this function is invoked as an
10570 * individual interrupt handler in MSI-X multi-message interrupt mode
10571 */
10572 if (phba->intr_type == MSIX) {
10573 /* Check device state for handling interrupt */
10574 if (lpfc_intr_state_check(phba))
10575 return IRQ_NONE;
10576 /* Need to read HA REG for FCP ring and other ring events */
10577 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10578 return IRQ_HANDLED;
9399627f 10579 /* Clear up only attention source related to fast-path */
5b75da2f 10580 spin_lock_irqsave(&phba->hbalock, iflag);
10581 /*
10582 * If there is deferred error attention, do not check for
10583 * any interrupt.
10584 */
10585 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 10586 spin_unlock_irqrestore(&phba->hbalock, iflag);
10587 return IRQ_NONE;
10588 }
10589 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
10590 phba->HAregaddr);
10591 readl(phba->HAregaddr); /* flush */
5b75da2f 10592 spin_unlock_irqrestore(&phba->hbalock, iflag);
10593 } else
10594 ha_copy = phba->ha_copy;
10595
10596 /*
9399627f 10597 * Process all events on FCP ring. Take the optimized path for FCP IO.
dea3101e 10598 */
10599 ha_copy &= ~(phba->work_ha_mask);
10600
10601 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
dea3101e 10602 status >>= (4*LPFC_FCP_RING);
858c9f6c 10603 if (status & HA_RXMASK)
10604 lpfc_sli_handle_fast_ring_event(phba,
10605 &phba->sli.ring[LPFC_FCP_RING],
10606 status);
10607
10608 if (phba->cfg_multi_ring_support == 2) {
10609 /*
10610 * Process all events on extra ring. Take the optimized path
10611 * for extra ring IO.
a4bc3379 10612 */
9399627f 10613 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
a4bc3379 10614 status >>= (4*LPFC_EXTRA_RING);
858c9f6c 10615 if (status & HA_RXMASK) {
10616 lpfc_sli_handle_fast_ring_event(phba,
10617 &phba->sli.ring[LPFC_EXTRA_RING],
10618 status);
10619 }
10620 }
dea3101e 10621 return IRQ_HANDLED;
3772a991 10622} /* lpfc_sli_fp_intr_handler */
10623
10624/**
3772a991 10625 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
10626 * @irq: Interrupt number.
10627 * @dev_id: The device context pointer.
10628 *
10629 * This function is the HBA device-level interrupt handler to device with
10630 * SLI-3 interface spec, called from the PCI layer when either MSI or
10631 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
10632 * requires driver attention. This function invokes the slow-path interrupt
10633 * attention handling function and fast-path interrupt attention handling
10634 * function in turn to process the relevant HBA attention events. This
10635 * function is called without any lock held. It gets the hbalock to access
10636 * and update SLI data structures.
10637 *
10638 * This function returns IRQ_HANDLED when interrupt is handled, else it
10639 * returns IRQ_NONE.
10640 **/
10641irqreturn_t
3772a991 10642lpfc_sli_intr_handler(int irq, void *dev_id)
10643{
10644 struct lpfc_hba *phba;
10645 irqreturn_t sp_irq_rc, fp_irq_rc;
10646 unsigned long status1, status2;
a747c9ce 10647 uint32_t hc_copy;
10648
10649 /*
10650 * Get the driver's phba structure from the dev_id and
10651 * assume the HBA is not interrupting.
10652 */
10653 phba = (struct lpfc_hba *) dev_id;
10654
10655 if (unlikely(!phba))
10656 return IRQ_NONE;
10657
10658 /* Check device state for handling interrupt */
10659 if (lpfc_intr_state_check(phba))
10660 return IRQ_NONE;
10661
10662 spin_lock(&phba->hbalock);
10663 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
10664 spin_unlock(&phba->hbalock);
10665 return IRQ_HANDLED;
10666 }
10667
10668 if (unlikely(!phba->ha_copy)) {
10669 spin_unlock(&phba->hbalock);
10670 return IRQ_NONE;
10671 } else if (phba->ha_copy & HA_ERATT) {
10672 if (phba->hba_flag & HBA_ERATT_HANDLED)
10673 /* ERATT polling has handled ERATT */
10674 phba->ha_copy &= ~HA_ERATT;
10675 else
10676 /* Indicate interrupt handler handles ERATT */
10677 phba->hba_flag |= HBA_ERATT_HANDLED;
10678 }
10679
10680 /*
10681 * If there is deferred error attention, do not check for any interrupt.
10682 */
10683 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
ec21b3b0 10684 spin_unlock(&phba->hbalock);
10685 return IRQ_NONE;
10686 }
10687
9399627f 10688 /* Clear attention sources except link and error attentions */
10689 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
10690 spin_unlock(&phba->hbalock);
10691 return IRQ_HANDLED;
10692 }
10693 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
10694 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
10695 phba->HCregaddr);
9399627f 10696 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
a747c9ce 10697 writel(hc_copy, phba->HCregaddr);
10698 readl(phba->HAregaddr); /* flush */
10699 spin_unlock(&phba->hbalock);
10700
10701 /*
10702 * Invokes slow-path host attention interrupt handling as appropriate.
10703 */
10704
10705 /* status of events with mailbox and link attention */
10706 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
10707
10708 /* status of events with ELS ring */
10709 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
10710 status2 >>= (4*LPFC_ELS_RING);
10711
10712 if (status1 || (status2 & HA_RXMASK))
3772a991 10713 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
10714 else
10715 sp_irq_rc = IRQ_NONE;
10716
10717 /*
10718 * Invoke fast-path host attention interrupt handling as appropriate.
10719 */
10720
10721 /* status of events with FCP ring */
10722 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
10723 status1 >>= (4*LPFC_FCP_RING);
10724
10725 /* status of events with extra ring */
10726 if (phba->cfg_multi_ring_support == 2) {
10727 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
10728 status2 >>= (4*LPFC_EXTRA_RING);
10729 } else
10730 status2 = 0;
10731
10732 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
3772a991 10733 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
10734 else
10735 fp_irq_rc = IRQ_NONE;
dea3101e 10736
10737 /* Return device-level interrupt handling status */
10738 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
3772a991 10739} /* lpfc_sli_intr_handler */
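/*
 * Hedged wiring sketch (illustrative; the real registration lives in the
 * driver init code): under MSI-X the split handlers are registered on
 * separate vectors, while MSI/INTx registers the combined handler. The
 * msix_entries layout and handler-name macros are assumptions drawn from
 * the SLI-3 init path of this driver family:
 *
 *	rc = request_irq(phba->msix_entries[0].vector,
 *			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
 *			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
 *	rc = request_irq(phba->msix_entries[1].vector,
 *			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
 *			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 */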
10740
10741/**
10742 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
10743 * @phba: pointer to lpfc hba data structure.
10744 *
10745 * This routine is invoked by the worker thread to process all the pending
10746 * SLI4 FCP abort XRI events.
10747 **/
10748void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
10749{
10750 struct lpfc_cq_event *cq_event;
10751
10752 /* First, declare the fcp xri abort event has been handled */
10753 spin_lock_irq(&phba->hbalock);
10754 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
10755 spin_unlock_irq(&phba->hbalock);
10756 /* Now, handle all the fcp xri abort events */
10757 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
10758 /* Get the first event from the head of the event queue */
10759 spin_lock_irq(&phba->hbalock);
10760 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10761 cq_event, struct lpfc_cq_event, list);
10762 spin_unlock_irq(&phba->hbalock);
10763 /* Notify aborted XRI for FCP work queue */
10764 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10765 /* Free the event processed back to the free pool */
10766 lpfc_sli4_cq_event_release(phba, cq_event);
10767 }
10768}
10769
10770/**
10771 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
10772 * @phba: pointer to lpfc hba data structure.
10773 *
10774 * This routine is invoked by the worker thread to process all the pending
10775 * SLI4 els abort xri events.
10776 **/
10777void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
10778{
10779 struct lpfc_cq_event *cq_event;
10780
10781 /* First, declare the els xri abort event has been handled */
10782 spin_lock_irq(&phba->hbalock);
10783 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
10784 spin_unlock_irq(&phba->hbalock);
10785 /* Now, handle all the els xri abort events */
10786 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
10787 /* Get the first event from the head of the event queue */
10788 spin_lock_irq(&phba->hbalock);
10789 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10790 cq_event, struct lpfc_cq_event, list);
10791 spin_unlock_irq(&phba->hbalock);
10792 /* Notify aborted XRI for ELS work queue */
10793 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10794 /* Free the event processed back to the free pool */
10795 lpfc_sli4_cq_event_release(phba, cq_event);
10796 }
10797}
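/*
 * Hedged dispatch sketch: the worker thread (lpfc_work_done() elsewhere in
 * the driver) tests the hba_flag bits set by the CQ handlers and then
 * invokes these routines, approximately:
 *
 *	if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
 *		lpfc_sli4_fcp_xri_abort_event_proc(phba);
 *	if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 *		lpfc_sli4_els_xri_abort_event_proc(phba);
 */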
10798
10799/**
10800 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
10801 * @phba: pointer to lpfc hba data structure
10802 * @pIocbIn: pointer to the rspiocbq
10803 * @pIocbOut: pointer to the cmdiocbq
10804 * @wcqe: pointer to the complete wcqe
10805 *
10806 * This routine transfers the fields of a command iocbq to a response iocbq
10807 * by copying all the IOCB fields from command iocbq and transferring the
10808 * completion status information from the complete wcqe.
10809 **/
4f774513 10810static void
10811lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
10812 struct lpfc_iocbq *pIocbIn,
10813 struct lpfc_iocbq *pIocbOut,
10814 struct lpfc_wcqe_complete *wcqe)
10815{
341af102 10816 unsigned long iflags;
acd6859b 10817 uint32_t status;
10818 size_t offset = offsetof(struct lpfc_iocbq, iocb);
10819
10820 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
10821 sizeof(struct lpfc_iocbq) - offset);
4f774513 10822 /* Map WCQE parameters into irspiocb parameters */
10823 status = bf_get(lpfc_wcqe_c_status, wcqe);
10824 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
10825 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
10826 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
10827 pIocbIn->iocb.un.fcpi.fcpi_parm =
10828 pIocbOut->iocb.un.fcpi.fcpi_parm -
10829 wcqe->total_data_placed;
10830 else
10831 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
695a814e 10832 else {
4f774513 10833 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
10834 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
10835 }
341af102 10836
10837 /* Convert BG errors for completion status */
10838 if (status == CQE_STATUS_DI_ERROR) {
10839 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
10840
10841 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
10842 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
10843 else
10844 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
10845
10846 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
10847 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
10848 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10849 BGS_GUARD_ERR_MASK;
10850 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
10851 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10852 BGS_APPTAG_ERR_MASK;
10853 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
10854 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10855 BGS_REFTAG_ERR_MASK;
10856
10857 /* Check to see if there was any good data before the error */
10858 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
10859 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10860 BGS_HI_WATER_MARK_PRESENT_MASK;
10861 pIocbIn->iocb.unsli3.sli3_bg.bghm =
10862 wcqe->total_data_placed;
10863 }
10864
10865 /*
10866 * Set ALL the error bits to indicate we don't know what
10867 * type of error it is.
10868 */
10869 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
10870 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10871 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
10872 BGS_GUARD_ERR_MASK);
10873 }
10874
10875 /* Pick up HBA exchange busy condition */
10876 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
10877 spin_lock_irqsave(&phba->hbalock, iflags);
10878 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
10879 spin_unlock_irqrestore(&phba->hbalock, iflags);
10880 }
10881}
10882
10883/**
10884 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
10885 * @phba: Pointer to HBA context object.
10886 * @wcqe: Pointer to work-queue completion queue entry.
10887 *
10888 * This routine handles an ELS work-queue completion event and construct
10889 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
10890 * discovery engine to handle.
10891 *
10892 * Return: Pointer to the receive IOCBQ, NULL otherwise.
10893 **/
10894static struct lpfc_iocbq *
10895lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
10896 struct lpfc_iocbq *irspiocbq)
10897{
10898 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
10899 struct lpfc_iocbq *cmdiocbq;
10900 struct lpfc_wcqe_complete *wcqe;
10901 unsigned long iflags;
10902
10903 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
10904 spin_lock_irqsave(&phba->hbalock, iflags);
10905 pring->stats.iocb_event++;
10906 /* Look up the ELS command IOCB and create pseudo response IOCB */
10907 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
10908 bf_get(lpfc_wcqe_c_request_tag, wcqe));
10909 spin_unlock_irqrestore(&phba->hbalock, iflags);
10910
10911 if (unlikely(!cmdiocbq)) {
10912 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10913 "0386 ELS complete with no corresponding "
10914 "cmdiocb: iotag (%d)\n",
10915 bf_get(lpfc_wcqe_c_request_tag, wcqe));
10916 lpfc_sli_release_iocbq(phba, irspiocbq);
10917 return NULL;
10918 }
10919
10920 /* Fake the irspiocbq and copy necessary response information */
341af102 10921 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
10922
10923 return irspiocbq;
10924}
10925
10926/**
10927 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
10928 * @phba: Pointer to HBA context object.
10929 * @cqe: Pointer to mailbox completion queue entry.
10930 *
10931 * This routine processes a mailbox completion queue entry with an
10932 * asynchronous event.
10933 *
10934 * Return: true if work posted to worker thread, otherwise false.
10935 **/
10936static bool
10937lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
10938{
10939 struct lpfc_cq_event *cq_event;
10940 unsigned long iflags;
10941
10942 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10943 "0392 Async Event: word0:x%x, word1:x%x, "
10944 "word2:x%x, word3:x%x\n", mcqe->word0,
10945 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
10946
10947 /* Allocate a new internal CQ_EVENT entry */
10948 cq_event = lpfc_sli4_cq_event_alloc(phba);
10949 if (!cq_event) {
10950 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10951 "0394 Failed to allocate CQ_EVENT entry\n");
10952 return false;
10953 }
10954
10955 /* Move the CQE into an asynchronous event entry */
10956 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
10957 spin_lock_irqsave(&phba->hbalock, iflags);
10958 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
10959 /* Set the async event flag */
10960 phba->hba_flag |= ASYNC_EVENT;
10961 spin_unlock_irqrestore(&phba->hbalock, iflags);
10962
10963 return true;
10964}
10965
10966/**
10967 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
10968 * @phba: Pointer to HBA context object.
10969 * @mcqe: Pointer to mailbox completion queue entry.
10970 *
10971 * This routine processes a mailbox completion queue entry with a mailbox
10972 * completion event.
10973 *
10974 * Return: true if work posted to worker thread, otherwise false.
10975 **/
10976static bool
10977lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
10978{
10979 uint32_t mcqe_status;
10980 MAILBOX_t *mbox, *pmbox;
10981 struct lpfc_mqe *mqe;
10982 struct lpfc_vport *vport;
10983 struct lpfc_nodelist *ndlp;
10984 struct lpfc_dmabuf *mp;
10985 unsigned long iflags;
10986 LPFC_MBOXQ_t *pmb;
10987 bool workposted = false;
10988 int rc;
10989
10990 /* If not a mailbox-complete MCQE, bail out via the consumed-flag check */
10991 if (!bf_get(lpfc_trailer_completed, mcqe))
10992 goto out_no_mqe_complete;
10993
10994 /* Get the reference to the active mbox command */
10995 spin_lock_irqsave(&phba->hbalock, iflags);
10996 pmb = phba->sli.mbox_active;
10997 if (unlikely(!pmb)) {
10998 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10999 "1832 No pending MBOX command to handle\n");
11000 spin_unlock_irqrestore(&phba->hbalock, iflags);
11001 goto out_no_mqe_complete;
11002 }
11003 spin_unlock_irqrestore(&phba->hbalock, iflags);
11004 mqe = &pmb->u.mqe;
11005 pmbox = (MAILBOX_t *)&pmb->u.mqe;
11006 mbox = phba->mbox;
11007 vport = pmb->vport;
11008
11009 /* Reset heartbeat timer */
11010 phba->last_completion_time = jiffies;
11011 del_timer(&phba->sli.mbox_tmo);
11012
11013 /* Move mbox data to caller's mailbox region, do endian swapping */
11014 if (pmb->mbox_cmpl && mbox)
11015 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
11016
11017 /*
11018 * For mcqe errors, conditionally move a modified error code to
11019 * the mbox so that the error will not be missed.
11020 */
11021 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
11022 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
11023 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
11024 bf_set(lpfc_mqe_status, mqe,
11025 (LPFC_MBX_ERROR_RANGE | mcqe_status));
11026 }
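/*
 * Editor's note on the merge above: when the MQE claims MBX_SUCCESS but the
 * MCQE status disagrees, OR-ing LPFC_MBX_ERROR_RANGE into the MQE status
 * keeps the failure visible to completion handlers that only inspect the
 * MQE status word.
 */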
11027 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11028 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11029 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
11030 "MBOX dflt rpi: status:x%x rpi:x%x",
11031 mcqe_status,
11032 pmbox->un.varWords[0], 0);
11033 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
11034 mp = (struct lpfc_dmabuf *)(pmb->context1);
11035 ndlp = (struct lpfc_nodelist *)pmb->context2;
11036 /* Reg_LOGIN of dflt RPI was successful. Now let's get
11037 * rid of the RPI using the same mbox buffer.
11038 */
11039 lpfc_unreg_login(phba, vport->vpi,
11040 pmbox->un.varWords[0], pmb);
11041 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
11042 pmb->context1 = mp;
11043 pmb->context2 = ndlp;
11044 pmb->vport = vport;
11045 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
11046 if (rc != MBX_BUSY)
11047 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11048 LOG_SLI, "0385 rc should "
11049 "have been MBX_BUSY\n");
11050 if (rc != MBX_NOT_FINISHED)
11051 goto send_current_mbox;
11052 }
11053 }
11054 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11055 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11056 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11057
11058 /* There is mailbox completion work to do */
11059 spin_lock_irqsave(&phba->hbalock, iflags);
11060 __lpfc_mbox_cmpl_put(phba, pmb);
11061 phba->work_ha |= HA_MBATT;
11062 spin_unlock_irqrestore(&phba->hbalock, iflags);
11063 workposted = true;
11064
11065send_current_mbox:
11066 spin_lock_irqsave(&phba->hbalock, iflags);
11067 /* Release the mailbox command posting token */
11068 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11069 /* Setting the active mailbox pointer must be in sync with clearing the flag */
11070 phba->sli.mbox_active = NULL;
11071 spin_unlock_irqrestore(&phba->hbalock, iflags);
11072 /* Wake up worker thread to post the next pending mailbox command */
11073 lpfc_worker_wake_up(phba);
11074out_no_mqe_complete:
11075 if (bf_get(lpfc_trailer_consumed, mcqe))
11076 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
11077 return workposted;
11078}
11079
11080/**
11081 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
11082 * @phba: Pointer to HBA context object.
11083 * @cqe: Pointer to mailbox completion queue entry.
11084 *
11085 * This routine processes a mailbox completion queue entry; it invokes the
11086 * proper mailbox completion or asynchronous event handling routine
11087 * according to the MCQE's async bit.
11088 *
11089 * Return: true if work posted to worker thread, otherwise false.
11090 **/
11091static bool
11092lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11093{
11094 struct lpfc_mcqe mcqe;
11095 bool workposted;
11096
11097 /* Copy the mailbox MCQE and convert endian order as needed */
11098 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
11099
11100 /* Invoke the proper event handling routine */
11101 if (!bf_get(lpfc_trailer_async, &mcqe))
11102 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
11103 else
11104 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
11105 return workposted;
11106}
11107
11108/**
11109 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
11110 * @phba: Pointer to HBA context object.
11111 * @wcqe: Pointer to work-queue completion queue entry.
11112 *
11113 * This routine handles an ELS work-queue completion event.
11114 *
11115 * Return: true if work posted to worker thread, otherwise false.
11116 **/
11117static bool
11118lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
11119 struct lpfc_wcqe_complete *wcqe)
11120{
11121 struct lpfc_iocbq *irspiocbq;
11122 unsigned long iflags;
11123 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
11124
11125 /* Get an irspiocbq for later ELS response processing use */
11126 irspiocbq = lpfc_sli_get_iocbq(phba);
11127 if (!irspiocbq) {
11128 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11129 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
11130 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
11131 pring->txq_cnt, phba->iocb_cnt,
11132 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
11133 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
11134 return false;
11135 }
11136
11137 /* Save off the slow-path queue event for the worker thread to process */
11138 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
11139 spin_lock_irqsave(&phba->hbalock, iflags);
11140 list_add_tail(&irspiocbq->cq_event.list,
11141 &phba->sli4_hba.sp_queue_event);
11142 phba->hba_flag |= HBA_SP_QUEUE_EVT;
11143 spin_unlock_irqrestore(&phba->hbalock, iflags);
11144
11145 return true;
11146}
11147
11148/**
11149 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
11150 * @phba: Pointer to HBA context object.
11151 * @wcqe: Pointer to work-queue completion queue entry.
11152 *
11153 * This routine handles a slow-path WQ entry consumed event by invoking the
11154 * proper WQ release routine to the slow-path WQ.
11155 **/
11156static void
11157lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
11158 struct lpfc_wcqe_release *wcqe)
11159{
11160 /* sanity check on queue memory */
11161 if (unlikely(!phba->sli4_hba.els_wq))
11162 return;
11163 /* Check for the slow-path ELS work queue */
11164 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
11165 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
11166 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11167 else
11168 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11169 "2579 Slow-path wqe consume event carries "
11170 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
11171 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
11172 phba->sli4_hba.els_wq->queue_id);
11173}
11174
11175/**
11176 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
11177 * @phba: Pointer to HBA context object.
11178 * @cq: Pointer to a WQ completion queue.
11179 * @wcqe: Pointer to work-queue completion queue entry.
11180 *
11181 * This routine handles an XRI abort event.
11182 *
11183 * Return: true if work posted to worker thread, otherwise false.
11184 **/
11185static bool
11186lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
11187 struct lpfc_queue *cq,
11188 struct sli4_wcqe_xri_aborted *wcqe)
11189{
11190 bool workposted = false;
11191 struct lpfc_cq_event *cq_event;
11192 unsigned long iflags;
11193
11194 /* Allocate a new internal CQ_EVENT entry */
11195 cq_event = lpfc_sli4_cq_event_alloc(phba);
11196 if (!cq_event) {
11197 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11198 "0602 Failed to allocate CQ_EVENT entry\n");
11199 return false;
11200 }
11201
11202 /* Move the CQE into the proper xri abort event list */
11203 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
11204 switch (cq->subtype) {
11205 case LPFC_FCP:
11206 spin_lock_irqsave(&phba->hbalock, iflags);
11207 list_add_tail(&cq_event->list,
11208 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
11209 /* Set the fcp xri abort event flag */
11210 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
11211 spin_unlock_irqrestore(&phba->hbalock, iflags);
11212 workposted = true;
11213 break;
11214 case LPFC_ELS:
11215 spin_lock_irqsave(&phba->hbalock, iflags);
11216 list_add_tail(&cq_event->list,
11217 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
11218 /* Set the els xri abort event flag */
11219 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
11220 spin_unlock_irqrestore(&phba->hbalock, iflags);
11221 workposted = true;
11222 break;
11223 default:
11224 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11225 "0603 Invalid work queue CQE subtype (x%x)\n",
11226 cq->subtype);
11227 workposted = false;
11228 break;
11229 }
11230 return workposted;
11231}
11232
11233/**
11234 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
11235 * @phba: Pointer to HBA context object.
11236 * @rcqe: Pointer to receive-queue completion queue entry.
11237 *
11238 * This routine processes a receive-queue completion queue entry.
11239 *
11240 * Return: true if work posted to worker thread, otherwise false.
11241 **/
11242static bool
4d9ab994 11243lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
4f774513 11244{
4f774513
JS
11245 bool workposted = false;
11246 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
11247 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
11248 struct hbq_dmabuf *dma_buf;
11249 uint32_t status, rq_id;
11250 unsigned long iflags;
11251
11252 /* sanity check on queue memory */
11253 if (unlikely(!hrq) || unlikely(!drq))
11254 return workposted;
11255
11256 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
11257 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
11258 else
11259 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
11260 if (rq_id != hrq->queue_id)
11261 goto out;
11262
11263 status = bf_get(lpfc_rcqe_status, rcqe);
11264 switch (status) {
11265 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
11266 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11267 "2537 Receive Frame Truncated!!\n");
11268 case FC_STATUS_RQ_SUCCESS:
11269 lpfc_sli4_rq_release(hrq, drq);
11270 spin_lock_irqsave(&phba->hbalock, iflags);
11271 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
11272 if (!dma_buf) {
11273 spin_unlock_irqrestore(&phba->hbalock, iflags);
11274 goto out;
11275 }
11276 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
11277 /* save off the frame for the worker thread to process */
11278 list_add_tail(&dma_buf->cq_event.list,
11279 &phba->sli4_hba.sp_queue_event);
11280 /* Frame received */
11281 phba->hba_flag |= HBA_SP_QUEUE_EVT;
11282 spin_unlock_irqrestore(&phba->hbalock, iflags);
11283 workposted = true;
11284 break;
11285 case FC_STATUS_INSUFF_BUF_NEED_BUF:
11286 case FC_STATUS_INSUFF_BUF_FRM_DISC:
11287 /* Post more buffers if possible */
11288 spin_lock_irqsave(&phba->hbalock, iflags);
11289 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
11290 spin_unlock_irqrestore(&phba->hbalock, iflags);
11291 workposted = true;
11292 break;
11293 }
11294out:
11295 return workposted;
11296}
11297
11298/**
11299 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
11300 * @phba: Pointer to HBA context object.
11301 * @cq: Pointer to the completion queue.
11302 * @cqe: Pointer to a completion queue entry.
11303 *
11304 * This routine processes a slow-path work-queue or receive-queue completion queue
11305 * entry.
11306 *
11307 * Return: true if work posted to worker thread, otherwise false.
11308 **/
11309static bool
11310lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11311 struct lpfc_cqe *cqe)
11312{
11313 struct lpfc_cqe cqevt;
11314 bool workposted = false;
11315
11316 /* Copy the work queue CQE and convert endian order if needed */
11317 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
11318
11319 /* Check and process for different type of WCQE and dispatch */
11320 switch (bf_get(lpfc_cqe_code, &cqevt)) {
11321 case CQE_CODE_COMPL_WQE:
11322 /* Process the WQ/RQ complete event */
11323 phba->last_completion_time = jiffies;
11324 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
11325 (struct lpfc_wcqe_complete *)&cqevt);
11326 break;
11327 case CQE_CODE_RELEASE_WQE:
11328 /* Process the WQ release event */
11329 lpfc_sli4_sp_handle_rel_wcqe(phba,
11330 (struct lpfc_wcqe_release *)&cqevt);
11331 break;
11332 case CQE_CODE_XRI_ABORTED:
11333 /* Process the WQ XRI abort event */
11334 phba->last_completion_time = jiffies;
11335 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11336 (struct sli4_wcqe_xri_aborted *)&cqevt);
11337 break;
11338 case CQE_CODE_RECEIVE:
11339 case CQE_CODE_RECEIVE_V1:
11340 /* Process the RQ event */
11341 phba->last_completion_time = jiffies;
11342 workposted = lpfc_sli4_sp_handle_rcqe(phba,
11343 (struct lpfc_rcqe *)&cqevt);
11344 break;
11345 default:
11346 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11347 "0388 Not a valid WCQE code: x%x\n",
11348 bf_get(lpfc_cqe_code, &cqevt));
11349 break;
11350 }
11351 return workposted;
11352}
11353
11354/**
11355 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
11356 * @phba: Pointer to HBA context object.
11357 * @eqe: Pointer to slow-path event queue entry.
11358 *
11359 * This routine processes an event queue entry from the slow-path event queue.
11360 * It checks the MajorCode and MinorCode to determine whether this is a
11361 * completion event on a completion queue; if not, an error is logged and the
11362 * routine just returns. Otherwise, it finds the corresponding completion
11363 * queue, processes all the entries on that completion queue, rearms the
11364 * completion queue, and then returns.
11365 *
11366 **/
11367static void
11368lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
11369{
11370 struct lpfc_queue *cq = NULL, *childq, *speq;
11371 struct lpfc_cqe *cqe;
11372 bool workposted = false;
11373 int ecount = 0;
11374 uint16_t cqid;
11375
11376 if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
11377 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11378 "0359 Not a valid slow-path completion "
11379 "event: majorcode=x%x, minorcode=x%x\n",
11380 bf_get_le32(lpfc_eqe_major_code, eqe),
11381 bf_get_le32(lpfc_eqe_minor_code, eqe));
11382 return;
11383 }
11384
11385 /* Get the reference to the corresponding CQ */
11386 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11387
11388 /* Search for completion queue pointer matching this cqid */
11389 speq = phba->sli4_hba.sp_eq;
11390 /* sanity check on queue memory */
11391 if (unlikely(!speq))
11392 return;
11393 list_for_each_entry(childq, &speq->child_list, list) {
11394 if (childq->queue_id == cqid) {
11395 cq = childq;
11396 break;
11397 }
11398 }
11399 if (unlikely(!cq)) {
11400 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11401 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11402 "0365 Slow-path CQ identifier "
11403 "(%d) does not exist\n", cqid);
11404 return;
11405 }
11406
11407 /* Process all the entries to the CQ */
11408 switch (cq->type) {
11409 case LPFC_MCQ:
11410 while ((cqe = lpfc_sli4_cq_get(cq))) {
11411 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
11412 if (!(++ecount % cq->entry_repost))
11413 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11414 }
11415 break;
11416 case LPFC_WCQ:
11417 while ((cqe = lpfc_sli4_cq_get(cq))) {
11418 if (cq->subtype == LPFC_FCP)
11419 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
11420 cqe);
11421 else
11422 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
11423 cqe);
11424 if (!(++ecount % cq->entry_repost))
11425 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11426 }
11427 break;
11428 default:
11429 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11430 "0370 Invalid completion queue type (%d)\n",
11431 cq->type);
11432 return;
11433 }
11434
11435 /* Catch the no cq entry condition, log an error */
11436 if (unlikely(ecount == 0))
11437 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11438 "0371 No entry from the CQ: identifier "
11439 "(x%x), type (%d)\n", cq->queue_id, cq->type);
11440
11441 /* In any case, flush and re-arm the CQ */
11442 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11443
11444 /* wake up worker thread if there are works to be done */
11445 if (workposted)
11446 lpfc_worker_wake_up(phba);
11447}
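/*
 * Editor's note on the pattern above: CQEs are acknowledged in batches via
 * lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM) every cq->entry_repost entries,
 * letting the port recycle entries early, and the queue is re-armed exactly
 * once (LPFC_QUEUE_REARM) on the way out so a fresh interrupt can be raised
 * for later work.
 */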
11448
11449/**
11450 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
11451 * @phba: Pointer to HBA context object.
11452 * @wcqe: Pointer to work-queue completion queue entry.
11453 * This routine processes a fast-path work-queue completion entry from the
11454 * fast-path event queue for FCP command response completion.
11455 **/
11456static void
11457lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
11458 struct lpfc_wcqe_complete *wcqe)
11459{
11460 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
11461 struct lpfc_iocbq *cmdiocbq;
11462 struct lpfc_iocbq irspiocbq;
11463 unsigned long iflags;
11464
11465 spin_lock_irqsave(&phba->hbalock, iflags);
11466 pring->stats.iocb_event++;
11467 spin_unlock_irqrestore(&phba->hbalock, iflags);
11468
11469 /* Check for response status */
11470 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
11471 /* If resource errors reported from HBA, reduce queue
11472 * depth of the SCSI device.
11473 */
11474 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
11475 IOSTAT_LOCAL_REJECT) &&
11476 (wcqe->parameter == IOERR_NO_RESOURCES)) {
11477 phba->lpfc_rampdown_queue_depth(phba);
11478 }
11479 /* Log the error status */
11480 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11481 "0373 FCP complete error: status=x%x, "
11482 "hw_status=x%x, total_data_specified=%d, "
11483 "parameter=x%x, word3=x%x\n",
11484 bf_get(lpfc_wcqe_c_status, wcqe),
11485 bf_get(lpfc_wcqe_c_hw_status, wcqe),
11486 wcqe->total_data_placed, wcqe->parameter,
11487 wcqe->word3);
11488 }
11489
11490 /* Look up the FCP command IOCB and create pseudo response IOCB */
11491 spin_lock_irqsave(&phba->hbalock, iflags);
11492 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11493 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11494 spin_unlock_irqrestore(&phba->hbalock, iflags);
11495 if (unlikely(!cmdiocbq)) {
11496 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11497 "0374 FCP complete with no corresponding "
11498 "cmdiocb: iotag (%d)\n",
11499 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11500 return;
11501 }
11502 if (unlikely(!cmdiocbq->iocb_cmpl)) {
11503 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11504 "0375 FCP cmdiocb not callback function "
11505 "iotag: (%d)\n",
11506 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11507 return;
11508 }
11509
11510 /* Fake the irspiocb and copy necessary response information */
11511 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
11512
11513 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
11514 spin_lock_irqsave(&phba->hbalock, iflags);
11515 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11516 spin_unlock_irqrestore(&phba->hbalock, iflags);
11517 }
11518
11519 /* Pass the cmd_iocb and the rsp state to the upper layer */
11520 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
11521}
11522
11523/**
11524 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
11525 * @phba: Pointer to HBA context object.
11526 * @cq: Pointer to completion queue.
11527 * @wcqe: Pointer to work-queue completion queue entry.
11528 *
11529 * This routine handles a fast-path WQ entry consumed event by invoking the
11530 * proper WQ release routine to the slow-path WQ.
11531 **/
11532static void
11533lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11534 struct lpfc_wcqe_release *wcqe)
11535{
11536 struct lpfc_queue *childwq;
11537 bool wqid_matched = false;
11538 uint16_t fcp_wqid;
11539
11540 /* Check for fast-path FCP work queue release */
11541 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
11542 list_for_each_entry(childwq, &cq->child_list, list) {
11543 if (childwq->queue_id == fcp_wqid) {
11544 lpfc_sli4_wq_release(childwq,
11545 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11546 wqid_matched = true;
11547 break;
11548 }
11549 }
11550 /* Report warning log message if no match found */
11551 if (wqid_matched != true)
11552 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11553 "2580 Fast-path wqe consume event carries "
11554 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
11555}
11556
11557/**
11558 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
11559 * @phba: Pointer to HBA context object.
11560 * @cq: Pointer to the completion queue.
11561 * @cqe: Pointer to fast-path completion queue entry.
11562 * This routine processes a fast-path work-queue completion entry from the
11563 * fast-path event queue for FCP command response completion.
11564 **/
11565static int
11566lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11567 struct lpfc_cqe *cqe)
11568{
11569 struct lpfc_wcqe_release wcqe;
11570 bool workposted = false;
11571
11572 /* Copy the work queue CQE and convert endian order if needed */
11573 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
11574
11575 /* Check and process for different type of WCQE and dispatch */
11576 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
11577 case CQE_CODE_COMPL_WQE:
11578 /* Process the WQ complete event */
11579 phba->last_completion_time = jiffies;
11580 lpfc_sli4_fp_handle_fcp_wcqe(phba,
11581 (struct lpfc_wcqe_complete *)&wcqe);
11582 break;
11583 case CQE_CODE_RELEASE_WQE:
11584 /* Process the WQ release event */
11585 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
11586 (struct lpfc_wcqe_release *)&wcqe);
11587 break;
11588 case CQE_CODE_XRI_ABORTED:
11589 /* Process the WQ XRI abort event */
11590 phba->last_completion_time = jiffies;
11591 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11592 (struct sli4_wcqe_xri_aborted *)&wcqe);
11593 break;
11594 default:
11595 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11596 "0144 Not a valid WCQE code: x%x\n",
11597 bf_get(lpfc_wcqe_c_code, &wcqe));
11598 break;
11599 }
11600 return workposted;
11601}
11602
11603/**
11604 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
11605 * @phba: Pointer to HBA context object.
11606 * @eqe: Pointer to fast-path event queue entry.
11607 *
11608 * This routine processes an event queue entry from the fast-path event queue.
11609 * It checks the MajorCode and MinorCode to determine whether this is a
11610 * completion event on a completion queue; if not, an error is logged and the
11611 * routine just returns. Otherwise, it finds the corresponding completion
11612 * queue, processes all the entries on that completion queue, rearms the
11613 * completion queue, and then returns.
11614 **/
11615static void
11616lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11617 uint32_t fcp_cqidx)
11618{
11619 struct lpfc_queue *cq;
11620 struct lpfc_cqe *cqe;
11621 bool workposted = false;
11622 uint16_t cqid;
11623 int ecount = 0;
11624
11625 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
11626 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11627 "0366 Not a valid fast-path completion "
11628 "event: majorcode=x%x, minorcode=x%x\n",
11629 bf_get_le32(lpfc_eqe_major_code, eqe),
11630 bf_get_le32(lpfc_eqe_minor_code, eqe));
11631 return;
11632 }
11633
11634 if (unlikely(!phba->sli4_hba.fcp_cq)) {
11635 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11636 "3146 Fast-path completion queues "
11637 "does not exist\n");
11638 return;
11639 }
11640 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
11641 if (unlikely(!cq)) {
11642 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11643 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11644 "0367 Fast-path completion queue "
2e90f4b5 11645 "(%d) does not exist\n", fcp_cqidx);
4f774513
JS
11646 return;
11647 }
11648
11649 /* Get the reference to the corresponding CQ */
11650 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11651 if (unlikely(cqid != cq->queue_id)) {
11652 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11653 "0368 Miss-matched fast-path completion "
11654 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
11655 cqid, cq->queue_id);
11656 return;
11657 }
11658
11659 /* Process all the entries to the CQ */
11660 while ((cqe = lpfc_sli4_cq_get(cq))) {
11661 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
11662 if (!(++ecount % cq->entry_repost))
11663 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11664 }
11665
11666 /* Catch the no cq entry condition */
11667 if (unlikely(ecount == 0))
11668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11669 "0369 No entry from fast-path completion "
11670 "queue fcpcqid=%d\n", cq->queue_id);
11671
11672 /* In any case, flush and re-arm the CQ */
11673 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11674
11675 /* wake up worker thread if there are works to be done */
11676 if (workposted)
11677 lpfc_worker_wake_up(phba);
11678}
11679
11680static void
11681lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
11682{
11683 struct lpfc_eqe *eqe;
11684
11685 /* walk all the EQ entries and drop on the floor */
11686 while ((eqe = lpfc_sli4_eq_get(eq)))
11687 ;
11688
11689 /* Clear and re-arm the EQ */
11690 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
11691}
11692
11693/**
11694 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
11695 * @irq: Interrupt number.
11696 * @dev_id: The device context pointer.
11697 *
11698 * This function is directly called from the PCI layer as an interrupt
11699 * service routine when device with SLI-4 interface spec is enabled with
11700 * MSI-X multi-message interrupt mode and there are slow-path events in
11701 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11702 * interrupt mode, this function is called as part of the device-level
11703 * interrupt handler. When the PCI slot is in error recovery or the HBA is
11704 * undergoing initialization, the interrupt handler will not process the
11705 * interrupt. The link attention and ELS ring attention events are handled
11706 * by the worker thread. The interrupt handler signals the worker thread
11707 * and returns for these events. This function is called without any lock
11708 * held. It gets the hbalock to access and update SLI data structures.
11709 *
11710 * This function returns IRQ_HANDLED when interrupt is handled else it
11711 * returns IRQ_NONE.
11712 **/
11713irqreturn_t
11714lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11715{
11716 struct lpfc_hba *phba;
11717 struct lpfc_queue *speq;
11718 struct lpfc_eqe *eqe;
11719 unsigned long iflag;
11720 int ecount = 0;
11721
11722 /*
11723 * Get the driver's phba structure from the dev_id
11724 */
11725 phba = (struct lpfc_hba *)dev_id;
11726
11727 if (unlikely(!phba))
11728 return IRQ_NONE;
11729
11730 /* Get to the EQ struct associated with this vector */
11731 speq = phba->sli4_hba.sp_eq;
11732 if (unlikely(!speq))
11733 return IRQ_NONE;
11734
11735 /* Check device state for handling interrupt */
11736 if (unlikely(lpfc_intr_state_check(phba))) {
11737 /* Check again for link_state with lock held */
11738 spin_lock_irqsave(&phba->hbalock, iflag);
11739 if (phba->link_state < LPFC_LINK_DOWN)
11740 /* Flush, clear interrupt, and rearm the EQ */
11741 lpfc_sli4_eq_flush(phba, speq);
11742 spin_unlock_irqrestore(&phba->hbalock, iflag);
11743 return IRQ_NONE;
11744 }
11745
11746 /*
11747 * Process all the events on the slow-path EQ
11748 */
11749 while ((eqe = lpfc_sli4_eq_get(speq))) {
11750 lpfc_sli4_sp_handle_eqe(phba, eqe);
11751 if (!(++ecount % speq->entry_repost))
11752 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
11753 }
11754
11755 /* Always clear and re-arm the slow-path EQ */
11756 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
11757
11758 /* Catch the no cq entry condition */
11759 if (unlikely(ecount == 0)) {
11760 if (phba->intr_type == MSIX)
11761 /* MSI-X: this EQ is not shared, so an empty EQ is worth logging */
11762 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11763 "0357 MSI-X interrupt with no EQE\n");
11764 else
11765 /* Non MSI-X: the line may be shared; treat no EQE as not ours */
11766 return IRQ_NONE;
11767 }
11768
11769 return IRQ_HANDLED;
11770} /* lpfc_sli4_sp_intr_handler */
11771
11772/**
11773 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
11774 * @irq: Interrupt number.
11775 * @dev_id: The device context pointer.
11776 *
11777 * This function is directly called from the PCI layer as an interrupt
11778 * service routine when device with SLI-4 interface spec is enabled with
11779 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11780 * ring event in the HBA. However, when the device is enabled with either
11781 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11782 * device-level interrupt handler. When the PCI slot is in error recovery
11783 * or the HBA is undergoing initialization, the interrupt handler will not
11784 * process the interrupt. The SCSI FCP fast-path ring events are handled in
11785 * the interrupt context. This function is called without any lock held.
11786 * It gets the hbalock to access and update SLI data structures. Note that
11787 * the FCP EQs map one-to-one to the FCP CQs, so the FCP EQ index is
11788 * equal to the FCP CQ index.
11789 *
11790 * This function returns IRQ_HANDLED when interrupt is handled else it
11791 * returns IRQ_NONE.
11792 **/
11793irqreturn_t
11794lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11795{
11796 struct lpfc_hba *phba;
11797 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
11798 struct lpfc_queue *fpeq;
11799 struct lpfc_eqe *eqe;
11800 unsigned long iflag;
11801 int ecount = 0;
11802 uint32_t fcp_eqidx;
11803
11804 /* Get the driver's phba structure from the dev_id */
11805 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
11806 phba = fcp_eq_hdl->phba;
11807 fcp_eqidx = fcp_eq_hdl->idx;
11808
11809 if (unlikely(!phba))
11810 return IRQ_NONE;
11811 if (unlikely(!phba->sli4_hba.fp_eq))
11812 return IRQ_NONE;
11813
11814 /* Get to the EQ struct associated with this vector */
11815 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
11816 if (unlikely(!fpeq))
11817 return IRQ_NONE;
11818
11819 /* Check device state for handling interrupt */
11820 if (unlikely(lpfc_intr_state_check(phba))) {
11821 /* Check again for link_state with lock held */
11822 spin_lock_irqsave(&phba->hbalock, iflag);
11823 if (phba->link_state < LPFC_LINK_DOWN)
11824 /* Flush, clear interrupt, and rearm the EQ */
11825 lpfc_sli4_eq_flush(phba, fpeq);
11826 spin_unlock_irqrestore(&phba->hbalock, iflag);
11827 return IRQ_NONE;
11828 }
11829
11830 /*
11831 * Process all the events on the FCP fast-path EQ
11832 */
11833 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
11834 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
11835 if (!(++ecount % fpeq->entry_repost))
11836 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
11837 }
11838
11839 /* Always clear and re-arm the fast-path EQ */
11840 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
11841
11842 if (unlikely(ecount == 0)) {
11843 if (phba->intr_type == MSIX)
11844 /* MSI-X: this EQ is not shared, so an empty EQ is worth logging */
11845 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11846 "0358 MSI-X interrupt with no EQE\n");
11847 else
11848 /* Non MSI-X: the line may be shared; treat no EQE as not ours */
11849 return IRQ_NONE;
11850 }
11851
11852 return IRQ_HANDLED;
11853} /* lpfc_sli4_fp_intr_handler */
11854
11855/**
11856 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
11857 * @irq: Interrupt number.
11858 * @dev_id: The device context pointer.
11859 *
11860 * This function is the device-level interrupt handler to device with SLI-4
11861 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
11862 * interrupt mode is enabled and there is an event in the HBA which requires
11863 * driver attention. This function invokes the slow-path interrupt attention
11864 * handling function and fast-path interrupt attention handling function in
11865 * turn to process the relevant HBA attention events. This function is called
11866 * without any lock held. It gets the hbalock to access and update SLI data
11867 * structures.
11868 *
11869 * This function returns IRQ_HANDLED when interrupt is handled, else it
11870 * returns IRQ_NONE.
11871 **/
11872irqreturn_t
11873lpfc_sli4_intr_handler(int irq, void *dev_id)
11874{
11875 struct lpfc_hba *phba;
11876 irqreturn_t sp_irq_rc, fp_irq_rc;
11877 bool fp_handled = false;
11878 uint32_t fcp_eqidx;
11879
11880 /* Get the driver's phba structure from the dev_id */
11881 phba = (struct lpfc_hba *)dev_id;
11882
11883 if (unlikely(!phba))
11884 return IRQ_NONE;
11885
11886 /*
11887 * Invokes slow-path host attention interrupt handling as appropriate.
11888 */
11889 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
11890
11891 /*
11892 * Invoke fast-path host attention interrupt handling as appropriate.
11893 */
11894 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
11895 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
11896 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
11897 if (fp_irq_rc == IRQ_HANDLED)
11898 fp_handled |= true;
11899 }
11900
11901 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
11902} /* lpfc_sli4_intr_handler */
11903
11904/**
11905 * lpfc_sli4_queue_free - free a queue structure and associated memory
11906 * @queue: The queue structure to free.
11907 *
11908 * This function frees a queue structure and the DMAable memory used for
11909 * the host resident queue. This function must be called after destroying the
11910 * queue on the HBA.
11911 **/
11912void
11913lpfc_sli4_queue_free(struct lpfc_queue *queue)
11914{
11915 struct lpfc_dmabuf *dmabuf;
11916
11917 if (!queue)
11918 return;
11919
11920 while (!list_empty(&queue->page_list)) {
11921 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
11922 list);
11923 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
11924 dmabuf->virt, dmabuf->phys);
11925 kfree(dmabuf);
11926 }
11927 kfree(queue);
11928 return;
11929}
11930
11931/**
11932 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
11933 * @phba: The HBA that this queue is being created on.
11934 * @entry_size: The size of each queue entry for this queue.
11935 * @entry_count: The number of entries that this queue will handle.
11936 *
11937 * This function allocates a queue structure and the DMAable memory used for
11938 * the host resident queue. This function must be called before creating the
11939 * queue on the HBA.
11940 **/
11941struct lpfc_queue *
11942lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
11943 uint32_t entry_count)
11944{
11945 struct lpfc_queue *queue;
11946 struct lpfc_dmabuf *dmabuf;
11947 int x, total_qe_count;
11948 void *dma_pointer;
11949 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
11950
11951 if (!phba->sli4_hba.pc_sli4_params.supported)
11952 hw_page_size = SLI4_PAGE_SIZE;
11953
4f774513
JS
11954 queue = kzalloc(sizeof(struct lpfc_queue) +
11955 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
11956 if (!queue)
11957 return NULL;
11958 queue->page_count = (ALIGN(entry_size * entry_count,
11959 hw_page_size))/hw_page_size;
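/*
 * Editor's worked example (illustrative numbers): with a 64-byte entry size,
 * 1024 entries and 4 KiB hardware pages, ALIGN(64 * 1024, 4096) / 4096
 * yields a page_count of 16.
 */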
11960 INIT_LIST_HEAD(&queue->list);
11961 INIT_LIST_HEAD(&queue->page_list);
11962 INIT_LIST_HEAD(&queue->child_list);
11963 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
11964 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
11965 if (!dmabuf)
11966 goto out_fail;
11967 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
11968 hw_page_size, &dmabuf->phys,
11969 GFP_KERNEL);
11970 if (!dmabuf->virt) {
11971 kfree(dmabuf);
11972 goto out_fail;
11973 }
11974 memset(dmabuf->virt, 0, hw_page_size);
11975 dmabuf->buffer_tag = x;
11976 list_add_tail(&dmabuf->list, &queue->page_list);
11977 /* initialize queue's entry array */
11978 dma_pointer = dmabuf->virt;
11979 for (; total_qe_count < entry_count &&
11980 dma_pointer < (hw_page_size + dmabuf->virt);
11981 total_qe_count++, dma_pointer += entry_size) {
11982 queue->qe[total_qe_count].address = dma_pointer;
11983 }
11984 }
11985 queue->entry_size = entry_size;
11986 queue->entry_count = entry_count;
11987
11988 /*
11989 * entry_repost is calculated based on the number of entries in the
11990 * queue. This works out except for RQs. If buffers are NOT initially
11991 * posted for every RQE, entry_repost should be adjusted accordingly.
11992 */
11993 queue->entry_repost = (entry_count >> 3);
11994 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
11995 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
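/*
 * Editor's worked example: entry_count = 1024 gives entry_repost = 128, so
 * consumed entries are released back to the port every 128 CQEs/EQEs;
 * shallower queues are clamped up to LPFC_QUEUE_MIN_REPOST.
 */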
11996 queue->phba = phba;
11997
11998 return queue;
11999out_fail:
12000 lpfc_sli4_queue_free(queue);
12001 return NULL;
12002}
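/*
 * Editor's usage sketch (hypothetical, not from this driver): pairing
 * lpfc_sli4_queue_alloc() with a queue-create call, and freeing on failure.
 * The 4-byte entry size, 1024-entry depth and 10000 interrupts/sec limit are
 * illustrative assumptions only.
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, 4, 1024);
 *	if (!eq)
 *		return -ENOMEM;
 *	if (lpfc_eq_create(phba, eq, 10000)) {
 *		lpfc_sli4_queue_free(eq);
 *		return -ENXIO;
 *	}
 */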
12003
12004/**
12005 * lpfc_eq_create - Create an Event Queue on the HBA
12006 * @phba: HBA structure that indicates port to create a queue on.
12007 * @eq: The queue structure to use to create the event queue.
12008 * @imax: The maximum interrupt per second limit.
12009 *
12010 * This function creates an event queue, as detailed in @eq, on a port,
12011 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
12012 *
12013 * The @phba struct is used to send mailbox command to HBA. The @eq struct
12014 * is used to get the entry count and entry size that are necessary to
12015 * determine the number of pages to allocate and use for this queue. This
12016 * function will send the EQ_CREATE mailbox command to the HBA to setup the
12017 * event queue. This function is synchronous and will wait for the mailbox
12018 * command to finish before returning.
12019 *
12020 * On success this function will return a zero. If unable to allocate enough
12021 * memory this function will return -ENOMEM. If the queue create mailbox command
12022 * fails this function will return -ENXIO.
12023 **/
12024uint32_t
12025lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
12026{
12027 struct lpfc_mbx_eq_create *eq_create;
12028 LPFC_MBOXQ_t *mbox;
12029 int rc, length, status = 0;
12030 struct lpfc_dmabuf *dmabuf;
12031 uint32_t shdr_status, shdr_add_status;
12032 union lpfc_sli4_cfg_shdr *shdr;
12033 uint16_t dmult;
12034 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12035
12036 /* sanity check on queue memory */
12037 if (!eq)
12038 return -ENODEV;
12039 if (!phba->sli4_hba.pc_sli4_params.supported)
12040 hw_page_size = SLI4_PAGE_SIZE;
12041
12042 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12043 if (!mbox)
12044 return -ENOMEM;
12045 length = (sizeof(struct lpfc_mbx_eq_create) -
12046 sizeof(struct lpfc_sli4_cfg_mhdr));
12047 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12048 LPFC_MBOX_OPCODE_EQ_CREATE,
12049 length, LPFC_SLI4_MBX_EMBED);
12050 eq_create = &mbox->u.mqe.un.eq_create;
12051 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
12052 eq->page_count);
12053 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
12054 LPFC_EQE_SIZE);
12055 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
12056 /* Calculate delay multiplier from maximum interrupts per second */
12057 dmult = LPFC_DMULT_CONST/imax - 1;
12058 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
12059 dmult);
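/*
 * Editor's note: dmult is LPFC_DMULT_CONST / imax - 1, so halving the
 * requested interrupt rate roughly doubles the coalescing delay; imax is
 * assumed non-zero here, otherwise the division above would fault.
 */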
12060 switch (eq->entry_count) {
12061 default:
12062 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12063 "0360 Unsupported EQ count. (%d)\n",
12064 eq->entry_count);
12065 if (eq->entry_count < 256)
12066 return -EINVAL;
12067 /* otherwise default to smallest count (fall through) */
12068 case 256:
12069 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12070 LPFC_EQ_CNT_256);
12071 break;
12072 case 512:
12073 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12074 LPFC_EQ_CNT_512);
12075 break;
12076 case 1024:
12077 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12078 LPFC_EQ_CNT_1024);
12079 break;
12080 case 2048:
12081 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12082 LPFC_EQ_CNT_2048);
12083 break;
12084 case 4096:
12085 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12086 LPFC_EQ_CNT_4096);
12087 break;
12088 }
12089 list_for_each_entry(dmabuf, &eq->page_list, list) {
12090 memset(dmabuf->virt, 0, hw_page_size);
12091 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12092 putPaddrLow(dmabuf->phys);
12093 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12094 putPaddrHigh(dmabuf->phys);
12095 }
12096 mbox->vport = phba->pport;
12097 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12098 mbox->context1 = NULL;
12099 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12100 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
12101 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12102 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12103 if (shdr_status || shdr_add_status || rc) {
12104 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12105 "2500 EQ_CREATE mailbox failed with "
12106 "status x%x add_status x%x, mbx status x%x\n",
12107 shdr_status, shdr_add_status, rc);
12108 status = -ENXIO;
12109 }
12110 eq->type = LPFC_EQ;
12111 eq->subtype = LPFC_NONE;
12112 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
12113 if (eq->queue_id == 0xFFFF)
12114 status = -ENXIO;
12115 eq->host_index = 0;
12116 eq->hba_index = 0;
12117
12118 mempool_free(mbox, phba->mbox_mem_pool);
12119 return status;
12120}
12121
12122/**
12123 * lpfc_cq_create - Create a Completion Queue on the HBA
12124 * @phba: HBA structure that indicates port to create a queue on.
12125 * @cq: The queue structure to use to create the completion queue.
12126 * @eq: The event queue to bind this completion queue to.
12127 *
12128 * This function creates a completion queue, as detailed in @cq, on a port,
12129 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
12130 *
12131 * The @phba struct is used to send mailbox command to HBA. The @cq struct
12132 * is used to get the entry count and entry size that are necessary to
12133 * determine the number of pages to allocate and use for this queue. The @eq
12134 * is used to indicate which event queue to bind this completion queue to. This
12135 * function will send the CQ_CREATE mailbox command to the HBA to setup the
12136 * completion queue. This function is synchronous and will wait for the mailbox
12137 * command to finish before returning.
12138 *
12139 * On success this function will return a zero. If unable to allocate enough
12140 * memory this function will return -ENOMEM. If the queue create mailbox command
12141 * fails this function will return -ENXIO.
12142 **/
12143uint32_t
12144lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
12145 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
12146{
12147 struct lpfc_mbx_cq_create *cq_create;
12148 struct lpfc_dmabuf *dmabuf;
12149 LPFC_MBOXQ_t *mbox;
12150 int rc, length, status = 0;
12151 uint32_t shdr_status, shdr_add_status;
12152 union lpfc_sli4_cfg_shdr *shdr;
12153 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12154
12155 /* sanity check on queue memory */
12156 if (!cq || !eq)
12157 return -ENODEV;
12158 if (!phba->sli4_hba.pc_sli4_params.supported)
12159 hw_page_size = SLI4_PAGE_SIZE;
12160
12161 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12162 if (!mbox)
12163 return -ENOMEM;
12164 length = (sizeof(struct lpfc_mbx_cq_create) -
12165 sizeof(struct lpfc_sli4_cfg_mhdr));
12166 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12167 LPFC_MBOX_OPCODE_CQ_CREATE,
12168 length, LPFC_SLI4_MBX_EMBED);
12169 cq_create = &mbox->u.mqe.un.cq_create;
12170 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
12171 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
12172 cq->page_count);
12173 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
12174 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
12175 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12176 phba->sli4_hba.pc_sli4_params.cqv);
12177 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
12178 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
12179 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
12180 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
12181 eq->queue_id);
12182 } else {
12183 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
12184 eq->queue_id);
12185 }
12186 switch (cq->entry_count) {
12187 default:
12188 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12189 "0361 Unsupported CQ count. (%d)\n",
12190 cq->entry_count);
12191 if (cq->entry_count < 256)
12192 return -EINVAL;
12193 /* otherwise default to smallest count (fall through) */
12194 case 256:
12195 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12196 LPFC_CQ_CNT_256);
12197 break;
12198 case 512:
12199 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12200 LPFC_CQ_CNT_512);
12201 break;
12202 case 1024:
12203 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12204 LPFC_CQ_CNT_1024);
12205 break;
12206 }
12207 list_for_each_entry(dmabuf, &cq->page_list, list) {
12208 memset(dmabuf->virt, 0, hw_page_size);
12209 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12210 putPaddrLow(dmabuf->phys);
12211 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12212 putPaddrHigh(dmabuf->phys);
12213 }
12214 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12215
12216 /* The IOCTL status is embedded in the mailbox subheader. */
12217 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12218 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12219 if (shdr_status || shdr_add_status || rc) {
12220 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12221 "2501 CQ_CREATE mailbox failed with "
12222 "status x%x add_status x%x, mbx status x%x\n",
12223 shdr_status, shdr_add_status, rc);
12224 status = -ENXIO;
12225 goto out;
12226 }
12227 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
12228 if (cq->queue_id == 0xFFFF) {
12229 status = -ENXIO;
12230 goto out;
12231 }
12232 /* link the cq onto the parent eq child list */
12233 list_add_tail(&cq->list, &eq->child_list);
12234 /* Set up completion queue's type and subtype */
12235 cq->type = type;
12236 cq->subtype = subtype;
12237 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
12238 cq->assoc_qid = eq->queue_id;
12239 cq->host_index = 0;
12240 cq->hba_index = 0;
12241
12242out:
12243 mempool_free(mbox, phba->mbox_mem_pool);
12244 return status;
12245}
12246
12247/**
12248 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
12249 * @phba: HBA structure that indicates port to create a queue on.
12250 * @mq: The queue structure to use to create the mailbox queue.
12251 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
12252 * @cq: The completion queue to associate with this mailbox queue.
12253 *
12254 * This function provides fallback (fb) functionality when the
12255 * mq_create_ext fails on older FW generations. Its purpose is otherwise
12256 * identical to mq_create_ext.
12257 *
12258 * This routine cannot fail as all attributes were previously accessed and
12259 * initialized in mq_create_ext.
12260 **/
12261static void
12262lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
12263 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
12264{
12265 struct lpfc_mbx_mq_create *mq_create;
12266 struct lpfc_dmabuf *dmabuf;
12267 int length;
12268
12269 length = (sizeof(struct lpfc_mbx_mq_create) -
12270 sizeof(struct lpfc_sli4_cfg_mhdr));
12271 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12272 LPFC_MBOX_OPCODE_MQ_CREATE,
12273 length, LPFC_SLI4_MBX_EMBED);
12274 mq_create = &mbox->u.mqe.un.mq_create;
12275 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
12276 mq->page_count);
12277 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
12278 cq->queue_id);
12279 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
12280 switch (mq->entry_count) {
12281 case 16:
12282 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12283 LPFC_MQ_RING_SIZE_16);
12284 break;
12285 case 32:
12286 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12287 LPFC_MQ_RING_SIZE_32);
12288 break;
12289 case 64:
12290 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12291 LPFC_MQ_RING_SIZE_64);
12292 break;
12293 case 128:
12294 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12295 LPFC_MQ_RING_SIZE_128);
12296 break;
12297 }
12298 list_for_each_entry(dmabuf, &mq->page_list, list) {
12299 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12300 putPaddrLow(dmabuf->phys);
12301 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12302 putPaddrHigh(dmabuf->phys);
12303 }
12304}
12305
12306/**
12307 * lpfc_mq_create - Create a mailbox Queue on the HBA
12308 * @phba: HBA structure that indicates port to create a queue on.
12309 * @mq: The queue structure to use to create the mailbox queue.
12310 * @cq: The completion queue to associate with this mailbox queue.
12311 * @subtype: The queue's subtype.
12312 *
12313 * This function creates a mailbox queue, as detailed in @mq, on a port,
12314 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
12315 *
12316 * The @phba struct is used to send mailbox command to HBA. The @cq struct
12317 * is used to get the entry count and entry size that are necessary to
12318 * determine the number of pages to allocate and use for this queue. This
12319 * function will send the MQ_CREATE mailbox command to the HBA to setup the
12320 * mailbox queue. This function is synchronous and will wait for the mailbox
12321 * command to finish before returning.
12322 *
12323 * On success this function will return a zero. If unable to allocate enough
12324 * memory this function will return -ENOMEM. If the queue create mailbox command
12325 * fails this function will return -ENXIO.
12326 **/
12327 int32_t
12328lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
12329 struct lpfc_queue *cq, uint32_t subtype)
12330{
12331 struct lpfc_mbx_mq_create *mq_create;
12332 struct lpfc_mbx_mq_create_ext *mq_create_ext;
12333 struct lpfc_dmabuf *dmabuf;
12334 LPFC_MBOXQ_t *mbox;
12335 int rc, length, status = 0;
12336 uint32_t shdr_status, shdr_add_status;
12337 union lpfc_sli4_cfg_shdr *shdr;
12338 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12339
12340 /* sanity check on queue memory */
12341 if (!mq || !cq)
12342 return -ENODEV;
12343 if (!phba->sli4_hba.pc_sli4_params.supported)
12344 hw_page_size = SLI4_PAGE_SIZE;
12345
12346 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12347 if (!mbox)
12348 return -ENOMEM;
12349 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
12350 sizeof(struct lpfc_sli4_cfg_mhdr));
12351 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12352 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
12353 length, LPFC_SLI4_MBX_EMBED);
12354
12355 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
12356 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
12357 bf_set(lpfc_mbx_mq_create_ext_num_pages,
12358 &mq_create_ext->u.request, mq->page_count);
12359 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
12360 &mq_create_ext->u.request, 1);
12361 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
12362 &mq_create_ext->u.request, 1);
12363 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
12364 &mq_create_ext->u.request, 1);
12365 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
12366 &mq_create_ext->u.request, 1);
12367 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
12368 &mq_create_ext->u.request, 1);
12369 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
12370 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12371 phba->sli4_hba.pc_sli4_params.mqv);
12372 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
12373 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
12374 cq->queue_id);
12375 else
12376 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
12377 cq->queue_id);
12378 switch (mq->entry_count) {
12379 default:
12380 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12381 "0362 Unsupported MQ count. (%d)\n",
12382 mq->entry_count);
12383 if (mq->entry_count < 16)
12384 return -EINVAL;
12385 /* otherwise default to smallest count (fall through) */
12386 case 16:
12387 bf_set(lpfc_mq_context_ring_size,
12388 &mq_create_ext->u.request.context,
12389 LPFC_MQ_RING_SIZE_16);
12390 break;
12391 case 32:
12392 bf_set(lpfc_mq_context_ring_size,
12393 &mq_create_ext->u.request.context,
12394 LPFC_MQ_RING_SIZE_32);
12395 break;
12396 case 64:
12397 bf_set(lpfc_mq_context_ring_size,
12398 &mq_create_ext->u.request.context,
12399 LPFC_MQ_RING_SIZE_64);
12400 break;
12401 case 128:
12402 bf_set(lpfc_mq_context_ring_size,
12403 &mq_create_ext->u.request.context,
12404 LPFC_MQ_RING_SIZE_128);
12405 break;
12406 }
12407 list_for_each_entry(dmabuf, &mq->page_list, list) {
49198b37 12408 memset(dmabuf->virt, 0, hw_page_size);
b19a061a 12409 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
04c68496 12410 putPaddrLow(dmabuf->phys);
b19a061a 12411 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
04c68496
JS
12412 putPaddrHigh(dmabuf->phys);
12413 }
12414 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
b19a061a
JS
12415 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12416 &mq_create_ext->u.response);
12417 if (rc != MBX_SUCCESS) {
12418 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12419 "2795 MQ_CREATE_EXT failed with "
12420 "status x%x. Failback to MQ_CREATE.\n",
12421 rc);
12422 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
12423 mq_create = &mbox->u.mqe.un.mq_create;
12424 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12425 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
12426 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12427 &mq_create->u.response);
12428 }
12429
04c68496 12430 /* The IOCTL status is embedded in the mailbox subheader. */
04c68496
JS
12431 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12432 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12433 if (shdr_status || shdr_add_status || rc) {
12434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12435 "2502 MQ_CREATE mailbox failed with "
12436 "status x%x add_status x%x, mbx status x%x\n",
12437 shdr_status, shdr_add_status, rc);
12438 status = -ENXIO;
12439 goto out;
12440 }
04c68496
JS
12441 if (mq->queue_id == 0xFFFF) {
12442 status = -ENXIO;
12443 goto out;
12444 }
12445 mq->type = LPFC_MQ;
2a622bfb 12446 mq->assoc_qid = cq->queue_id;
04c68496
JS
12447 mq->subtype = subtype;
12448 mq->host_index = 0;
12449 mq->hba_index = 0;
12450
12451 /* link the mq onto the parent cq child list */
12452 list_add_tail(&mq->list, &cq->child_list);
12453out:
8fa38513 12454 mempool_free(mbox, phba->mbox_mem_pool);
04c68496
JS
12455 return status;
12456}
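
/*
 * Hypothetical usage sketch (not part of the driver): allocating queue
 * memory and creating a mailbox queue bound to an existing completion
 * queue, roughly as lpfc_sli4_queue_setup() does at port bring-up. The
 * entry size/count macros are assumed to be this driver version's
 * defaults; treat the whole helper as illustrative only.
 */
static int __maybe_unused
lpfc_example_setup_mq(struct lpfc_hba *phba, struct lpfc_queue *mbx_cq)
{
	struct lpfc_queue *mq;
	int32_t rc;

	/* Allocate host memory pages sized for the default MQE layout */
	mq = lpfc_sli4_queue_alloc(phba, LPFC_MQE_SIZE, LPFC_MQE_DEF_COUNT);
	if (!mq)
		return -ENOMEM;

	/* Create the MQ on the port; it is linked under the parent CQ */
	rc = lpfc_mq_create(phba, mq, mbx_cq, LPFC_MBOX);
	if (rc) {
		lpfc_sli4_queue_free(mq);
		return rc;
	}
	return 0;
}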

/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port, described
 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @wq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to. This
 * function will send the WQ_CREATE mailbox command to the HBA to setup the
 * work queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
uint32_t
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	struct dma_address *page;

	/* sanity check on queue memory */
	if (!wq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);
	if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
		page = wq_create->u.request_1.page;
	} else {
		page = wq_create->u.request.page;
	}
	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2503 WQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
	if (wq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	wq->type = LPFC_WQ;
	wq->assoc_qid = cq->queue_id;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;
	wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
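
/*
 * Hypothetical usage sketch (not part of the driver): creating an ELS
 * work queue against its completion queue, mirroring what
 * lpfc_sli4_queue_setup() does for the port's els_wq. The size/count
 * macros are assumed defaults for this driver version.
 */
static int __maybe_unused
lpfc_example_setup_els_wq(struct lpfc_hba *phba, struct lpfc_queue *els_cq)
{
	struct lpfc_queue *wq;
	uint32_t rc;

	wq = lpfc_sli4_queue_alloc(phba, LPFC_WQE_SIZE, LPFC_WQE_DEF_COUNT);
	if (!wq)
		return -ENOMEM;
	rc = lpfc_wq_create(phba, wq, els_cq, LPFC_ELS);
	if (rc) {
		lpfc_sli4_queue_free(wq);
		return (int)rc;
	}
	return 0;
}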

/**
 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @rq: The queue structure to use for the receive queue.
 * @qno: The associated HBQ number
 *
 * For SLI4 we need to adjust the RQ repost value based on
 * the number of buffers that are initially posted to the RQ.
 */
void
lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
{
	uint32_t cnt;

	/* sanity check on queue memory */
	if (!rq)
		return;
	cnt = lpfc_hbq_defs[qno]->entry_count;

	/* Recalc repost for RQs based on buffers initially posted */
	cnt = (cnt >> 3);
	if (cnt < LPFC_QUEUE_MIN_REPOST)
		cnt = LPFC_QUEUE_MIN_REPOST;

	rq->entry_repost = cnt;
}
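
/*
 * Worked example of the arithmetic above (illustrative): with an HBQ
 * entry_count of 512, the repost threshold becomes 512 >> 3 = 64, so the
 * host signals the port after roughly every 64 buffers are consumed;
 * smaller counts are floored at LPFC_QUEUE_MIN_REPOST so the port is
 * never starved of repost notifications.
 */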

/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind these receive queues to.
 * @subtype: The subtype of the queues indicating their functionality.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
 * struct is used to get the entry count that is necessary to determine the
 * number of pages to use for this queue. The @cq is used to indicate which
 * completion queue to bind received buffers that are posted to these queues to.
 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
 * receive queue pair. This function is synchronous and will wait for the
 * mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
uint32_t
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!hrq || !drq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size,
		       &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size,
		       &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				/* free the mailbox on this error path too */
				status = -EINVAL;
				goto out;
			}
			/* otherwise default to smallest count (fall through) */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;

	/* now create the data queue */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				/* free the mailbox on this error path too */
				status = -EINVAL;
				goto out;
			}
			/* otherwise default to smallest count (fall through) */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
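
/*
 * Hypothetical usage sketch (not part of the driver): creating the
 * unsolicited-receive header/data RQ pair and then sizing their repost
 * thresholds, roughly as lpfc_sli4_queue_setup() does. Assumes both
 * queues were allocated with identical entry counts, which
 * lpfc_rq_create() enforces.
 */
static int __maybe_unused
lpfc_example_setup_rq_pair(struct lpfc_hba *phba, struct lpfc_queue *hrq,
			   struct lpfc_queue *drq, struct lpfc_queue *cq)
{
	uint32_t rc;

	rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
	if (rc)
		return (int)rc;

	/* Recompute entry_repost from the buffers posted to the ELS HBQ */
	lpfc_rq_adjust_repost(phba, hrq, LPFC_ELS_HBQ);
	lpfc_rq_adjust_repost(phba, drq, LPFC_ELS_HBQ);
	return 0;
}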

/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}

	/* Remove eq from any list */
	list_del_init(&eq->list);
	mempool_free(mbox, eq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq)
		return -ENODEV;
	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
	       cq->queue_id);
	mbox->vport = cq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2506 CQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove cq from any list */
	list_del_init(&cq->list);
	mempool_free(mbox, cq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!mq)
		return -ENODEV;
	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove mq from any list */
	list_del_init(&mq->list);
	mempool_free(mbox, mq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!wq)
		return -ENODEV;
	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
	       wq->queue_id);
	mbox->vport = wq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2508 WQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove wq from any list */
	list_del_init(&wq->list);
	mempool_free(mbox, wq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The queue structure associated with the header receive queue.
 * @drq: The queue structure associated with the data receive queue.
 *
 * This function destroys the receive queue pair, as detailed in @hrq and
 * @drq, by sending a mailbox command, specific to the type of queue, to the
 * HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
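
/*
 * Hypothetical teardown sketch (not part of the driver): child queues are
 * destroyed before their parent completion queues, mirroring the order
 * lpfc_sli4_queue_unset() uses. Field names follow this driver version's
 * struct lpfc_sli4_hba; treat as illustrative.
 */
static void __maybe_unused
lpfc_example_teardown(struct lpfc_hba *phba)
{
	/* Work and receive queues hang off their CQs; unwind them first */
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);

	/* Now the parent completion queues can go */
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
}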

/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to the HBA structure for which this call is being executed.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * @pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * @pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If 2 SGLs are mapped then the first one must have 256 entries and the
 * second sgl can have between 1 and 256 entries.
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			 sizeof(struct lpfc_mbx_post_sgl_pages) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	/* return rc (not 0) so the -ENXIO set above actually propagates */
	return rc;
}
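
/*
 * Hypothetical usage sketch (not part of the driver): posting a
 * single-page SGL for an ELS sglq. The second page address is 0 per the
 * rules above, since fewer than 256 segments are mapped.
 */
static int __maybe_unused
lpfc_example_post_one_sgl(struct lpfc_hba *phba, struct lpfc_sglq *sglq)
{
	return lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
}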

/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available logical xri from
 * the driver's xri bitmask, consistent with the SLI-4 interface spec.
 *
 * Returns
 *	An xri defined as 0 <= xri < max_xri if successful
 *	NO_XRI if no xris are available.
 **/
uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	unsigned long xri;

	/*
	 * Fetch the next logical xri.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
				 phba->sli4_hba.max_cfg_param.max_xri, 0);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	} else {
		set_bit(xri, phba->sli4_hba.xri_bmask);
		phba->sli4_hba.max_cfg_param.xri_used++;
	}
	spin_unlock_irq(&phba->hbalock);
	return xri;
}

/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver. The caller is expected
 * to hold the hbalock.
 **/
void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
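
/*
 * Hypothetical lifecycle sketch (not part of the driver): an xri is
 * pulled from the bitmask, tied to an exchange, and returned when the
 * exchange completes.
 */
static void __maybe_unused
lpfc_example_xri_cycle(struct lpfc_hba *phba)
{
	uint16_t xri = lpfc_sli4_alloc_xri(phba);

	if (xri == NO_XRI)
		return;	/* pool exhausted */
	/* ... post the SGL for this xri and run the exchange ... */
	lpfc_sli4_free_xri(phba, xri);
}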

/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. It returns the allocated
 * xritag if successful, else NO_XRI (0xffff) when no unused xritag is
 * available; NO_XRI is not a valid xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index != NO_XRI)
		return xri_index;

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2004 Failed to allocate XRI.last XRITAG is %d"
			" Max XRI is %d, Used XRI is %d\n",
			xri_index,
			phba->sli4_hba.max_cfg_param.max_xri,
			phba->sli4_hba.max_cfg_param.xri_used);
	return NO_XRI;
}

/**
 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
static int
lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
			    struct list_head *post_sgl_list,
			    int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Set up the SGL pages in the non-embedded DMA pages */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @sblist: pointer to scsi buffer list.
 * @count: number of scsi buffers on the list.
 *
 * This routine is invoked to post a block of @count scsi sgl pages from a
 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
 * No Lock is held.
 **/
int
lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
			      struct list_head *sblist,
			      int count)
{
	struct lpfc_scsi_buf *psb;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0217 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0283 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2561 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(psb, sblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = psb->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2564 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
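
/*
 * Illustrative helper (an assumption, not driver API): the largest block
 * count the POST_SGL_PAGES routines above can accept, derived from the
 * reqlen check, since the whole non-embedded command must fit in one
 * SLI4 page.
 */
static inline int __maybe_unused
lpfc_example_max_sgl_block_count(void)
{
	return (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
		sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
}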

/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return a zero if the frame is a valid frame or a non-zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	/* make rctl_names static to save stack space */
	static char *rctl_names[] = FC_RCTL_NAMES_INIT;
	char *type_names[] = FC_TYPE_NAMES_INIT;
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:		/* extended link services request */
	case FC_RCTL_ELS_REP:		/* extended link services reply */
	case FC_RCTL_ELS4_REQ:		/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:		/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:		/* basic link service NOP */
	case FC_RCTL_BA_ABTS:		/* basic link service abort */
	case FC_RCTL_BA_RMC:		/* remove connection */
	case FC_RCTL_BA_ACC:		/* basic accept */
	case FC_RCTL_BA_RJT:		/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:		/* acknowledge_1 */
	case FC_RCTL_ACK_0:		/* acknowledge_0 */
	case FC_RCTL_P_RJT:		/* port reject */
	case FC_RCTL_F_RJT:		/* fabric reject */
	case FC_RCTL_P_BSY:		/* port busy */
	case FC_RCTL_F_BSY:		/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:		/* fabric busy to link control frame */
	case FC_RCTL_LCR:		/* link credit reset */
	case FC_RCTL_END:		/* end */
		break;
	case FC_RCTL_VFTH:		/* Virtual Fabric tagging Header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}
	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:%s type:%s "
			"Frame Data:%08x %08x %08x %08x %08x %08x\n",
			rctl_names[fc_hdr->fh_r_ctl],
			type_names[fc_hdr->fh_type],
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:%s type:%s\n",
			rctl_names[fc_hdr->fh_r_ctl],
			type_names[fc_hdr->fh_type]);
	return 1;
}
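
/*
 * Hypothetical usage sketch (not part of the driver): validating the
 * header of a newly received unsolicited frame before attempting to
 * match it to a vport or sequence.
 */
static bool __maybe_unused
lpfc_example_frame_ok(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *fc_hdr = dmabuf->hbuf.virt;

	return lpfc_fc_frame_check(phba, fc_hdr) == 0;
}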

/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. This function will return the VFI if a VF header
 * exists or 0 if no VF header exists.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}

/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match frame to a
 * vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;
	uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
			fc_hdr->fh_d_id[1] << 8 |
			fc_hdr->fh_d_id[2]);
	if (did == Fabric_DID)
		return phba->pport;
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}

/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}

/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}

/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
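
/*
 * Worked example of the timeout arithmetic above (illustrative; assumes
 * the common E_D_TOV of 2000ms): a sequence is considered stale once
 * jiffies passes dmabuf->time_stamp + msecs_to_jiffies(2000). The scan
 * stops at the first sequence still inside its window because the
 * rcv_buffer_list is kept ordered oldest-first.
 */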

/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport the frame was received on.
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
 * sequence list that the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			return seq_dmabuf;
		}
	}
	return NULL;
}

/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information from the basic abort @dmabuf. It checks whether such
 * a partially assembled sequence is held by the driver. If so, it shall
 * free up all the frames from the partially assembled sequence.
 *
 * Return
 * true  -- if there is matching partially assembled sequence present and all
 *          the frames freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}

/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * response iocb.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	if (cmd_iocbq)
		lpfc_sli_release_iocbq(phba, cmd_iocbq);

	/* Failure means BLS ABORT RSP did not get delivered to remote node */
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
			rsp_iocbq->iocb.ulpStatus,
			rsp_iocbq->iocb.un.ulpWord[4]);
}

/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver.
 **/
uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
		      uint16_t xri)
{
	int i;

	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
		if (xri == phba->sli4_hba.xri_ids[i])
			return i;
	}
	return NO_XRI;
}
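
/*
 * Illustrative sketch (not part of the driver): the lookup above is a plain
 * linear scan that maps a physical XRI back to its logical index, returning
 * a sentinel when the id is not owned by the driver. The hypothetical helper
 * below shows the same pattern over an arbitrary id table.
 */
#if 0
static int id_to_index(const uint16_t *ids, int nr_ids, uint16_t id)
{
	int i;

	for (i = 0; i < nr_ids; i++)
		if (ids[i] == id)
			return i;	/* logical index of the physical id */
	return -1;			/* caller's NO_XRI-style sentinel */
}
#endif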

/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @phba: Pointer to HBA context object.
 * @fc_hdr: pointer to a FC frame header.
 *
 * This function sends a basic response to a previous unsol sequence abort
 * event after aborting the sequence handling.
 **/
static void
lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
			struct fc_frame_header *fc_hdr)
{
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid;
	uint32_t sid, fctl;
	IOCB_t *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				"1268 Find ndlp returned NULL for oxid:x%x "
				"SID:x%x\n", oxid, sid);
		return;
	}
	if (lpfc_sli4_xri_inrange(phba, rxid))
		lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	ctiocb->context1 = ndlp;

	ctiocb->iocb_cmpl = NULL;
	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_lxritag = NO_XRI;
	ctiocb->sli4_xritag = NO_XRI;

	/* If the oxid maps to the FCP XRI range or if it is out of range,
	 * send a BLS_RJT. The driver no longer has that exchange.
	 * Override the IOCB for a BA_RJT.
	 */
	if (oxid > (phba->sli4_hba.max_cfg_param.max_xri +
		    phba->sli4_hba.max_cfg_param.xri_base) ||
	    oxid > (lpfc_sli4_get_els_iocb_cnt(phba) +
		    phba->sli4_hba.max_cfg_param.xri_base)) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI for the
		 * XRI_TAG field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
	}
	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
				"2925 Failed to issue CT ABTS RSP x%x on "
				"xri x%x, Data x%x\n",
				icmd->un.xseq64.w5.hcsw.Rctl, oxid,
				phba->link_state);
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}
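
/*
 * Illustrative sketch (not part of the driver): the response above answers
 * an ABTS with BA_ACC by default and downgrades to BA_RJT when the OX_ID
 * falls outside the exchange range the driver still owns. The hypothetical
 * predicate below restates that bounds test.
 */
#if 0
static bool should_reject_abts(uint16_t oxid, uint16_t xri_base,
			       uint16_t els_xri_cnt, uint16_t max_xri)
{
	/* Out of the adapter's XRI range, or beyond the ELS exchanges */
	return oxid > (xri_base + max_xri) || oxid > (xri_base + els_xri_cnt);
}
#endif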

/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per-oxid status that the
 * unsolicited sequence has been aborted. After that, it will issue a basic
 * accept or reject to respond to the abort.
 **/
void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool abts_par;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/*
		 * ABTS sent by responder to exchange, just free the buffer
		 */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
	} else {
		/*
		 * ABTS sent by initiator to exchange, need to do cleanup
		 */
		/* Try to abort partially assembled seq */
		abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);

		/* Send abort to ULP if partially seq abort failed */
		if (abts_par == false)
			lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
		else
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
	}
	/* Send response (BA_ACC or BA_RJT) to the abort requester */
	lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
}

/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three major things. 1) That the first
 * frame has a sequence count of zero. 2) There is a frame with the last frame
 * of sequence bit set. 3) That there are no holes in the sequence count. The
 * function will return 1 when the sequence is complete, otherwise it will
 * return 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
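
/*
 * Illustrative sketch (not part of the driver): completeness reduces to
 * "sequence counts form a contiguous run starting at zero, and some frame in
 * that run carries the END_SEQ bit in F_CTL". The hypothetical helper below
 * restates the same test over plain arrays.
 */
#if 0
static int seq_is_complete(const uint16_t *seq_cnts, const bool *end_seq,
			   int nr_frames)
{
	int i;

	for (i = 0; i < nr_frames; i++) {
		if (seq_cnts[i] != i)	/* hole or out-of-order frame */
			return 0;
		if (end_seq[i])		/* saw the final frame */
			return 1;
	}
	return 0;			/* contiguous so far, but still open */
}
#endif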

/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that could not
 * be described and returns a pointer to the first iocbq. If unable to allocate
 * any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = NO_XRI;
		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
			be16_to_cpu(fc_hdr->fh_ox_id);
		/* iocbq is prepped for internal consumption.  Physical vpi. */
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
			vport->phba->vpi_ids[vport->vpi];
		/* put the first buffer into the first IOCBq */
		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
		first_iocbq->iocb.un.rcvels.remoteID = sid;
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			pbde = (struct ulp_bde64 *)
					&iocbq->iocb.unsli3.sli3Words[4];
			pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;

			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
			tot_len += len;
		} else {
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
							IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
							IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;

			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			tot_len += len;
			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;

			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	return first_iocbq;
}
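
/*
 * Illustrative sketch (not part of the driver): the loop above packs the
 * frames of a sequence two-per-iocbq, with the first buffer reusing the
 * iocbq that anchors the chain. The hypothetical count below shows how many
 * iocbqs a sequence of n buffers consumes under that scheme.
 */
#if 0
static int iocbqs_needed(int nr_buffers)
{
	/* one iocbq per pair of buffers, rounded up */
	return (nr_buffers + 1) / 2;
}
#endif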

static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      &phba->sli.ring[LPFC_ELS_RING],
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);

	/* Free iocbs created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
				 &iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}

/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function is called with no lock held. This function processes all
 * the received buffers and gives them to upper layers when a received buffer
 * indicates that it is the final frame in the sequence. The interrupt
 * service routine processes received buffers at interrupt contexts and adds
 * received dma buffers to the rb_pend_list queue and signals the worker thread.
 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
 * appropriate receive function when the final frame in a sequence is received.
 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* check to see if this is a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	if ((bf_get(lpfc_cqe_code,
		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	else
		fcfi = bf_get(lpfc_rcqe_fcf_id,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
	if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
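
/*
 * Illustrative sketch (not part of the driver): the receive path above is a
 * small pipeline - validate the frame, resolve the owning vport from the
 * FCF index, special-case ABTS, accumulate the frame into its sequence, and
 * deliver only once the sequence is complete. Hypothetical pseudocode:
 */
#if 0
static void rx_pipeline(struct frame *f)
{
	struct sequence *seq;

	if (!frame_valid(f) || !resolve_vport(f)) {
		drop(f);
		return;
	}
	if (frame_is_abts(f)) {
		handle_abort(f);
		return;
	}
	seq = add_to_sequence(f);
	if (!seq) {
		drop(f);
		return;
	}
	if (sequence_complete(seq))
		deliver_to_ulp(seq);	/* otherwise wait for more frames */
}
#endif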

/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE / 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery when the driver is
 * sequential.
 *
 * Return codes
 * 	0 - successful
 * 	-EIO - The mailbox failed to complete successfully.
 * 	When this error occurs, the driver is not guaranteed
 *	to have any rpi regions posted to the device and
 *	must either attempt to repost the regions or take a
 *	fatal error.
 **/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;
	uint16_t lrpi = 0;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		/*
		 * Assign the rpi headers a physical rpi only if the driver
		 * has not initialized those resources.  A port reset only
		 * needs the headers posted.
		 */
		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
		    LPFC_RPI_RSRC_RDY)
			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];

		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}

 exit:
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
	       LPFC_RPI_RSRC_RDY);
	return rc;
}

/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi memory region.
 *
 * This routine is invoked to post a single rpi header to the
 * HBA consistent with the SLI-4 interface spec. This memory region
 * maps up to 64 rpi context regions.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post the rpi header memory region to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);

	/* Post the physical rpi to the port for this rpi header. */
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);

	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available logical rpi from
 * the driver's rpi bitmask. When the pool of remaining rpis runs below the
 * low-water mark, it also posts an additional rpi header page to the port.
 *
 * Returns
 * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	/*
	 * Fetch the next logical rpi.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irq(&phba->hbalock);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irq(&phba->hbalock);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now.  Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported at most by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irq(&phba->hbalock);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
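
/*
 * Illustrative sketch (not part of the driver): the allocator above is a
 * classic bitmap id allocator - find the first clear bit under a lock, set
 * it, and refill backing resources when a low-water mark is crossed. The
 * kernel's find_next_zero_bit()/set_bit() pair does the heavy lifting; the
 * hypothetical helper below shows the same idea without the refill step.
 */
#if 0
static long bitmap_alloc_id(unsigned long *bmask, unsigned long limit)
{
	unsigned long id = find_next_zero_bit(bmask, limit, 0);

	if (id >= limit)
		return -1;	/* pool exhausted */
	set_bit(id, bmask);	/* claim it; caller holds the lock */
	return id;
}
#endif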

/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver. The caller is
 * expected to hold the hbalock.
 **/
void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	}
}

/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory region that
 * provided rpis via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}

/**
 * lpfc_sli4_resume_rpi - Resume an rpi with the port
 * @ndlp: pointer to the node whose rpi is being resumed.
 * @cmpl: optional completion handler for the RESUME_RPI mailbox.
 * @arg: completion handler argument.
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command to
 * resume the rpi associated with @ndlp on the port.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the rpi resume via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Construct the RESUME_RPI mailbox command for this node. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->context1 = arg;
		mboxq->context2 = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *    0 success
 *    negative error value otherwise
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}

/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
 * command. It checks the mailbox subheader status and frees the
 * nonembedded mailbox resources.
 **/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if ((shdr_status || shdr_add_status) &&
		(shdr_status != STATUS_FCF_IN_USE))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2558 ADD_FCF_RECORD mailbox failed with "
			"status x%x add_status x%x\n",
			shdr_status, shdr_add_status);

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2523 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory.  This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0.  This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data. The data starts after
	 * the FCoE header plus word10. The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2515 ADD_FCF_RECORD mailbox failed with "
			"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}

/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 *
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
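
/*
 * Illustrative sketch (not part of the driver): the VLAN bitmap above packs
 * one valid-VLAN flag per bit, eight per byte, so VLAN id N lands in byte
 * N / 8 at bit position N % 8. A hypothetical set/test pair over such a
 * byte-wide bitmap:
 */
#if 0
static void vlan_bitmap_set(uint8_t *bitmap, unsigned int vlan_id)
{
	bitmap[vlan_id / 8] |= 1 << (vlan_id % 8);
}

static int vlan_bitmap_test(const uint8_t *bitmap, unsigned int vlan_id)
{
	return (bitmap[vlan_id / 8] >> (vlan_id % 8)) & 1;
}
#endif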

/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Reset eligible FCF count for new scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed, clear FCF_TS_INPROG flag */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}

/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
int
lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2763 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it's eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask with next priority level
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
 * rr_bmask based on their priority level, starting from the highest priority
 * to the lowest. The most likely FCF candidate will be in the highest
 * priority group. When this routine is called it searches the fcf_pri list
 * for the next lowest priority group and repopulates the rr_bmask with only
 * those fcf_indexes.
 *
 * Returns:
 * 1=success 0=failure
 **/
int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
			LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
			"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
			sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * The first priority that has not failed FLOGI
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * If next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * The first priority that has not failed FLOGI
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}
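
/*
 * Illustrative sketch (not part of the driver): the repopulation step above
 * picks the best remaining priority value and re-arms the round-robin mask
 * with every candidate at exactly that priority. A hypothetical restatement
 * over a plain array of (priority, failed) records, assuming lower priority
 * values are preferred, which matches the sorted fcf_pri list:
 */
#if 0
struct cand { uint16_t priority; bool failed; };

static int refill_mask(const struct cand *c, int n, unsigned long *mask)
{
	uint16_t best = 0;
	int i, filled = 0;

	for (i = 0; i < n; i++)		/* lowest value = highest priority */
		if (!c[i].failed && (!best || c[i].priority < best))
			best = c[i].priority;
	for (i = 0; i < n; i++)
		if (!c[i].failed && c[i].priority == best) {
			set_bit(i, mask);
			filled++;
		}
	return filled;
}
#endif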

/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals to the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

	/* Search start from next bit of currently registered FCF index */
next_priority:
	next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
					LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto next_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");
		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			return LPFC_FCOE_FCF_NEXT_NONE;
		else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"3063 Only FCF available idx %d, flag %x\n",
				next_fcf_index,
				phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
			return next_fcf_index;
		}
	}

	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
		LPFC_FCF_FLOGI_FAILED)
		goto next_priority;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
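
/*
 * Illustrative sketch (not part of the driver): the selection above is
 * round-robin over a bitmask - start scanning one past the current index,
 * wrap to bit zero when the scan runs off the end, and treat "came back to
 * where we started" as exhaustion of other candidates. The kernel's
 * find_next_bit() provides the scan; a hypothetical wrapper:
 */
#if 0
static unsigned long rr_next(const unsigned long *mask, unsigned long size,
			     unsigned long current_idx)
{
	unsigned long next;

	next = find_next_bit(mask, size, (current_idx + 1) % size);
	if (next >= size)			/* ran off the end: wrap */
		next = find_next_bit(mask, size, 0);
	if (next >= size || next == current_idx)
		return size;			/* no other candidate left */
	return next;
}
#endif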

/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit successfully set, otherwise, it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}

/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri;
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}

/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the rediscover FCF table mailbox object.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/
void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request for rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine of last resort for the FCF DEAD
 * event, used when the driver has failed to perform a fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * As a last resort, the FCF DEAD event failover is treated as a
	 * link down, but save the link state because we don't want it
	 * changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister the FCF if no devices are connected to it */
	lpfc_unregister_unused_fcf(phba);
}
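
/*
 * Illustrative call flow (taken from the completion handler above, not an
 * exhaustive list of callers): when a DEAD-FCF-triggered rediscover request
 * fails, the completion handler clears FCF_DEAD_DISC under the hbalock and
 * then invokes this failthrough:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
 *	spin_unlock_irq(&phba->hbalock);
 *	lpfc_sli4_fcf_dead_failthrough(phba);
 */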

/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configuration region 23 data.
 *
 * This function gets SLI3 port configuration region 23 data through the
 * memory dump mailbox command. When it successfully retrieves the data,
 * the size of the data is returned; otherwise, 0 is returned.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * Dump mem may return zero when finished or when a mailbox
		 * error occurred; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}
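
/*
 * Usage sketch (mirrors lpfc_sli_read_link_ste() below; the exact buffer
 * handling shown is an illustration): callers supply a DMP_RGN23_SIZE
 * buffer and treat a zero return as "no data":
 *
 *	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
 *	if (rgn23_data) {
 *		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
 *		if (data_size)
 *			... parse region 23 TLVs ...
 *		kfree(rgn23_data);
 *	}
 */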

/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configuration region 23 data.
 *
 * This function gets SLI4 port configuration region 23 data through the
 * memory dump mailbox command. When it successfully retrieves the data,
 * the size of the data is returned; otherwise, 0 is returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}
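
/*
 * Illustrative dispatch (taken from lpfc_sli_read_link_ste() below): the
 * SLI3 and SLI4 variants share the same contract, so callers pick one by
 * SLI revision and interface type:
 *
 *	if (phba->sli_rev < LPFC_SLI_REV4)
 *		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
 *	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
 *		 LPFC_SLI_INTF_IF_TYPE_0)
 *		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
 */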

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses its TLVs for the port status to
 * decide whether the user has disabled the port. If a TLV indicates that
 * the port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not a driver specific TLV or the driver id
		 * is not the linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for the configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				/*
				 * Advance both offsets by this record's
				 * length; read the length before offset
				 * moves past the record header.
				 */
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains a PORT_STE configuration */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
	return;
}
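
/*
 * Region 23 layout as implied by the parser above (a sketch inferred from
 * the checks; offsets in bytes):
 *
 *	[0..3]  LPFC_REGION23_SIGNATURE
 *	[4..7]  LPFC_REGION23_VERSION (only byte 4 is checked)
 *	[8.. ]  TLV records: byte 0 = type, byte 1 = length in words
 *	        (record size = len * 4 + 4), terminated by
 *	        LPFC_REGION23_LAST_REC. A driver-specific record
 *	        (DRIVER_SPECIFIC_TYPE with LINUX_DRIVER_ID) carries
 *	        sub-TLVs; a PORT_STE_TYPE sub-TLV whose byte 2 is zero
 *	        marks the link as user-disabled (LINK_DISABLED).
 */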

/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Return 0 if successful; @offset will contain the new offset to use
 * for the next write.
 * Return a negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}
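
/*
 * Usage sketch (hypothetical caller; the list setup and loop shape are
 * assumptions for illustration): firmware download code posts the object
 * in chunks, letting @offset advance after each successful mailbox:
 *
 *	uint32_t offset = 0;
 *
 *	while (offset < fw_size) {
 *		... refill dmabuf_list with up to
 *		    LPFC_MBX_WR_CONFIG_MAX_BDE SLI4_PAGE_SIZE buffers ...
 *		rc = lpfc_wr_object(phba, &dmabuf_list, fw_size, &offset);
 *		if (rc)
 *			break;
 *	}
 *
 * The eof bit is set by lpfc_wr_object() itself on the chunk that reaches
 * @size, so the caller only tracks the running offset.
 */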

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailbox queue and cleans up all
 * REG_LOGIN and REG_VPI mailbox commands associated with the vport. It is
 * called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up the active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Take a reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when the mailbox completes */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Clean up any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox has already been processed or is
			 * for another vport, ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when the mailbox
				 * completes
				 */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
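
/*
 * Illustrative context (an assumption, not an exhaustive caller list): a
 * Clear Virtual Link handler restarts vport discovery roughly as:
 *
 *	lpfc_cleanup_pending_mbox(vport);   // drop stale REG_LOGIN/REG_VPI
 *	... restart fabric discovery on the vport ...
 *
 * Note the lock ordering above: hbalock is released before taking
 * shost->host_lock to clear NLP_IGNR_REG_CMPL and then reacquired, which
 * is why the mboxq_cmpl scan restarts via restart_loop.
 */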

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs available. This congestion can occur with large
 * vport counts during node discovery.
 **/

uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (pring->txq_cnt > pring->txq_max)
		pring->txq_max = pring->txq_cnt;

	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (pring->txq_cnt) {
		spin_lock_irqsave(&phba->hbalock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		sglq = __lpfc_sli_get_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			break;
		} else {
			if (!piocbq) {
				/* The txq_cnt is out of sync. This should
				 * never happen.
				 */
				sglq = __lpfc_clear_active_sglq(phba,
						 sglq->sli4_lxritag);
				spin_unlock_irqrestore(&phba->hbalock, iflags);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2823 txq empty and txq_cnt is %d\n",
					pring->txq_cnt);
				break;
			}
		}

		/* The xri and iocb resources are secured;
		 * attempt to issue the request.
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return pring->txq_cnt;
}
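
/*
 * Usage sketch (hypothetical, for illustration): since the function
 * returns the remaining txq depth, a path that has just freed SGL
 * resources can kick a drain pass when work is still queued:
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4 && pring->txq_cnt)
 *		lpfc_drain_txq(phba);
 *
 * Freeing an ELS SGL is the natural trigger here, since the lack of SGLs
 * is what deferred these IOCBs onto the txq in the first place.
 */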