2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Description: Fast Path Operators
39 #include <linux/interrupt.h>
40 #include <linux/spinlock.h>
41 #include <linux/sched.h>
42 #include <linux/slab.h>
43 #include <linux/pci.h>
44 #include <linux/prefetch.h>
48 #include "qplib_res.h"
49 #include "qplib_rcfw.h"
53 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq
*cq
);
55 static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res
*res
,
56 struct bnxt_qplib_qp
*qp
)
58 struct bnxt_qplib_q
*rq
= &qp
->rq
;
59 struct bnxt_qplib_q
*sq
= &qp
->sq
;
62 dma_free_coherent(&res
->pdev
->dev
,
63 rq
->hwq
.max_elements
* qp
->rq_hdr_buf_size
,
64 qp
->rq_hdr_buf
, qp
->rq_hdr_buf_map
);
66 dma_free_coherent(&res
->pdev
->dev
,
67 sq
->hwq
.max_elements
* qp
->sq_hdr_buf_size
,
68 qp
->sq_hdr_buf
, qp
->sq_hdr_buf_map
);
69 qp
->rq_hdr_buf
= NULL
;
70 qp
->sq_hdr_buf
= NULL
;
71 qp
->rq_hdr_buf_map
= 0;
72 qp
->sq_hdr_buf_map
= 0;
73 qp
->sq_hdr_buf_size
= 0;
74 qp
->rq_hdr_buf_size
= 0;
77 static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res
*res
,
78 struct bnxt_qplib_qp
*qp
)
80 struct bnxt_qplib_q
*rq
= &qp
->rq
;
81 struct bnxt_qplib_q
*sq
= &qp
->rq
;
84 if (qp
->sq_hdr_buf_size
&& sq
->hwq
.max_elements
) {
85 qp
->sq_hdr_buf
= dma_alloc_coherent(&res
->pdev
->dev
,
86 sq
->hwq
.max_elements
*
88 &qp
->sq_hdr_buf_map
, GFP_KERNEL
);
89 if (!qp
->sq_hdr_buf
) {
91 dev_err(&res
->pdev
->dev
,
92 "QPLIB: Failed to create sq_hdr_buf");
97 if (qp
->rq_hdr_buf_size
&& rq
->hwq
.max_elements
) {
98 qp
->rq_hdr_buf
= dma_alloc_coherent(&res
->pdev
->dev
,
99 rq
->hwq
.max_elements
*
103 if (!qp
->rq_hdr_buf
) {
105 dev_err(&res
->pdev
->dev
,
106 "QPLIB: Failed to create rq_hdr_buf");
113 bnxt_qplib_free_qp_hdr_buf(res
, qp
);
117 static void bnxt_qplib_service_nq(unsigned long data
)
119 struct bnxt_qplib_nq
*nq
= (struct bnxt_qplib_nq
*)data
;
120 struct bnxt_qplib_hwq
*hwq
= &nq
->hwq
;
121 struct nq_base
*nqe
, **nq_ptr
;
122 int num_cqne_processed
= 0;
123 u32 sw_cons
, raw_cons
;
125 int budget
= nq
->budget
;
128 /* Service the NQ until empty */
129 raw_cons
= hwq
->cons
;
131 sw_cons
= HWQ_CMP(raw_cons
, hwq
);
132 nq_ptr
= (struct nq_base
**)hwq
->pbl_ptr
;
133 nqe
= &nq_ptr
[NQE_PG(sw_cons
)][NQE_IDX(sw_cons
)];
134 if (!NQE_CMP_VALID(nqe
, raw_cons
, hwq
->max_elements
))
137 type
= le16_to_cpu(nqe
->info10_type
) & NQ_BASE_TYPE_MASK
;
139 case NQ_BASE_TYPE_CQ_NOTIFICATION
:
141 struct nq_cn
*nqcne
= (struct nq_cn
*)nqe
;
143 q_handle
= le32_to_cpu(nqcne
->cq_handle_low
);
144 q_handle
|= (u64
)le32_to_cpu(nqcne
->cq_handle_high
)
146 bnxt_qplib_arm_cq_enable((struct bnxt_qplib_cq
*)
147 ((unsigned long)q_handle
));
148 if (!nq
->cqn_handler(nq
, (struct bnxt_qplib_cq
*)
149 ((unsigned long)q_handle
)))
150 num_cqne_processed
++;
152 dev_warn(&nq
->pdev
->dev
,
153 "QPLIB: cqn - type 0x%x not handled",
157 case NQ_BASE_TYPE_DBQ_EVENT
:
160 dev_warn(&nq
->pdev
->dev
,
161 "QPLIB: nqe with type = 0x%x not handled",
167 if (hwq
->cons
!= raw_cons
) {
168 hwq
->cons
= raw_cons
;
169 NQ_DB_REARM(nq
->bar_reg_iomem
, hwq
->cons
, hwq
->max_elements
);
173 static irqreturn_t
bnxt_qplib_nq_irq(int irq
, void *dev_instance
)
175 struct bnxt_qplib_nq
*nq
= dev_instance
;
176 struct bnxt_qplib_hwq
*hwq
= &nq
->hwq
;
177 struct nq_base
**nq_ptr
;
180 /* Prefetch the NQ element */
181 sw_cons
= HWQ_CMP(hwq
->cons
, hwq
);
182 nq_ptr
= (struct nq_base
**)nq
->hwq
.pbl_ptr
;
183 prefetch(&nq_ptr
[NQE_PG(sw_cons
)][NQE_IDX(sw_cons
)]);
185 /* Fan out to CPU affinitized kthreads? */
186 tasklet_schedule(&nq
->worker
);
191 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq
*nq
)
193 /* Make sure the HW is stopped! */
194 synchronize_irq(nq
->vector
);
195 tasklet_disable(&nq
->worker
);
196 tasklet_kill(&nq
->worker
);
199 free_irq(nq
->vector
, nq
);
200 nq
->requested
= false;
202 if (nq
->bar_reg_iomem
)
203 iounmap(nq
->bar_reg_iomem
);
204 nq
->bar_reg_iomem
= NULL
;
206 nq
->cqn_handler
= NULL
;
207 nq
->srqn_handler
= NULL
;
211 int bnxt_qplib_enable_nq(struct pci_dev
*pdev
, struct bnxt_qplib_nq
*nq
,
212 int msix_vector
, int bar_reg_offset
,
213 int (*cqn_handler
)(struct bnxt_qplib_nq
*nq
,
214 struct bnxt_qplib_cq
*),
215 int (*srqn_handler
)(struct bnxt_qplib_nq
*nq
,
218 resource_size_t nq_base
;
222 nq
->vector
= msix_vector
;
224 nq
->cqn_handler
= cqn_handler
;
226 nq
->srqn_handler
= srqn_handler
;
228 tasklet_init(&nq
->worker
, bnxt_qplib_service_nq
, (unsigned long)nq
);
230 nq
->requested
= false;
231 rc
= request_irq(nq
->vector
, bnxt_qplib_nq_irq
, 0, "bnxt_qplib_nq", nq
);
233 dev_err(&nq
->pdev
->dev
,
234 "Failed to request IRQ for NQ: %#x", rc
);
235 bnxt_qplib_disable_nq(nq
);
238 nq
->requested
= true;
239 nq
->bar_reg
= NQ_CONS_PCI_BAR_REGION
;
240 nq
->bar_reg_off
= bar_reg_offset
;
241 nq_base
= pci_resource_start(pdev
, nq
->bar_reg
);
246 nq
->bar_reg_iomem
= ioremap_nocache(nq_base
+ nq
->bar_reg_off
, 4);
247 if (!nq
->bar_reg_iomem
) {
251 NQ_DB_REARM(nq
->bar_reg_iomem
, nq
->hwq
.cons
, nq
->hwq
.max_elements
);
255 bnxt_qplib_disable_nq(nq
);
259 void bnxt_qplib_free_nq(struct bnxt_qplib_nq
*nq
)
261 if (nq
->hwq
.max_elements
)
262 bnxt_qplib_free_hwq(nq
->pdev
, &nq
->hwq
);
265 int bnxt_qplib_alloc_nq(struct pci_dev
*pdev
, struct bnxt_qplib_nq
*nq
)
268 if (!nq
->hwq
.max_elements
||
269 nq
->hwq
.max_elements
> BNXT_QPLIB_NQE_MAX_CNT
)
270 nq
->hwq
.max_elements
= BNXT_QPLIB_NQE_MAX_CNT
;
272 if (bnxt_qplib_alloc_init_hwq(nq
->pdev
, &nq
->hwq
, NULL
, 0,
273 &nq
->hwq
.max_elements
,
274 BNXT_QPLIB_MAX_NQE_ENTRY_SIZE
, 0,
275 PAGE_SIZE
, HWQ_TYPE_L2_CMPL
))
283 int bnxt_qplib_create_qp1(struct bnxt_qplib_res
*res
, struct bnxt_qplib_qp
*qp
)
285 struct bnxt_qplib_rcfw
*rcfw
= res
->rcfw
;
286 struct cmdq_create_qp1 req
;
287 struct creq_create_qp1_resp resp
;
288 struct bnxt_qplib_pbl
*pbl
;
289 struct bnxt_qplib_q
*sq
= &qp
->sq
;
290 struct bnxt_qplib_q
*rq
= &qp
->rq
;
295 RCFW_CMD_PREP(req
, CREATE_QP1
, cmd_flags
);
299 req
.dpi
= cpu_to_le32(qp
->dpi
->dpi
);
300 req
.qp_handle
= cpu_to_le64(qp
->qp_handle
);
303 sq
->hwq
.max_elements
= sq
->max_wqe
;
304 rc
= bnxt_qplib_alloc_init_hwq(res
->pdev
, &sq
->hwq
, NULL
, 0,
305 &sq
->hwq
.max_elements
,
306 BNXT_QPLIB_MAX_SQE_ENTRY_SIZE
, 0,
307 PAGE_SIZE
, HWQ_TYPE_QUEUE
);
311 sq
->swq
= kcalloc(sq
->hwq
.max_elements
, sizeof(*sq
->swq
), GFP_KERNEL
);
316 pbl
= &sq
->hwq
.pbl
[PBL_LVL_0
];
317 req
.sq_pbl
= cpu_to_le64(pbl
->pg_map_arr
[0]);
318 req
.sq_pg_size_sq_lvl
=
319 ((sq
->hwq
.level
& CMDQ_CREATE_QP1_SQ_LVL_MASK
)
320 << CMDQ_CREATE_QP1_SQ_LVL_SFT
) |
321 (pbl
->pg_size
== ROCE_PG_SIZE_4K
?
322 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K
:
323 pbl
->pg_size
== ROCE_PG_SIZE_8K
?
324 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K
:
325 pbl
->pg_size
== ROCE_PG_SIZE_64K
?
326 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K
:
327 pbl
->pg_size
== ROCE_PG_SIZE_2M
?
328 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M
:
329 pbl
->pg_size
== ROCE_PG_SIZE_8M
?
330 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M
:
331 pbl
->pg_size
== ROCE_PG_SIZE_1G
?
332 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G
:
333 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K
);
336 req
.scq_cid
= cpu_to_le32(qp
->scq
->id
);
338 qp_flags
|= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE
;
342 rq
->hwq
.max_elements
= qp
->rq
.max_wqe
;
343 rc
= bnxt_qplib_alloc_init_hwq(res
->pdev
, &rq
->hwq
, NULL
, 0,
344 &rq
->hwq
.max_elements
,
345 BNXT_QPLIB_MAX_RQE_ENTRY_SIZE
, 0,
346 PAGE_SIZE
, HWQ_TYPE_QUEUE
);
350 rq
->swq
= kcalloc(rq
->hwq
.max_elements
, sizeof(*rq
->swq
),
356 pbl
= &rq
->hwq
.pbl
[PBL_LVL_0
];
357 req
.rq_pbl
= cpu_to_le64(pbl
->pg_map_arr
[0]);
358 req
.rq_pg_size_rq_lvl
=
359 ((rq
->hwq
.level
& CMDQ_CREATE_QP1_RQ_LVL_MASK
) <<
360 CMDQ_CREATE_QP1_RQ_LVL_SFT
) |
361 (pbl
->pg_size
== ROCE_PG_SIZE_4K
?
362 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K
:
363 pbl
->pg_size
== ROCE_PG_SIZE_8K
?
364 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K
:
365 pbl
->pg_size
== ROCE_PG_SIZE_64K
?
366 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K
:
367 pbl
->pg_size
== ROCE_PG_SIZE_2M
?
368 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M
:
369 pbl
->pg_size
== ROCE_PG_SIZE_8M
?
370 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M
:
371 pbl
->pg_size
== ROCE_PG_SIZE_1G
?
372 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G
:
373 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K
);
375 req
.rcq_cid
= cpu_to_le32(qp
->rcq
->id
);
378 /* Header buffer - allow hdr_buf pass in */
379 rc
= bnxt_qplib_alloc_qp_hdr_buf(res
, qp
);
384 req
.qp_flags
= cpu_to_le32(qp_flags
);
385 req
.sq_size
= cpu_to_le32(sq
->hwq
.max_elements
);
386 req
.rq_size
= cpu_to_le32(rq
->hwq
.max_elements
);
389 cpu_to_le16((sq
->max_sge
& CMDQ_CREATE_QP1_SQ_SGE_MASK
) <<
390 CMDQ_CREATE_QP1_SQ_SGE_SFT
);
392 cpu_to_le16((rq
->max_sge
& CMDQ_CREATE_QP1_RQ_SGE_MASK
) <<
393 CMDQ_CREATE_QP1_RQ_SGE_SFT
);
395 req
.pd_id
= cpu_to_le32(qp
->pd
->id
);
397 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
,
398 (void *)&resp
, NULL
, 0);
402 qp
->id
= le32_to_cpu(resp
.xid
);
403 qp
->cur_qp_state
= CMDQ_MODIFY_QP_NEW_STATE_RESET
;
404 sq
->flush_in_progress
= false;
405 rq
->flush_in_progress
= false;
410 bnxt_qplib_free_qp_hdr_buf(res
, qp
);
412 bnxt_qplib_free_hwq(res
->pdev
, &rq
->hwq
);
415 bnxt_qplib_free_hwq(res
->pdev
, &sq
->hwq
);
421 int bnxt_qplib_create_qp(struct bnxt_qplib_res
*res
, struct bnxt_qplib_qp
*qp
)
423 struct bnxt_qplib_rcfw
*rcfw
= res
->rcfw
;
424 struct sq_send
*hw_sq_send_hdr
, **hw_sq_send_ptr
;
425 struct cmdq_create_qp req
;
426 struct creq_create_qp_resp resp
;
427 struct bnxt_qplib_pbl
*pbl
;
428 struct sq_psn_search
**psn_search_ptr
;
429 unsigned long int psn_search
, poff
= 0;
430 struct bnxt_qplib_q
*sq
= &qp
->sq
;
431 struct bnxt_qplib_q
*rq
= &qp
->rq
;
432 struct bnxt_qplib_hwq
*xrrq
;
433 int i
, rc
, req_size
, psn_sz
;
434 u16 cmd_flags
= 0, max_ssge
;
435 u32 sw_prod
, qp_flags
= 0;
437 RCFW_CMD_PREP(req
, CREATE_QP
, cmd_flags
);
441 req
.dpi
= cpu_to_le32(qp
->dpi
->dpi
);
442 req
.qp_handle
= cpu_to_le64(qp
->qp_handle
);
445 psn_sz
= (qp
->type
== CMDQ_CREATE_QP_TYPE_RC
) ?
446 sizeof(struct sq_psn_search
) : 0;
447 sq
->hwq
.max_elements
= sq
->max_wqe
;
448 rc
= bnxt_qplib_alloc_init_hwq(res
->pdev
, &sq
->hwq
, sq
->sglist
,
449 sq
->nmap
, &sq
->hwq
.max_elements
,
450 BNXT_QPLIB_MAX_SQE_ENTRY_SIZE
,
452 PAGE_SIZE
, HWQ_TYPE_QUEUE
);
456 sq
->swq
= kcalloc(sq
->hwq
.max_elements
, sizeof(*sq
->swq
), GFP_KERNEL
);
461 hw_sq_send_ptr
= (struct sq_send
**)sq
->hwq
.pbl_ptr
;
463 psn_search_ptr
= (struct sq_psn_search
**)
464 &hw_sq_send_ptr
[get_sqe_pg
465 (sq
->hwq
.max_elements
)];
466 psn_search
= (unsigned long int)
467 &hw_sq_send_ptr
[get_sqe_pg(sq
->hwq
.max_elements
)]
468 [get_sqe_idx(sq
->hwq
.max_elements
)];
469 if (psn_search
& ~PAGE_MASK
) {
470 /* If the psn_search does not start on a page boundary,
471 * then calculate the offset
473 poff
= (psn_search
& ~PAGE_MASK
) /
474 BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE
;
476 for (i
= 0; i
< sq
->hwq
.max_elements
; i
++)
477 sq
->swq
[i
].psn_search
=
478 &psn_search_ptr
[get_psne_pg(i
+ poff
)]
479 [get_psne_idx(i
+ poff
)];
481 pbl
= &sq
->hwq
.pbl
[PBL_LVL_0
];
482 req
.sq_pbl
= cpu_to_le64(pbl
->pg_map_arr
[0]);
483 req
.sq_pg_size_sq_lvl
=
484 ((sq
->hwq
.level
& CMDQ_CREATE_QP_SQ_LVL_MASK
)
485 << CMDQ_CREATE_QP_SQ_LVL_SFT
) |
486 (pbl
->pg_size
== ROCE_PG_SIZE_4K
?
487 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K
:
488 pbl
->pg_size
== ROCE_PG_SIZE_8K
?
489 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K
:
490 pbl
->pg_size
== ROCE_PG_SIZE_64K
?
491 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K
:
492 pbl
->pg_size
== ROCE_PG_SIZE_2M
?
493 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M
:
494 pbl
->pg_size
== ROCE_PG_SIZE_8M
?
495 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M
:
496 pbl
->pg_size
== ROCE_PG_SIZE_1G
?
497 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G
:
498 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K
);
500 /* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
501 hw_sq_send_ptr
= (struct sq_send
**)sq
->hwq
.pbl_ptr
;
502 for (sw_prod
= 0; sw_prod
< sq
->hwq
.max_elements
; sw_prod
++) {
503 hw_sq_send_hdr
= &hw_sq_send_ptr
[get_sqe_pg(sw_prod
)]
504 [get_sqe_idx(sw_prod
)];
505 hw_sq_send_hdr
->wqe_type
= SQ_BASE_WQE_TYPE_LOCAL_INVALID
;
509 req
.scq_cid
= cpu_to_le32(qp
->scq
->id
);
511 qp_flags
|= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE
;
512 qp_flags
|= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED
;
514 qp_flags
|= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION
;
518 rq
->hwq
.max_elements
= rq
->max_wqe
;
519 rc
= bnxt_qplib_alloc_init_hwq(res
->pdev
, &rq
->hwq
, rq
->sglist
,
520 rq
->nmap
, &rq
->hwq
.max_elements
,
521 BNXT_QPLIB_MAX_RQE_ENTRY_SIZE
, 0,
522 PAGE_SIZE
, HWQ_TYPE_QUEUE
);
526 rq
->swq
= kcalloc(rq
->hwq
.max_elements
, sizeof(*rq
->swq
),
532 pbl
= &rq
->hwq
.pbl
[PBL_LVL_0
];
533 req
.rq_pbl
= cpu_to_le64(pbl
->pg_map_arr
[0]);
534 req
.rq_pg_size_rq_lvl
=
535 ((rq
->hwq
.level
& CMDQ_CREATE_QP_RQ_LVL_MASK
) <<
536 CMDQ_CREATE_QP_RQ_LVL_SFT
) |
537 (pbl
->pg_size
== ROCE_PG_SIZE_4K
?
538 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K
:
539 pbl
->pg_size
== ROCE_PG_SIZE_8K
?
540 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K
:
541 pbl
->pg_size
== ROCE_PG_SIZE_64K
?
542 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K
:
543 pbl
->pg_size
== ROCE_PG_SIZE_2M
?
544 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M
:
545 pbl
->pg_size
== ROCE_PG_SIZE_8M
?
546 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M
:
547 pbl
->pg_size
== ROCE_PG_SIZE_1G
?
548 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G
:
549 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K
);
553 req
.rcq_cid
= cpu_to_le32(qp
->rcq
->id
);
554 req
.qp_flags
= cpu_to_le32(qp_flags
);
555 req
.sq_size
= cpu_to_le32(sq
->hwq
.max_elements
);
556 req
.rq_size
= cpu_to_le32(rq
->hwq
.max_elements
);
557 qp
->sq_hdr_buf
= NULL
;
558 qp
->rq_hdr_buf
= NULL
;
560 rc
= bnxt_qplib_alloc_qp_hdr_buf(res
, qp
);
564 /* CTRL-22434: Irrespective of the requested SGE count on the SQ
565 * always create the QP with max send sges possible if the requested
566 * inline size is greater than 0.
568 max_ssge
= qp
->max_inline_data
? 6 : sq
->max_sge
;
569 req
.sq_fwo_sq_sge
= cpu_to_le16(
570 ((max_ssge
& CMDQ_CREATE_QP_SQ_SGE_MASK
)
571 << CMDQ_CREATE_QP_SQ_SGE_SFT
) | 0);
572 req
.rq_fwo_rq_sge
= cpu_to_le16(
573 ((rq
->max_sge
& CMDQ_CREATE_QP_RQ_SGE_MASK
)
574 << CMDQ_CREATE_QP_RQ_SGE_SFT
) | 0);
579 ORD_LIMIT_TO_ORRQ_SLOTS(qp
->max_rd_atomic
);
580 req_size
= xrrq
->max_elements
*
581 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE
+ PAGE_SIZE
- 1;
582 req_size
&= ~(PAGE_SIZE
- 1);
583 rc
= bnxt_qplib_alloc_init_hwq(res
->pdev
, xrrq
, NULL
, 0,
585 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE
,
586 0, req_size
, HWQ_TYPE_CTX
);
589 pbl
= &xrrq
->pbl
[PBL_LVL_0
];
590 req
.orrq_addr
= cpu_to_le64(pbl
->pg_map_arr
[0]);
593 xrrq
->max_elements
= IRD_LIMIT_TO_IRRQ_SLOTS(
594 qp
->max_dest_rd_atomic
);
595 req_size
= xrrq
->max_elements
*
596 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE
+ PAGE_SIZE
- 1;
597 req_size
&= ~(PAGE_SIZE
- 1);
599 rc
= bnxt_qplib_alloc_init_hwq(res
->pdev
, xrrq
, NULL
, 0,
601 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE
,
602 0, req_size
, HWQ_TYPE_CTX
);
606 pbl
= &xrrq
->pbl
[PBL_LVL_0
];
607 req
.irrq_addr
= cpu_to_le64(pbl
->pg_map_arr
[0]);
609 req
.pd_id
= cpu_to_le32(qp
->pd
->id
);
611 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
,
612 (void *)&resp
, NULL
, 0);
616 qp
->id
= le32_to_cpu(resp
.xid
);
617 qp
->cur_qp_state
= CMDQ_MODIFY_QP_NEW_STATE_RESET
;
618 sq
->flush_in_progress
= false;
619 rq
->flush_in_progress
= false;
624 if (qp
->irrq
.max_elements
)
625 bnxt_qplib_free_hwq(res
->pdev
, &qp
->irrq
);
627 if (qp
->orrq
.max_elements
)
628 bnxt_qplib_free_hwq(res
->pdev
, &qp
->orrq
);
630 bnxt_qplib_free_qp_hdr_buf(res
, qp
);
632 bnxt_qplib_free_hwq(res
->pdev
, &rq
->hwq
);
635 bnxt_qplib_free_hwq(res
->pdev
, &sq
->hwq
);
641 static void __modify_flags_from_init_state(struct bnxt_qplib_qp
*qp
)
644 case CMDQ_MODIFY_QP_NEW_STATE_RTR
:
645 /* INIT->RTR, configure the path_mtu to the default
646 * 2048 if not being requested
648 if (!(qp
->modify_flags
&
649 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU
)) {
651 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU
;
653 CMDQ_MODIFY_QP_PATH_MTU_MTU_2048
;
656 ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID
;
657 /* Bono FW require the max_dest_rd_atomic to be >= 1 */
658 if (qp
->max_dest_rd_atomic
< 1)
659 qp
->max_dest_rd_atomic
= 1;
660 qp
->modify_flags
&= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC
;
661 /* Bono FW 20.6.5 requires SGID_INDEX configuration */
662 if (!(qp
->modify_flags
&
663 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX
)) {
665 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX
;
666 qp
->ah
.sgid_index
= 0;
674 static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp
*qp
)
677 case CMDQ_MODIFY_QP_NEW_STATE_RTS
:
678 /* Bono FW requires the max_rd_atomic to be >= 1 */
679 if (qp
->max_rd_atomic
< 1)
680 qp
->max_rd_atomic
= 1;
681 /* Bono FW does not allow PKEY_INDEX,
682 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
683 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
684 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
688 ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY
|
689 CMDQ_MODIFY_QP_MODIFY_MASK_DGID
|
690 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL
|
691 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX
|
692 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT
|
693 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS
|
694 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC
|
695 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU
|
696 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN
|
697 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER
|
698 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC
|
699 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID
);
706 static void __filter_modify_flags(struct bnxt_qplib_qp
*qp
)
708 switch (qp
->cur_qp_state
) {
709 case CMDQ_MODIFY_QP_NEW_STATE_RESET
:
711 case CMDQ_MODIFY_QP_NEW_STATE_INIT
:
712 __modify_flags_from_init_state(qp
);
714 case CMDQ_MODIFY_QP_NEW_STATE_RTR
:
715 __modify_flags_from_rtr_state(qp
);
717 case CMDQ_MODIFY_QP_NEW_STATE_RTS
:
719 case CMDQ_MODIFY_QP_NEW_STATE_SQD
:
721 case CMDQ_MODIFY_QP_NEW_STATE_SQE
:
723 case CMDQ_MODIFY_QP_NEW_STATE_ERR
:
730 int bnxt_qplib_modify_qp(struct bnxt_qplib_res
*res
, struct bnxt_qplib_qp
*qp
)
732 struct bnxt_qplib_rcfw
*rcfw
= res
->rcfw
;
733 struct cmdq_modify_qp req
;
734 struct creq_modify_qp_resp resp
;
735 u16 cmd_flags
= 0, pkey
;
740 RCFW_CMD_PREP(req
, MODIFY_QP
, cmd_flags
);
742 /* Filter out the qp_attr_mask based on the state->new transition */
743 __filter_modify_flags(qp
);
744 bmask
= qp
->modify_flags
;
745 req
.modify_mask
= cpu_to_le32(qp
->modify_flags
);
746 req
.qp_cid
= cpu_to_le32(qp
->id
);
747 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_STATE
) {
748 req
.network_type_en_sqd_async_notify_new_state
=
749 (qp
->state
& CMDQ_MODIFY_QP_NEW_STATE_MASK
) |
750 (qp
->en_sqd_async_notify
?
751 CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY
: 0);
753 req
.network_type_en_sqd_async_notify_new_state
|= qp
->nw_type
;
755 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS
)
756 req
.access
= qp
->access
;
758 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_PKEY
) {
759 if (!bnxt_qplib_get_pkey(res
, &res
->pkey_tbl
,
760 qp
->pkey_index
, &pkey
))
761 req
.pkey
= cpu_to_le16(pkey
);
763 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_QKEY
)
764 req
.qkey
= cpu_to_le32(qp
->qkey
);
766 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_DGID
) {
767 memcpy(temp32
, qp
->ah
.dgid
.data
, sizeof(struct bnxt_qplib_gid
));
768 req
.dgid
[0] = cpu_to_le32(temp32
[0]);
769 req
.dgid
[1] = cpu_to_le32(temp32
[1]);
770 req
.dgid
[2] = cpu_to_le32(temp32
[2]);
771 req
.dgid
[3] = cpu_to_le32(temp32
[3]);
773 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL
)
774 req
.flow_label
= cpu_to_le32(qp
->ah
.flow_label
);
776 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX
)
777 req
.sgid_index
= cpu_to_le16(res
->sgid_tbl
.hw_id
778 [qp
->ah
.sgid_index
]);
780 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT
)
781 req
.hop_limit
= qp
->ah
.hop_limit
;
783 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS
)
784 req
.traffic_class
= qp
->ah
.traffic_class
;
786 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC
)
787 memcpy(req
.dest_mac
, qp
->ah
.dmac
, 6);
789 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU
)
790 req
.path_mtu
= qp
->path_mtu
;
792 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT
)
793 req
.timeout
= qp
->timeout
;
795 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT
)
796 req
.retry_cnt
= qp
->retry_cnt
;
798 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY
)
799 req
.rnr_retry
= qp
->rnr_retry
;
801 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER
)
802 req
.min_rnr_timer
= qp
->min_rnr_timer
;
804 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN
)
805 req
.rq_psn
= cpu_to_le32(qp
->rq
.psn
);
807 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN
)
808 req
.sq_psn
= cpu_to_le32(qp
->sq
.psn
);
810 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC
)
812 ORD_LIMIT_TO_ORRQ_SLOTS(qp
->max_rd_atomic
);
814 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC
)
815 req
.max_dest_rd_atomic
=
816 IRD_LIMIT_TO_IRRQ_SLOTS(qp
->max_dest_rd_atomic
);
818 req
.sq_size
= cpu_to_le32(qp
->sq
.hwq
.max_elements
);
819 req
.rq_size
= cpu_to_le32(qp
->rq
.hwq
.max_elements
);
820 req
.sq_sge
= cpu_to_le16(qp
->sq
.max_sge
);
821 req
.rq_sge
= cpu_to_le16(qp
->rq
.max_sge
);
822 req
.max_inline_data
= cpu_to_le32(qp
->max_inline_data
);
823 if (bmask
& CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID
)
824 req
.dest_qp_id
= cpu_to_le32(qp
->dest_qpn
);
826 req
.vlan_pcp_vlan_dei_vlan_id
= cpu_to_le16(qp
->vlan_id
);
828 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
,
829 (void *)&resp
, NULL
, 0);
832 qp
->cur_qp_state
= qp
->state
;
836 int bnxt_qplib_query_qp(struct bnxt_qplib_res
*res
, struct bnxt_qplib_qp
*qp
)
838 struct bnxt_qplib_rcfw
*rcfw
= res
->rcfw
;
839 struct cmdq_query_qp req
;
840 struct creq_query_qp_resp resp
;
841 struct bnxt_qplib_rcfw_sbuf
*sbuf
;
842 struct creq_query_qp_resp_sb
*sb
;
847 RCFW_CMD_PREP(req
, QUERY_QP
, cmd_flags
);
849 sbuf
= bnxt_qplib_rcfw_alloc_sbuf(rcfw
, sizeof(*sb
));
854 req
.qp_cid
= cpu_to_le32(qp
->id
);
855 req
.resp_size
= sizeof(*sb
) / BNXT_QPLIB_CMDQE_UNITS
;
856 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
, (void *)&resp
,
860 /* Extract the context from the side buffer */
861 qp
->state
= sb
->en_sqd_async_notify_state
&
862 CREQ_QUERY_QP_RESP_SB_STATE_MASK
;
863 qp
->en_sqd_async_notify
= sb
->en_sqd_async_notify_state
&
864 CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY
?
866 qp
->access
= sb
->access
;
867 qp
->pkey_index
= le16_to_cpu(sb
->pkey
);
868 qp
->qkey
= le32_to_cpu(sb
->qkey
);
870 temp32
[0] = le32_to_cpu(sb
->dgid
[0]);
871 temp32
[1] = le32_to_cpu(sb
->dgid
[1]);
872 temp32
[2] = le32_to_cpu(sb
->dgid
[2]);
873 temp32
[3] = le32_to_cpu(sb
->dgid
[3]);
874 memcpy(qp
->ah
.dgid
.data
, temp32
, sizeof(qp
->ah
.dgid
.data
));
876 qp
->ah
.flow_label
= le32_to_cpu(sb
->flow_label
);
878 qp
->ah
.sgid_index
= 0;
879 for (i
= 0; i
< res
->sgid_tbl
.max
; i
++) {
880 if (res
->sgid_tbl
.hw_id
[i
] == le16_to_cpu(sb
->sgid_index
)) {
881 qp
->ah
.sgid_index
= i
;
885 if (i
== res
->sgid_tbl
.max
)
886 dev_warn(&res
->pdev
->dev
, "QPLIB: SGID not found??");
888 qp
->ah
.hop_limit
= sb
->hop_limit
;
889 qp
->ah
.traffic_class
= sb
->traffic_class
;
890 memcpy(qp
->ah
.dmac
, sb
->dest_mac
, 6);
891 qp
->ah
.vlan_id
= (le16_to_cpu(sb
->path_mtu_dest_vlan_id
) &
892 CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK
) >>
893 CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT
;
894 qp
->path_mtu
= (le16_to_cpu(sb
->path_mtu_dest_vlan_id
) &
895 CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK
) >>
896 CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT
;
897 qp
->timeout
= sb
->timeout
;
898 qp
->retry_cnt
= sb
->retry_cnt
;
899 qp
->rnr_retry
= sb
->rnr_retry
;
900 qp
->min_rnr_timer
= sb
->min_rnr_timer
;
901 qp
->rq
.psn
= le32_to_cpu(sb
->rq_psn
);
902 qp
->max_rd_atomic
= ORRQ_SLOTS_TO_ORD_LIMIT(sb
->max_rd_atomic
);
903 qp
->sq
.psn
= le32_to_cpu(sb
->sq_psn
);
904 qp
->max_dest_rd_atomic
=
905 IRRQ_SLOTS_TO_IRD_LIMIT(sb
->max_dest_rd_atomic
);
906 qp
->sq
.max_wqe
= qp
->sq
.hwq
.max_elements
;
907 qp
->rq
.max_wqe
= qp
->rq
.hwq
.max_elements
;
908 qp
->sq
.max_sge
= le16_to_cpu(sb
->sq_sge
);
909 qp
->rq
.max_sge
= le16_to_cpu(sb
->rq_sge
);
910 qp
->max_inline_data
= le32_to_cpu(sb
->max_inline_data
);
911 qp
->dest_qpn
= le32_to_cpu(sb
->dest_qp_id
);
912 memcpy(qp
->smac
, sb
->src_mac
, 6);
913 qp
->vlan_id
= le16_to_cpu(sb
->vlan_pcp_vlan_dei_vlan_id
);
915 bnxt_qplib_rcfw_free_sbuf(rcfw
, sbuf
);
919 static void __clean_cq(struct bnxt_qplib_cq
*cq
, u64 qp
)
921 struct bnxt_qplib_hwq
*cq_hwq
= &cq
->hwq
;
922 struct cq_base
*hw_cqe
, **hw_cqe_ptr
;
925 for (i
= 0; i
< cq_hwq
->max_elements
; i
++) {
926 hw_cqe_ptr
= (struct cq_base
**)cq_hwq
->pbl_ptr
;
927 hw_cqe
= &hw_cqe_ptr
[CQE_PG(i
)][CQE_IDX(i
)];
928 if (!CQE_CMP_VALID(hw_cqe
, i
, cq_hwq
->max_elements
))
930 switch (hw_cqe
->cqe_type_toggle
& CQ_BASE_CQE_TYPE_MASK
) {
931 case CQ_BASE_CQE_TYPE_REQ
:
932 case CQ_BASE_CQE_TYPE_TERMINAL
:
934 struct cq_req
*cqe
= (struct cq_req
*)hw_cqe
;
936 if (qp
== le64_to_cpu(cqe
->qp_handle
))
940 case CQ_BASE_CQE_TYPE_RES_RC
:
941 case CQ_BASE_CQE_TYPE_RES_UD
:
942 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1
:
944 struct cq_res_rc
*cqe
= (struct cq_res_rc
*)hw_cqe
;
946 if (qp
== le64_to_cpu(cqe
->qp_handle
))
956 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res
*res
,
957 struct bnxt_qplib_qp
*qp
)
959 struct bnxt_qplib_rcfw
*rcfw
= res
->rcfw
;
960 struct cmdq_destroy_qp req
;
961 struct creq_destroy_qp_resp resp
;
966 RCFW_CMD_PREP(req
, DESTROY_QP
, cmd_flags
);
968 req
.qp_cid
= cpu_to_le32(qp
->id
);
969 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
,
970 (void *)&resp
, NULL
, 0);
974 /* Must walk the associated CQs to nullified the QP ptr */
975 spin_lock_irqsave(&qp
->scq
->hwq
.lock
, flags
);
977 __clean_cq(qp
->scq
, (u64
)(unsigned long)qp
);
979 if (qp
->rcq
&& qp
->rcq
!= qp
->scq
) {
980 spin_lock(&qp
->rcq
->hwq
.lock
);
981 __clean_cq(qp
->rcq
, (u64
)(unsigned long)qp
);
982 spin_unlock(&qp
->rcq
->hwq
.lock
);
985 spin_unlock_irqrestore(&qp
->scq
->hwq
.lock
, flags
);
987 bnxt_qplib_free_qp_hdr_buf(res
, qp
);
988 bnxt_qplib_free_hwq(res
->pdev
, &qp
->sq
.hwq
);
991 bnxt_qplib_free_hwq(res
->pdev
, &qp
->rq
.hwq
);
994 if (qp
->irrq
.max_elements
)
995 bnxt_qplib_free_hwq(res
->pdev
, &qp
->irrq
);
996 if (qp
->orrq
.max_elements
)
997 bnxt_qplib_free_hwq(res
->pdev
, &qp
->orrq
);
1002 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp
*qp
,
1003 struct bnxt_qplib_sge
*sge
)
1005 struct bnxt_qplib_q
*sq
= &qp
->sq
;
1008 memset(sge
, 0, sizeof(*sge
));
1010 if (qp
->sq_hdr_buf
) {
1011 sw_prod
= HWQ_CMP(sq
->hwq
.prod
, &sq
->hwq
);
1012 sge
->addr
= (dma_addr_t
)(qp
->sq_hdr_buf_map
+
1013 sw_prod
* qp
->sq_hdr_buf_size
);
1014 sge
->lkey
= 0xFFFFFFFF;
1015 sge
->size
= qp
->sq_hdr_buf_size
;
1016 return qp
->sq_hdr_buf
+ sw_prod
* sge
->size
;
1021 u32
bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp
*qp
)
1023 struct bnxt_qplib_q
*rq
= &qp
->rq
;
1025 return HWQ_CMP(rq
->hwq
.prod
, &rq
->hwq
);
1028 dma_addr_t
bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp
*qp
, u32 index
)
1030 return (qp
->rq_hdr_buf_map
+ index
* qp
->rq_hdr_buf_size
);
1033 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp
*qp
,
1034 struct bnxt_qplib_sge
*sge
)
1036 struct bnxt_qplib_q
*rq
= &qp
->rq
;
1039 memset(sge
, 0, sizeof(*sge
));
1041 if (qp
->rq_hdr_buf
) {
1042 sw_prod
= HWQ_CMP(rq
->hwq
.prod
, &rq
->hwq
);
1043 sge
->addr
= (dma_addr_t
)(qp
->rq_hdr_buf_map
+
1044 sw_prod
* qp
->rq_hdr_buf_size
);
1045 sge
->lkey
= 0xFFFFFFFF;
1046 sge
->size
= qp
->rq_hdr_buf_size
;
1047 return qp
->rq_hdr_buf
+ sw_prod
* sge
->size
;
1052 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp
*qp
)
1054 struct bnxt_qplib_q
*sq
= &qp
->sq
;
1055 struct dbr_dbr db_msg
= { 0 };
1058 sw_prod
= HWQ_CMP(sq
->hwq
.prod
, &sq
->hwq
);
1060 db_msg
.index
= cpu_to_le32((sw_prod
<< DBR_DBR_INDEX_SFT
) &
1061 DBR_DBR_INDEX_MASK
);
1063 cpu_to_le32(((qp
->id
<< DBR_DBR_XID_SFT
) & DBR_DBR_XID_MASK
) |
1065 /* Flush all the WQE writes to HW */
1067 __iowrite64_copy(qp
->dpi
->dbr
, &db_msg
, sizeof(db_msg
) / sizeof(u64
));
1070 int bnxt_qplib_post_send(struct bnxt_qplib_qp
*qp
,
1071 struct bnxt_qplib_swqe
*wqe
)
1073 struct bnxt_qplib_q
*sq
= &qp
->sq
;
1074 struct bnxt_qplib_swq
*swq
;
1075 struct sq_send
*hw_sq_send_hdr
, **hw_sq_send_ptr
;
1076 struct sq_sge
*hw_sge
;
1079 int i
, rc
= 0, data_len
= 0, pkt_num
= 0;
1082 if (qp
->state
!= CMDQ_MODIFY_QP_NEW_STATE_RTS
) {
1087 if (bnxt_qplib_queue_full(sq
)) {
1088 dev_err(&sq
->hwq
.pdev
->dev
,
1089 "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
1090 sq
->hwq
.prod
, sq
->hwq
.cons
, sq
->hwq
.max_elements
,
1095 sw_prod
= HWQ_CMP(sq
->hwq
.prod
, &sq
->hwq
);
1096 swq
= &sq
->swq
[sw_prod
];
1097 swq
->wr_id
= wqe
->wr_id
;
1098 swq
->type
= wqe
->type
;
1099 swq
->flags
= wqe
->flags
;
1101 swq
->flags
|= SQ_SEND_FLAGS_SIGNAL_COMP
;
1102 swq
->start_psn
= sq
->psn
& BTH_PSN_MASK
;
1104 hw_sq_send_ptr
= (struct sq_send
**)sq
->hwq
.pbl_ptr
;
1105 hw_sq_send_hdr
= &hw_sq_send_ptr
[get_sqe_pg(sw_prod
)]
1106 [get_sqe_idx(sw_prod
)];
1108 memset(hw_sq_send_hdr
, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE
);
1110 if (wqe
->flags
& BNXT_QPLIB_SWQE_FLAGS_INLINE
) {
1111 /* Copy the inline data */
1112 if (wqe
->inline_len
> BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH
) {
1113 dev_warn(&sq
->hwq
.pdev
->dev
,
1114 "QPLIB: Inline data length > 96 detected");
1115 data_len
= BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH
;
1117 data_len
= wqe
->inline_len
;
1119 memcpy(hw_sq_send_hdr
->data
, wqe
->inline_data
, data_len
);
1120 wqe_size16
= (data_len
+ 15) >> 4;
1122 for (i
= 0, hw_sge
= (struct sq_sge
*)hw_sq_send_hdr
->data
;
1123 i
< wqe
->num_sge
; i
++, hw_sge
++) {
1124 hw_sge
->va_or_pa
= cpu_to_le64(wqe
->sg_list
[i
].addr
);
1125 hw_sge
->l_key
= cpu_to_le32(wqe
->sg_list
[i
].lkey
);
1126 hw_sge
->size
= cpu_to_le32(wqe
->sg_list
[i
].size
);
1127 data_len
+= wqe
->sg_list
[i
].size
;
1129 /* Each SGE entry = 1 WQE size16 */
1130 wqe_size16
= wqe
->num_sge
;
1131 /* HW requires wqe size has room for atleast one SGE even if
1132 * none was supplied by ULP
1139 switch (wqe
->type
) {
1140 case BNXT_QPLIB_SWQE_TYPE_SEND
:
1141 if (qp
->type
== CMDQ_CREATE_QP1_TYPE_GSI
) {
1142 /* Assemble info for Raw Ethertype QPs */
1143 struct sq_send_raweth_qp1
*sqe
=
1144 (struct sq_send_raweth_qp1
*)hw_sq_send_hdr
;
1146 sqe
->wqe_type
= wqe
->type
;
1147 sqe
->flags
= wqe
->flags
;
1148 sqe
->wqe_size
= wqe_size16
+
1149 ((offsetof(typeof(*sqe
), data
) + 15) >> 4);
1150 sqe
->cfa_action
= cpu_to_le16(wqe
->rawqp1
.cfa_action
);
1151 sqe
->lflags
= cpu_to_le16(wqe
->rawqp1
.lflags
);
1152 sqe
->length
= cpu_to_le32(data_len
);
1153 sqe
->cfa_meta
= cpu_to_le32((wqe
->rawqp1
.cfa_meta
&
1154 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK
) <<
1155 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT
);
1159 /* else, just fall thru */
1160 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM
:
1161 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV
:
1163 struct sq_send
*sqe
= (struct sq_send
*)hw_sq_send_hdr
;
1165 sqe
->wqe_type
= wqe
->type
;
1166 sqe
->flags
= wqe
->flags
;
1167 sqe
->wqe_size
= wqe_size16
+
1168 ((offsetof(typeof(*sqe
), data
) + 15) >> 4);
1169 sqe
->inv_key_or_imm_data
= cpu_to_le32(
1171 if (qp
->type
== CMDQ_CREATE_QP_TYPE_UD
) {
1172 sqe
->q_key
= cpu_to_le32(wqe
->send
.q_key
);
1173 sqe
->dst_qp
= cpu_to_le32(
1174 wqe
->send
.dst_qp
& SQ_SEND_DST_QP_MASK
);
1175 sqe
->length
= cpu_to_le32(data_len
);
1176 sqe
->avid
= cpu_to_le32(wqe
->send
.avid
&
1178 sq
->psn
= (sq
->psn
+ 1) & BTH_PSN_MASK
;
1180 sqe
->length
= cpu_to_le32(data_len
);
1184 pkt_num
= (data_len
+ qp
->mtu
- 1) / qp
->mtu
;
1187 sq
->psn
= (sq
->psn
+ pkt_num
) & BTH_PSN_MASK
;
1191 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE
:
1192 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM
:
1193 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ
:
1195 struct sq_rdma
*sqe
= (struct sq_rdma
*)hw_sq_send_hdr
;
1197 sqe
->wqe_type
= wqe
->type
;
1198 sqe
->flags
= wqe
->flags
;
1199 sqe
->wqe_size
= wqe_size16
+
1200 ((offsetof(typeof(*sqe
), data
) + 15) >> 4);
1201 sqe
->imm_data
= cpu_to_le32(wqe
->rdma
.inv_key
);
1202 sqe
->length
= cpu_to_le32((u32
)data_len
);
1203 sqe
->remote_va
= cpu_to_le64(wqe
->rdma
.remote_va
);
1204 sqe
->remote_key
= cpu_to_le32(wqe
->rdma
.r_key
);
1206 pkt_num
= (data_len
+ qp
->mtu
- 1) / qp
->mtu
;
1209 sq
->psn
= (sq
->psn
+ pkt_num
) & BTH_PSN_MASK
;
1212 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP
:
1213 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD
:
1215 struct sq_atomic
*sqe
= (struct sq_atomic
*)hw_sq_send_hdr
;
1217 sqe
->wqe_type
= wqe
->type
;
1218 sqe
->flags
= wqe
->flags
;
1219 sqe
->remote_key
= cpu_to_le32(wqe
->atomic
.r_key
);
1220 sqe
->remote_va
= cpu_to_le64(wqe
->atomic
.remote_va
);
1221 sqe
->swap_data
= cpu_to_le64(wqe
->atomic
.swap_data
);
1222 sqe
->cmp_data
= cpu_to_le64(wqe
->atomic
.cmp_data
);
1224 pkt_num
= (data_len
+ qp
->mtu
- 1) / qp
->mtu
;
1227 sq
->psn
= (sq
->psn
+ pkt_num
) & BTH_PSN_MASK
;
1230 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV
:
1232 struct sq_localinvalidate
*sqe
=
1233 (struct sq_localinvalidate
*)hw_sq_send_hdr
;
1235 sqe
->wqe_type
= wqe
->type
;
1236 sqe
->flags
= wqe
->flags
;
1237 sqe
->inv_l_key
= cpu_to_le32(wqe
->local_inv
.inv_l_key
);
1241 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR
:
1243 struct sq_fr_pmr
*sqe
= (struct sq_fr_pmr
*)hw_sq_send_hdr
;
1245 sqe
->wqe_type
= wqe
->type
;
1246 sqe
->flags
= wqe
->flags
;
1247 sqe
->access_cntl
= wqe
->frmr
.access_cntl
|
1248 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE
;
1249 sqe
->zero_based_page_size_log
=
1250 (wqe
->frmr
.pg_sz_log
& SQ_FR_PMR_PAGE_SIZE_LOG_MASK
) <<
1251 SQ_FR_PMR_PAGE_SIZE_LOG_SFT
|
1252 (wqe
->frmr
.zero_based
? SQ_FR_PMR_ZERO_BASED
: 0);
1253 sqe
->l_key
= cpu_to_le32(wqe
->frmr
.l_key
);
1254 temp32
= cpu_to_le32(wqe
->frmr
.length
);
1255 memcpy(sqe
->length
, &temp32
, sizeof(wqe
->frmr
.length
));
1256 sqe
->numlevels_pbl_page_size_log
=
1257 ((wqe
->frmr
.pbl_pg_sz_log
<<
1258 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT
) &
1259 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK
) |
1260 ((wqe
->frmr
.levels
<< SQ_FR_PMR_NUMLEVELS_SFT
) &
1261 SQ_FR_PMR_NUMLEVELS_MASK
);
1263 for (i
= 0; i
< wqe
->frmr
.page_list_len
; i
++)
1264 wqe
->frmr
.pbl_ptr
[i
] = cpu_to_le64(
1265 wqe
->frmr
.page_list
[i
] |
1267 sqe
->pblptr
= cpu_to_le64(wqe
->frmr
.pbl_dma_ptr
);
1268 sqe
->va
= cpu_to_le64(wqe
->frmr
.va
);
1272 case BNXT_QPLIB_SWQE_TYPE_BIND_MW
:
1274 struct sq_bind
*sqe
= (struct sq_bind
*)hw_sq_send_hdr
;
1276 sqe
->wqe_type
= wqe
->type
;
1277 sqe
->flags
= wqe
->flags
;
1278 sqe
->access_cntl
= wqe
->bind
.access_cntl
;
1279 sqe
->mw_type_zero_based
= wqe
->bind
.mw_type
|
1280 (wqe
->bind
.zero_based
? SQ_BIND_ZERO_BASED
: 0);
1281 sqe
->parent_l_key
= cpu_to_le32(wqe
->bind
.parent_l_key
);
1282 sqe
->l_key
= cpu_to_le32(wqe
->bind
.r_key
);
1283 sqe
->va
= cpu_to_le64(wqe
->bind
.va
);
1284 temp32
= cpu_to_le32(wqe
->bind
.length
);
1285 memcpy(&sqe
->length
, &temp32
, sizeof(wqe
->bind
.length
));
1289 /* Bad wqe, return error */
1293 swq
->next_psn
= sq
->psn
& BTH_PSN_MASK
;
1294 if (swq
->psn_search
) {
1295 swq
->psn_search
->opcode_start_psn
= cpu_to_le32(
1296 ((swq
->start_psn
<< SQ_PSN_SEARCH_START_PSN_SFT
) &
1297 SQ_PSN_SEARCH_START_PSN_MASK
) |
1298 ((wqe
->type
<< SQ_PSN_SEARCH_OPCODE_SFT
) &
1299 SQ_PSN_SEARCH_OPCODE_MASK
));
1300 swq
->psn_search
->flags_next_psn
= cpu_to_le32(
1301 ((swq
->next_psn
<< SQ_PSN_SEARCH_NEXT_PSN_SFT
) &
1302 SQ_PSN_SEARCH_NEXT_PSN_MASK
));
/*
 * bnxt_qplib_post_recv_db() - ring the receive-queue doorbell for a QP.
 *
 * Computes the software producer index of qp->rq's hardware queue via
 * HWQ_CMP(), packs it into a dbr_dbr doorbell message together with
 * qp->id, and writes the message to the doorbell page through
 * __iowrite64_copy().
 *
 * NOTE(review): damaged extraction -- several original source lines are
 * missing in this span (e.g. the sw_prod declaration, the db_msg.type_xid
 * assignment, and the wmb() implied by the "Flush the writes" comment);
 * verify against the pristine file before editing.
 */
1313 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp
*qp
)
1315 struct bnxt_qplib_q
*rq
= &qp
->rq
;
1316 struct dbr_dbr db_msg
= { 0 };
/* Current software producer slot of the RQ hardware queue. */
1319 sw_prod
= HWQ_CMP(rq
->hwq
.prod
, &rq
->hwq
);
1320 db_msg
.index
= cpu_to_le32((sw_prod
<< DBR_DBR_INDEX_SFT
) &
1321 DBR_DBR_INDEX_MASK
);
/* Doorbell XID field carries the QP id. */
1323 cpu_to_le32(((qp
->id
<< DBR_DBR_XID_SFT
) & DBR_DBR_XID_MASK
) |
1326 /* Flush the writes to HW Rx WQE before the ringing Rx DB */
/* 64-bit copy of the doorbell message into the QP's doorbell region. */
1328 __iowrite64_copy(qp
->dpi
->dbr
, &db_msg
, sizeof(db_msg
) / sizeof(u64
));
/*
 * bnxt_qplib_post_recv() - post one receive work request (wqe) to qp->rq.
 *
 * Rejects the post when the QP is in the error state or the RQ is full,
 * otherwise builds an rq_wqe at the current software producer slot:
 * records wqe->wr_id in the software queue, copies each SGE
 * (addr/lkey/size) into the hardware WQE, and fills in type, flags and
 * wqe_size.
 *
 * NOTE(review): damaged extraction -- original lines are missing in this
 * span (error-path returns, the producer increment, closing braces, and
 * the "room for at least one SGE" adjustment body); verify against the
 * pristine file before editing.
 */
1331 int bnxt_qplib_post_recv(struct bnxt_qplib_qp
*qp
,
1332 struct bnxt_qplib_swqe
*wqe
)
1334 struct bnxt_qplib_q
*rq
= &qp
->rq
;
1335 struct rq_wqe
*rqe
, **rqe_ptr
;
1336 struct sq_sge
*hw_sge
;
/* Posting to an error-state QP is refused. */
1340 if (qp
->state
== CMDQ_MODIFY_QP_NEW_STATE_ERR
) {
1341 dev_err(&rq
->hwq
.pdev
->dev
,
1342 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1347 if (bnxt_qplib_queue_full(rq
)) {
1348 dev_err(&rq
->hwq
.pdev
->dev
,
1349 "QPLIB: FP: QP (0x%x) RQ is full!", qp
->id
);
/* Remember the caller's wr_id at the producer slot of the software queue. */
1353 sw_prod
= HWQ_CMP(rq
->hwq
.prod
, &rq
->hwq
);
1354 rq
->swq
[sw_prod
].wr_id
= wqe
->wr_id
;
/* Locate the hardware RQE via the page/index decomposition of sw_prod. */
1356 rqe_ptr
= (struct rq_wqe
**)rq
->hwq
.pbl_ptr
;
1357 rqe
= &rqe_ptr
[RQE_PG(sw_prod
)][RQE_IDX(sw_prod
)];
1359 memset(rqe
, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE
);
1361 /* Calculate wqe_size16 and data_len */
1362 for (i
= 0, hw_sge
= (struct sq_sge
*)rqe
->data
;
1363 i
< wqe
->num_sge
; i
++, hw_sge
++) {
1364 hw_sge
->va_or_pa
= cpu_to_le64(wqe
->sg_list
[i
].addr
);
1365 hw_sge
->l_key
= cpu_to_le32(wqe
->sg_list
[i
].lkey
);
1366 hw_sge
->size
= cpu_to_le32(wqe
->sg_list
[i
].size
);
1368 rqe
->wqe_type
= wqe
->type
;
1369 rqe
->flags
= wqe
->flags
;
/* wqe_size counts SGEs plus the 16-byte-rounded header. */
1370 rqe
->wqe_size
= wqe
->num_sge
+
1371 ((offsetof(typeof(*rqe
), data
) + 15) >> 4);
1372 /* HW requires wqe size has room for atleast one SGE even if none
1373 * was supplied by ULP
1378 /* Supply the rqe->wr_id index to the wr_id_tbl for now */
1379 rqe
->wr_id
[0] = cpu_to_le32(sw_prod
);
1388 /* Spinlock must be held */
/*
 * bnxt_qplib_arm_cq_enable() - write an ARMENA doorbell for the CQ.
 *
 * Builds a dbr_dbr message carrying cq->id and DBR_DBR_TYPE_CQ_ARMENA
 * and writes it to cq->dbr_base with __iowrite64_copy().
 *
 * NOTE(review): damaged extraction -- the db_msg.type_xid assignment line
 * and the wmb() implied by the "Flush memory writes" comment appear to be
 * missing; verify against the pristine file.
 */
1389 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq
*cq
)
1391 struct dbr_dbr db_msg
= { 0 };
1394 cpu_to_le32(((cq
->id
<< DBR_DBR_XID_SFT
) & DBR_DBR_XID_MASK
) |
1395 DBR_DBR_TYPE_CQ_ARMENA
);
1396 /* Flush memory writes before enabling the CQ */
1398 __iowrite64_copy(cq
->dbr_base
, &db_msg
, sizeof(db_msg
) / sizeof(u64
));
/*
 * bnxt_qplib_arm_cq() - arm the CQ with the given doorbell arm_type
 * (e.g. DBR_DBR_TYPE_CQ or DBR_DBR_TYPE_CQ_ARMALL, per callers below).
 *
 * Packs the CQ's software consumer index and cq->id into a dbr_dbr
 * message and writes it to the doorbell page.
 *
 * NOTE(review): damaged extraction -- the db_msg.type_xid assignment, the
 * arm_type OR-in, and the wmb() implied by the "flush memory writes"
 * comment appear to be missing; verify against the pristine file.
 */
1401 static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq
*cq
, u32 arm_type
)
1403 struct bnxt_qplib_hwq
*cq_hwq
= &cq
->hwq
;
1404 struct dbr_dbr db_msg
= { 0 };
/* Doorbell index = current software consumer slot of the CQ. */
1408 sw_cons
= HWQ_CMP(cq_hwq
->cons
, cq_hwq
);
1409 db_msg
.index
= cpu_to_le32((sw_cons
<< DBR_DBR_INDEX_SFT
) &
1410 DBR_DBR_INDEX_MASK
);
1412 cpu_to_le32(((cq
->id
<< DBR_DBR_XID_SFT
) & DBR_DBR_XID_MASK
) |
1414 /* flush memory writes before arming the CQ */
1416 __iowrite64_copy(cq
->dpi
->dbr
, &db_msg
, sizeof(db_msg
) / sizeof(u64
));
/*
 * bnxt_qplib_create_cq() - allocate a CQ hardware queue and issue the
 * firmware CREATE_CQ command.
 *
 * Sizes the hwq from cq->max_wqe, allocates/initializes it, fills the
 * cmdq_create_cq request (DPI, handle, size, page-size/level encoding of
 * the level-0 PBL, PBL base, CNQ ring id) and sends it via
 * bnxt_qplib_rcfw_send_message(). On success records the firmware-assigned
 * CQ id, doorbell base and coalescing period, initializes the waitqueue,
 * and arms the CQ with ARMENA. The failure path frees the hwq.
 *
 * NOTE(review): damaged extraction -- error-path gotos/returns, the
 * NULL-DPI check condition, and closing braces are missing from this
 * span; verify against the pristine file before editing.
 */
1419 int bnxt_qplib_create_cq(struct bnxt_qplib_res
*res
, struct bnxt_qplib_cq
*cq
)
1421 struct bnxt_qplib_rcfw
*rcfw
= res
->rcfw
;
1422 struct cmdq_create_cq req
;
1423 struct creq_create_cq_resp resp
;
1424 struct bnxt_qplib_pbl
*pbl
;
1428 cq
->hwq
.max_elements
= cq
->max_wqe
;
1429 rc
= bnxt_qplib_alloc_init_hwq(res
->pdev
, &cq
->hwq
, cq
->sghead
,
1430 cq
->nmap
, &cq
->hwq
.max_elements
,
1431 BNXT_QPLIB_MAX_CQE_ENTRY_SIZE
, 0,
1432 PAGE_SIZE
, HWQ_TYPE_QUEUE
);
1436 RCFW_CMD_PREP(req
, CREATE_CQ
, cmd_flags
);
/* A CQ cannot be created without a valid doorbell page index (DPI). */
1439 dev_err(&rcfw
->pdev
->dev
,
1440 "QPLIB: FP: CREATE_CQ failed due to NULL DPI");
1443 req
.dpi
= cpu_to_le32(cq
->dpi
->dpi
);
1444 req
.cq_handle
= cpu_to_le64(cq
->cq_handle
);
1446 req
.cq_size
= cpu_to_le32(cq
->hwq
.max_elements
);
1447 pbl
= &cq
->hwq
.pbl
[PBL_LVL_0
];
/* Encode the PBL level and the level-0 page size for the firmware. */
1448 req
.pg_size_lvl
= cpu_to_le32(
1449 ((cq
->hwq
.level
& CMDQ_CREATE_CQ_LVL_MASK
) <<
1450 CMDQ_CREATE_CQ_LVL_SFT
) |
1451 (pbl
->pg_size
== ROCE_PG_SIZE_4K
? CMDQ_CREATE_CQ_PG_SIZE_PG_4K
:
1452 pbl
->pg_size
== ROCE_PG_SIZE_8K
? CMDQ_CREATE_CQ_PG_SIZE_PG_8K
:
1453 pbl
->pg_size
== ROCE_PG_SIZE_64K
? CMDQ_CREATE_CQ_PG_SIZE_PG_64K
:
1454 pbl
->pg_size
== ROCE_PG_SIZE_2M
? CMDQ_CREATE_CQ_PG_SIZE_PG_2M
:
1455 pbl
->pg_size
== ROCE_PG_SIZE_8M
? CMDQ_CREATE_CQ_PG_SIZE_PG_8M
:
1456 pbl
->pg_size
== ROCE_PG_SIZE_1G
? CMDQ_CREATE_CQ_PG_SIZE_PG_1G
:
1457 CMDQ_CREATE_CQ_PG_SIZE_PG_4K
));
1459 req
.pbl
= cpu_to_le64(pbl
->pg_map_arr
[0]);
1461 req
.cq_fco_cnq_id
= cpu_to_le32(
1462 (cq
->cnq_hw_ring_id
& CMDQ_CREATE_CQ_CNQ_ID_MASK
) <<
1463 CMDQ_CREATE_CQ_CNQ_ID_SFT
);
1465 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
,
1466 (void *)&resp
, NULL
, 0);
/* Success: record the firmware-assigned CQ id and doorbell base. */
1470 cq
->id
= le32_to_cpu(resp
.xid
);
1471 cq
->dbr_base
= res
->dpi_tbl
.dbr_bar_reg_iomem
;
1472 cq
->period
= BNXT_QPLIB_QUEUE_START_PERIOD
;
1473 init_waitqueue_head(&cq
->waitq
);
1475 bnxt_qplib_arm_cq_enable(cq
);
/* Failure path: release the hardware queue allocated above. */
1479 bnxt_qplib_free_hwq(res
->pdev
, &cq
->hwq
);
/*
 * bnxt_qplib_destroy_cq() - issue the firmware DESTROY_CQ command for
 * cq->id and free the CQ's hardware queue.
 *
 * NOTE(review): damaged extraction -- the error check on the send result
 * and the final return are missing from this span; verify against the
 * pristine file.
 */
1484 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res
*res
, struct bnxt_qplib_cq
*cq
)
1486 struct bnxt_qplib_rcfw
*rcfw
= res
->rcfw
;
1487 struct cmdq_destroy_cq req
;
1488 struct creq_destroy_cq_resp resp
;
1492 RCFW_CMD_PREP(req
, DESTROY_CQ
, cmd_flags
);
1494 req
.cq_cid
= cpu_to_le32(cq
->id
);
1495 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
,
1496 (void *)&resp
, NULL
, 0);
1499 bnxt_qplib_free_hwq(res
->pdev
, &cq
->hwq
);
/*
 * __flush_sq() - fabricate FLUSHED_ERR completions for all outstanding
 * SQEs on sq, up to the producer index or until *budget is exhausted.
 *
 * For each pending software-queue entry a CQE is synthesized with status
 * WORK_REQUEST_FLUSHED_ERR, opcode REQ, the QP handle/id and the saved
 * wr_id/type. When consumer catches producer, flush_in_progress is
 * cleared.
 *
 * NOTE(review): damaged extraction -- the loop construct, budget
 * decrement, consumer increment and return value are missing from this
 * span; verify against the pristine file.
 */
1503 static int __flush_sq(struct bnxt_qplib_q
*sq
, struct bnxt_qplib_qp
*qp
,
1504 struct bnxt_qplib_cqe
**pcqe
, int *budget
)
1506 u32 sw_prod
, sw_cons
;
1507 struct bnxt_qplib_cqe
*cqe
;
1510 /* Now complete all outstanding SQEs with FLUSHED_ERR */
1511 sw_prod
= HWQ_CMP(sq
->hwq
.prod
, &sq
->hwq
);
1514 sw_cons
= HWQ_CMP(sq
->hwq
.cons
, &sq
->hwq
);
/* Consumer has caught up to producer: flush complete. */
1515 if (sw_cons
== sw_prod
) {
1516 sq
->flush_in_progress
= false;
/* Synthesize one flushed-error CQE for this software-queue entry. */
1519 memset(cqe
, 0, sizeof(*cqe
));
1520 cqe
->status
= CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR
;
1521 cqe
->opcode
= CQ_BASE_CQE_TYPE_REQ
;
1522 cqe
->qp_handle
= (u64
)(unsigned long)qp
;
1523 cqe
->wr_id
= sq
->swq
[sw_cons
].wr_id
;
1524 cqe
->src_qp
= qp
->id
;
1525 cqe
->type
= sq
->swq
[sw_cons
].type
;
/* Budget exhausted before reaching the producer: more work remains. */
1531 if (!(*budget
) && HWQ_CMP(sq
->hwq
.cons
, &sq
->hwq
) != sw_prod
)
/*
 * __flush_rq() - fabricate FLUSHED_ERR completions for all outstanding
 * RQEs on rq, using the caller-supplied CQE opcode, up to the producer
 * index or until *budget is exhausted.
 *
 * Mirrors __flush_sq() for the receive side: each pending entry yields a
 * synthesized CQE with RC FLUSHED_ERR status and the saved wr_id.
 *
 * NOTE(review): damaged extraction -- the loop construct, the
 * cqe->status assignment line, budget/consumer bookkeeping and the
 * return are missing from this span; verify against the pristine file.
 */
1538 static int __flush_rq(struct bnxt_qplib_q
*rq
, struct bnxt_qplib_qp
*qp
,
1539 int opcode
, struct bnxt_qplib_cqe
**pcqe
, int *budget
)
1541 struct bnxt_qplib_cqe
*cqe
;
1542 u32 sw_prod
, sw_cons
;
1545 /* Flush the rest of the RQ */
1546 sw_prod
= HWQ_CMP(rq
->hwq
.prod
, &rq
->hwq
);
1549 sw_cons
= HWQ_CMP(rq
->hwq
.cons
, &rq
->hwq
);
1550 if (sw_cons
== sw_prod
)
1552 memset(cqe
, 0, sizeof(*cqe
));
1554 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR
;
1555 cqe
->opcode
= opcode
;
1556 cqe
->qp_handle
= (unsigned long)qp
;
1557 cqe
->wr_id
= rq
->swq
[sw_cons
].wr_id
;
1563 if (!*budget
&& HWQ_CMP(rq
->hwq
.cons
, &rq
->hwq
) != sw_prod
)
1570 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
1571 * CQE is track from sw_cq_cons to max_element but valid only if VALID=1
/*
 * do_wa9060() - hardware workaround 9060: handle the "phantom" CQE case.
 *
 * If the SQE at sw_sq_cons carries a psn_search entry whose high bit
 * (0x80000000) is set, the completion must be deferred: the marker bit is
 * cleared, sq->condition and sq->send_phantom are set, and the CQ is
 * re-armed with ARMALL. While sq->condition holds, subsequent calls peek
 * ahead in the CQ ring for a valid REQ-type CQE on this same SQ whose
 * wr_id equals BNXT_QPLIB_FENCE_WRID (the phantom); finding it clears
 * sq->condition. Falls out with an error print if neither path resolves.
 *
 * NOTE(review): damaged extraction -- return statements, the peek loop
 * construct, consumer-advance lines and several braces are missing from
 * this span; verify against the pristine file before editing.
 */
1573 static int do_wa9060(struct bnxt_qplib_qp
*qp
, struct bnxt_qplib_cq
*cq
,
1574 u32 cq_cons
, u32 sw_sq_cons
, u32 cqe_sq_cons
)
1576 struct bnxt_qplib_q
*sq
= &qp
->sq
;
1577 struct bnxt_qplib_swq
*swq
;
1578 u32 peek_sw_cq_cons
, peek_raw_cq_cons
, peek_sq_cons_idx
;
1579 struct cq_base
*peek_hwcqe
, **peek_hw_cqe_ptr
;
1580 struct cq_req
*peek_req_hwcqe
;
1581 struct bnxt_qplib_qp
*peek_qp
;
1582 struct bnxt_qplib_q
*peek_sq
;
1586 /* Check for the psn_search marking before completing */
1587 swq
= &sq
->swq
[sw_sq_cons
];
/* High bit of flags_next_psn is the WA-9060 marker set by firmware/HW. */
1588 if (swq
->psn_search
&&
1589 le32_to_cpu(swq
->psn_search
->flags_next_psn
) & 0x80000000) {
/* Clear the marker bit before deferring the completion. */
1591 swq
->psn_search
->flags_next_psn
= cpu_to_le32
1592 (le32_to_cpu(swq
->psn_search
->flags_next_psn
)
1594 dev_dbg(&cq
->hwq
.pdev
->dev
,
1595 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
1596 cq_cons
, qp
->id
, sw_sq_cons
, cqe_sq_cons
);
1597 sq
->condition
= true;
1598 sq
->send_phantom
= true;
1600 /* TODO: Only ARM if the previous SQE is ARMALL */
1601 bnxt_qplib_arm_cq(cq
, DBR_DBR_TYPE_CQ_ARMALL
);
/* Deferred earlier: peek ahead for the phantom completion. */
1606 if (sq
->condition
) {
1607 /* Peek at the completions */
1608 peek_raw_cq_cons
= cq
->hwq
.cons
;
1609 peek_sw_cq_cons
= cq_cons
;
1610 i
= cq
->hwq
.max_elements
;
1612 peek_sw_cq_cons
= HWQ_CMP((peek_sw_cq_cons
), &cq
->hwq
);
1613 peek_hw_cqe_ptr
= (struct cq_base
**)cq
->hwq
.pbl_ptr
;
1614 peek_hwcqe
= &peek_hw_cqe_ptr
[CQE_PG(peek_sw_cq_cons
)]
1615 [CQE_IDX(peek_sw_cq_cons
)];
1616 /* If the next hwcqe is VALID */
1617 if (CQE_CMP_VALID(peek_hwcqe
, peek_raw_cq_cons
,
1618 cq
->hwq
.max_elements
)) {
1619 /* If the next hwcqe is a REQ */
1620 if ((peek_hwcqe
->cqe_type_toggle
&
1621 CQ_BASE_CQE_TYPE_MASK
) ==
1622 CQ_BASE_CQE_TYPE_REQ
) {
1623 peek_req_hwcqe
= (struct cq_req
*)
1625 peek_qp
= (struct bnxt_qplib_qp
*)
1628 (peek_req_hwcqe
->qp_handle
));
1629 peek_sq
= &peek_qp
->sq
;
1630 peek_sq_cons_idx
= HWQ_CMP(le16_to_cpu(
1631 peek_req_hwcqe
->sq_cons_idx
) - 1
1633 /* If the hwcqe's sq's wr_id matches */
1634 if (peek_sq
== sq
&&
1635 sq
->swq
[peek_sq_cons_idx
].wr_id
==
1636 BNXT_QPLIB_FENCE_WRID
) {
1638 * Unbreak only if the phantom
1641 dev_dbg(&cq
->hwq
.pdev
->dev
,
1642 "FP:Got Phantom CQE");
1643 sq
->condition
= false;
1649 /* Valid but not the phantom, so keep looping */
1651 /* Not valid yet, just exit and wait */
1658 dev_err(&cq
->hwq
.pdev
->dev
,
1659 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
1660 cq_cons
, qp
->id
, sw_sq_cons
, cqe_sq_cons
);
/*
 * bnxt_qplib_cq_process_req() - translate a hardware REQ (send-side) CQE
 * into qplib CQEs, walking the SQ software queue from its current
 * consumer to the CQE-reported consumer index (CQE aggregation).
 *
 * For each swq entry a CQE is fabricated with the saved wr_id/type; the
 * last one inherits the hardware status. A non-OK status moves the QP to
 * the error state, starts flush_in_progress and continues via
 * __flush_sq(). Signaled completions run do_wa9060() first
 * (workaround for the phantom-CQE hardware erratum).
 *
 * NOTE(review): damaged extraction -- the NULL-qp check condition, loop
 * construct, budget/consumer bookkeeping, several branch bodies and
 * returns are missing from this span; verify against the pristine file.
 */
1667 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq
*cq
,
1668 struct cq_req
*hwcqe
,
1669 struct bnxt_qplib_cqe
**pcqe
, int *budget
,
1670 u32 cq_cons
, struct bnxt_qplib_qp
**lib_qp
)
1672 struct bnxt_qplib_qp
*qp
;
1673 struct bnxt_qplib_q
*sq
;
1674 struct bnxt_qplib_cqe
*cqe
;
1675 u32 sw_sq_cons
, cqe_sq_cons
;
1676 struct bnxt_qplib_swq
*swq
;
/* The CQE's qp_handle carries the qplib QP pointer. */
1679 qp
= (struct bnxt_qplib_qp
*)((unsigned long)
1680 le64_to_cpu(hwcqe
->qp_handle
));
1682 dev_err(&cq
->hwq
.pdev
->dev
,
1683 "QPLIB: FP: Process Req qp is NULL");
1688 cqe_sq_cons
= HWQ_CMP(le16_to_cpu(hwcqe
->sq_cons_idx
), &sq
->hwq
);
/* Sanity-check the hardware-reported consumer index. */
1689 if (cqe_sq_cons
> sq
->hwq
.max_elements
) {
1690 dev_err(&cq
->hwq
.pdev
->dev
,
1691 "QPLIB: FP: CQ Process req reported ");
1692 dev_err(&cq
->hwq
.pdev
->dev
,
1693 "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
1694 cqe_sq_cons
, sq
->hwq
.max_elements
);
1697 /* If we were in the middle of flushing the SQ, continue */
1698 if (sq
->flush_in_progress
)
1701 /* Require to walk the sq's swq to fabricate CQEs for all previously
1702 * signaled SWQEs due to CQE aggregation from the current sq cons
1703 * to the cqe_sq_cons
1707 sw_sq_cons
= HWQ_CMP(sq
->hwq
.cons
, &sq
->hwq
);
1708 if (sw_sq_cons
== cqe_sq_cons
)
1712 swq
= &sq
->swq
[sw_sq_cons
];
1713 memset(cqe
, 0, sizeof(*cqe
));
1714 cqe
->opcode
= CQ_BASE_CQE_TYPE_REQ
;
1715 cqe
->qp_handle
= (u64
)(unsigned long)qp
;
1716 cqe
->src_qp
= qp
->id
;
1717 cqe
->wr_id
= swq
->wr_id
;
/* Fence pseudo-WQEs (BNXT_QPLIB_FENCE_WRID) are not reported upward. */
1718 if (cqe
->wr_id
== BNXT_QPLIB_FENCE_WRID
)
1720 cqe
->type
= swq
->type
;
1722 /* For the last CQE, check for status. For errors, regardless
1723 * of the request being signaled or not, it must complete with
1724 * the hwcqe error status
1726 if (HWQ_CMP((sw_sq_cons
+ 1), &sq
->hwq
) == cqe_sq_cons
&&
1727 hwcqe
->status
!= CQ_REQ_STATUS_OK
) {
1728 cqe
->status
= hwcqe
->status
;
1729 dev_err(&cq
->hwq
.pdev
->dev
,
1730 "QPLIB: FP: CQ Processed Req ");
1731 dev_err(&cq
->hwq
.pdev
->dev
,
1732 "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
1733 sw_sq_cons
, cqe
->wr_id
, cqe
->status
);
/* Error completion: enter flush mode and block further posting. */
1736 sq
->flush_in_progress
= true;
1737 /* Must block new posting of SQ and RQ */
1738 qp
->state
= CMDQ_MODIFY_QP_NEW_STATE_ERR
;
1739 sq
->condition
= false;
1742 if (swq
->flags
& SQ_SEND_FLAGS_SIGNAL_COMP
) {
1743 /* Before we complete, do WA 9060 */
1744 if (do_wa9060(qp
, cq
, cq_cons
, sw_sq_cons
,
1749 cqe
->status
= CQ_REQ_STATUS_OK
;
1761 if (HWQ_CMP(sq
->hwq
.cons
, &sq
->hwq
) != cqe_sq_cons
) {
1767 * Back to normal completion mode only after it has completed all of
1768 * the WC for this CQE
1771 if (!sq
->flush_in_progress
)
1774 /* Require to walk the sq's swq to fabricate CQEs for all
1775 * previously posted SWQEs due to the error CQE received
1777 rc
= __flush_sq(sq
, qp
, pcqe
, budget
);
1779 sq
->flush_in_progress
= false;
/*
 * bnxt_qplib_cq_process_res_rc() - translate a hardware RC receive CQE
 * into a qplib CQE.
 *
 * Copies length, invalidation rkey, MR handle, flags and status from the
 * hardware CQE, validates the wr_id index against the RQ size, and looks
 * up the posted wr_id. A non-OK status starts an RQ flush via
 * __flush_rq().
 *
 * NOTE(review): damaged extraction -- the NULL-qp return, wr_id_idx
 * declaration, budget/consumer bookkeeping and returns are missing from
 * this span; verify against the pristine file.
 */
1784 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq
*cq
,
1785 struct cq_res_rc
*hwcqe
,
1786 struct bnxt_qplib_cqe
**pcqe
,
1789 struct bnxt_qplib_qp
*qp
;
1790 struct bnxt_qplib_q
*rq
;
1791 struct bnxt_qplib_cqe
*cqe
;
1795 qp
= (struct bnxt_qplib_qp
*)((unsigned long)
1796 le64_to_cpu(hwcqe
->qp_handle
));
1798 dev_err(&cq
->hwq
.pdev
->dev
, "QPLIB: process_cq RC qp is NULL");
/* Copy the hardware CQE fields into the qplib CQE. */
1802 cqe
->opcode
= hwcqe
->cqe_type_toggle
& CQ_BASE_CQE_TYPE_MASK
;
1803 cqe
->length
= le32_to_cpu(hwcqe
->length
);
1804 cqe
->invrkey
= le32_to_cpu(hwcqe
->imm_data_or_inv_r_key
);
1805 cqe
->mr_handle
= le64_to_cpu(hwcqe
->mr_handle
);
1806 cqe
->flags
= le16_to_cpu(hwcqe
->flags
);
1807 cqe
->status
= hwcqe
->status
;
1808 cqe
->qp_handle
= (u64
)(unsigned long)qp
;
1810 wr_id_idx
= le32_to_cpu(hwcqe
->srq_or_rq_wr_id
) &
1811 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK
;
/* Reject out-of-range wr_id indices reported by hardware. */
1813 if (wr_id_idx
> rq
->hwq
.max_elements
) {
1814 dev_err(&cq
->hwq
.pdev
->dev
, "QPLIB: FP: CQ Process RC ");
1815 dev_err(&cq
->hwq
.pdev
->dev
,
1816 "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
1817 wr_id_idx
, rq
->hwq
.max_elements
);
1820 if (rq
->flush_in_progress
)
1823 cqe
->wr_id
= rq
->swq
[wr_id_idx
].wr_id
;
/* Error status: flush the remaining RQ entries. */
1829 if (hwcqe
->status
!= CQ_RES_RC_STATUS_OK
) {
1830 rq
->flush_in_progress
= true;
1832 rc
= __flush_rq(rq
, qp
, CQ_BASE_CQE_TYPE_RES_RC
, pcqe
, budget
);
1834 rq
->flush_in_progress
= false;
/*
 * bnxt_qplib_cq_process_res_ud() - translate a hardware UD receive CQE
 * into a qplib CQE.
 *
 * Like the RC variant, but additionally copies the source MAC and
 * reconstructs the 24-bit source QP number from the low 16 bits plus the
 * high bits packed into src_qp_high_srq_or_rq_wr_id. A non-OK status
 * starts an RQ flush via __flush_rq().
 *
 * NOTE(review): damaged extraction -- the NULL-qp return, parts of the
 * src_qp reconstruction expression, budget/consumer bookkeeping and
 * returns are missing from this span; verify against the pristine file.
 * Also note the status is compared against CQ_RES_RC_STATUS_OK here (as
 * in the visible text) rather than a UD-specific constant -- confirm
 * against the original file.
 */
1839 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq
*cq
,
1840 struct cq_res_ud
*hwcqe
,
1841 struct bnxt_qplib_cqe
**pcqe
,
1844 struct bnxt_qplib_qp
*qp
;
1845 struct bnxt_qplib_q
*rq
;
1846 struct bnxt_qplib_cqe
*cqe
;
1850 qp
= (struct bnxt_qplib_qp
*)((unsigned long)
1851 le64_to_cpu(hwcqe
->qp_handle
));
1853 dev_err(&cq
->hwq
.pdev
->dev
, "QPLIB: process_cq UD qp is NULL");
1857 cqe
->opcode
= hwcqe
->cqe_type_toggle
& CQ_BASE_CQE_TYPE_MASK
;
1858 cqe
->length
= le32_to_cpu(hwcqe
->length
);
1859 cqe
->invrkey
= le32_to_cpu(hwcqe
->imm_data
);
1860 cqe
->flags
= le16_to_cpu(hwcqe
->flags
);
1861 cqe
->status
= hwcqe
->status
;
1862 cqe
->qp_handle
= (u64
)(unsigned long)qp
;
/* Source MAC of the sender, 6 bytes. */
1863 memcpy(cqe
->smac
, hwcqe
->src_mac
, 6);
1864 wr_id_idx
= le32_to_cpu(hwcqe
->src_qp_high_srq_or_rq_wr_id
)
1865 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK
;
/* Source QP = low 16 bits | high bits packed in the shared word. */
1866 cqe
->src_qp
= le16_to_cpu(hwcqe
->src_qp_low
) |
1868 hwcqe
->src_qp_high_srq_or_rq_wr_id
) &
1869 CQ_RES_UD_SRC_QP_HIGH_MASK
) >> 8);
1872 if (wr_id_idx
> rq
->hwq
.max_elements
) {
1873 dev_err(&cq
->hwq
.pdev
->dev
, "QPLIB: FP: CQ Process UD ");
1874 dev_err(&cq
->hwq
.pdev
->dev
,
1875 "QPLIB: wr_id idx %#x exceeded RQ max %#x",
1876 wr_id_idx
, rq
->hwq
.max_elements
);
1879 if (rq
->flush_in_progress
)
1882 cqe
->wr_id
= rq
->swq
[wr_id_idx
].wr_id
;
1888 if (hwcqe
->status
!= CQ_RES_RC_STATUS_OK
) {
1889 rq
->flush_in_progress
= true;
1891 rc
= __flush_rq(rq
, qp
, CQ_BASE_CQE_TYPE_RES_UD
, pcqe
, budget
);
1893 rq
->flush_in_progress
= false;
/*
 * bnxt_qplib_is_cq_empty() - return true when the CQ has no valid CQE at
 * its current consumer position.
 *
 * Takes cq->hwq.lock, locates the hardware CQE at the software consumer
 * index, and tests its valid bit with CQE_CMP_VALID(); a valid CQE means
 * the queue is NOT empty.
 *
 * NOTE(review): damaged extraction -- the rc declaration and the final
 * return are missing from this span; verify against the pristine file.
 */
1898 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq
*cq
)
1900 struct cq_base
*hw_cqe
, **hw_cqe_ptr
;
1901 unsigned long flags
;
1902 u32 sw_cons
, raw_cons
;
1905 spin_lock_irqsave(&cq
->hwq
.lock
, flags
);
1906 raw_cons
= cq
->hwq
.cons
;
1907 sw_cons
= HWQ_CMP(raw_cons
, &cq
->hwq
);
1908 hw_cqe_ptr
= (struct cq_base
**)cq
->hwq
.pbl_ptr
;
1909 hw_cqe
= &hw_cqe_ptr
[CQE_PG(sw_cons
)][CQE_IDX(sw_cons
)];
1911 /* Check for Valid bit. If the CQE is valid, return false */
1912 rc
= !CQE_CMP_VALID(hw_cqe
, raw_cons
, cq
->hwq
.max_elements
);
1913 spin_unlock_irqrestore(&cq
->hwq
.lock
, flags
);
/*
 * bnxt_qplib_cq_process_res_raweth_qp1() - translate a hardware
 * raw-Ethertype / QP1 (GSI) receive CQE into a qplib CQE.
 *
 * Copies flags/length/pkey/raweth flags from the hardware CQE, copies
 * the QP's cached source MAC, and contains a workaround branch for a
 * length misdetection on QP1 when the reported length is zero. A non-OK
 * status starts an RQ flush via __flush_rq().
 *
 * NOTE(review): damaged extraction -- the NULL-qp return, the wr_id_idx
 * assignment target, the workaround branch body, budget/consumer
 * bookkeeping and returns are missing; verify against the pristine file.
 */
1917 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq
*cq
,
1918 struct cq_res_raweth_qp1
*hwcqe
,
1919 struct bnxt_qplib_cqe
**pcqe
,
1922 struct bnxt_qplib_qp
*qp
;
1923 struct bnxt_qplib_q
*rq
;
1924 struct bnxt_qplib_cqe
*cqe
;
1928 qp
= (struct bnxt_qplib_qp
*)((unsigned long)
1929 le64_to_cpu(hwcqe
->qp_handle
));
1931 dev_err(&cq
->hwq
.pdev
->dev
,
1932 "QPLIB: process_cq Raw/QP1 qp is NULL");
1936 cqe
->opcode
= hwcqe
->cqe_type_toggle
& CQ_BASE_CQE_TYPE_MASK
;
1937 cqe
->flags
= le16_to_cpu(hwcqe
->flags
);
1938 cqe
->qp_handle
= (u64
)(unsigned long)qp
;
1941 le32_to_cpu(hwcqe
->raweth_qp1_payload_offset_srq_or_rq_wr_id
)
1942 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK
;
1943 cqe
->src_qp
= qp
->id
;
1944 if (qp
->id
== 1 && !cqe
->length
) {
1945 /* Add workaround for the length misdetection */
1948 cqe
->length
= le16_to_cpu(hwcqe
->length
);
1950 cqe
->pkey_index
= qp
->pkey_index
;
/* Use the QP's cached source MAC for Raw/QP1 receives. */
1951 memcpy(cqe
->smac
, qp
->smac
, 6);
1953 cqe
->raweth_qp1_flags
= le16_to_cpu(hwcqe
->raweth_qp1_flags
);
1954 cqe
->raweth_qp1_flags2
= le32_to_cpu(hwcqe
->raweth_qp1_flags2
);
1957 if (wr_id_idx
> rq
->hwq
.max_elements
) {
1958 dev_err(&cq
->hwq
.pdev
->dev
, "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
1959 dev_err(&cq
->hwq
.pdev
->dev
, "QPLIB: ix 0x%x exceeded RQ max 0x%x",
1960 wr_id_idx
, rq
->hwq
.max_elements
);
1963 if (rq
->flush_in_progress
)
1966 cqe
->wr_id
= rq
->swq
[wr_id_idx
].wr_id
;
1972 if (hwcqe
->status
!= CQ_RES_RC_STATUS_OK
) {
1973 rq
->flush_in_progress
= true;
1975 rc
= __flush_rq(rq
, qp
, CQ_BASE_CQE_TYPE_RES_RAWETH_QP1
, pcqe
,
1978 rq
->flush_in_progress
= false;
/*
 * bnxt_qplib_cq_process_terminal() - handle a hardware TERMINAL CQE.
 *
 * Moves the QP to the error state, completes (with status OK) any
 * aggregated signaled SQ completions up to the CQE-reported SQ consumer
 * index, then flushes the SQ with __flush_sq(). For the RQ, validates the
 * reported consumer index (0xFFFF means "none") and flushes all posted
 * RQEs with __flush_rq(), choosing the flush-CQE opcode from the QP type
 * (GSI -> RES_RAWETH_QP1, RC -> RES_RC, UD -> RES_UD).
 *
 * NOTE(review): damaged extraction -- loop constructs, the switch
 * statement head, budget/consumer bookkeeping, several branch bodies and
 * returns are missing from this span; verify against the pristine file.
 */
1983 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq
*cq
,
1984 struct cq_terminal
*hwcqe
,
1985 struct bnxt_qplib_cqe
**pcqe
,
1988 struct bnxt_qplib_qp
*qp
;
1989 struct bnxt_qplib_q
*sq
, *rq
;
1990 struct bnxt_qplib_cqe
*cqe
;
1991 u32 sw_cons
= 0, cqe_cons
;
1995 /* Check the Status */
1996 if (hwcqe
->status
!= CQ_TERMINAL_STATUS_OK
)
1997 dev_warn(&cq
->hwq
.pdev
->dev
,
1998 "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
2001 qp
= (struct bnxt_qplib_qp
*)((unsigned long)
2002 le64_to_cpu(hwcqe
->qp_handle
));
2004 dev_err(&cq
->hwq
.pdev
->dev
,
2005 "QPLIB: FP: CQ Process terminal qp is NULL");
2008 /* Must block new posting of SQ and RQ */
2009 qp
->state
= CMDQ_MODIFY_QP_NEW_STATE_ERR
;
2014 cqe_cons
= le16_to_cpu(hwcqe
->sq_cons_idx
);
/* 0xFFFF means the CQE reports no SQ consumer index. */
2015 if (cqe_cons
== 0xFFFF)
2018 if (cqe_cons
> sq
->hwq
.max_elements
) {
2019 dev_err(&cq
->hwq
.pdev
->dev
,
2020 "QPLIB: FP: CQ Process terminal reported ");
2021 dev_err(&cq
->hwq
.pdev
->dev
,
2022 "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
2023 cqe_cons
, sq
->hwq
.max_elements
);
2026 /* If we were in the middle of flushing, continue */
2027 if (sq
->flush_in_progress
)
2030 /* Terminal CQE can also include aggregated successful CQEs prior.
2031 * So we must complete all CQEs from the current sq's cons to the
2032 * cq_cons with status OK
2036 sw_cons
= HWQ_CMP(sq
->hwq
.cons
, &sq
->hwq
);
2037 if (sw_cons
== cqe_cons
)
/* Only signaled SQEs produce a completion on this path. */
2039 if (sq
->swq
[sw_cons
].flags
& SQ_SEND_FLAGS_SIGNAL_COMP
) {
2040 memset(cqe
, 0, sizeof(*cqe
));
2041 cqe
->status
= CQ_REQ_STATUS_OK
;
2042 cqe
->opcode
= CQ_BASE_CQE_TYPE_REQ
;
2043 cqe
->qp_handle
= (u64
)(unsigned long)qp
;
2044 cqe
->src_qp
= qp
->id
;
2045 cqe
->wr_id
= sq
->swq
[sw_cons
].wr_id
;
2046 cqe
->type
= sq
->swq
[sw_cons
].type
;
2053 if (!(*budget
) && sw_cons
!= cqe_cons
) {
2058 sq
->flush_in_progress
= true;
2060 rc
= __flush_sq(sq
, qp
, pcqe
, budget
);
2062 sq
->flush_in_progress
= false;
2067 cqe_cons
= le16_to_cpu(hwcqe
->rq_cons_idx
);
2068 if (cqe_cons
== 0xFFFF) {
2070 } else if (cqe_cons
> rq
->hwq
.max_elements
) {
2071 dev_err(&cq
->hwq
.pdev
->dev
,
2072 "QPLIB: FP: CQ Processed terminal ");
2073 dev_err(&cq
->hwq
.pdev
->dev
,
2074 "QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
2075 cqe_cons
, rq
->hwq
.max_elements
);
2078 /* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2079 * from the current rq->cons to the rq->prod regardless what the
2080 * rq->cons the terminal CQE indicates
2082 rq
->flush_in_progress
= true;
/* Pick the flush-CQE opcode from the QP type. */
2084 case CMDQ_CREATE_QP1_TYPE_GSI
:
2085 opcode
= CQ_BASE_CQE_TYPE_RES_RAWETH_QP1
;
2087 case CMDQ_CREATE_QP_TYPE_RC
:
2088 opcode
= CQ_BASE_CQE_TYPE_RES_RC
;
2090 case CMDQ_CREATE_QP_TYPE_UD
:
2091 opcode
= CQ_BASE_CQE_TYPE_RES_UD
;
2095 rc
= __flush_rq(rq
, qp
, opcode
, pcqe
, budget
);
2097 rq
->flush_in_progress
= false;
/*
 * bnxt_qplib_cq_process_cutoff() - handle a CUT_OFF CQE, the marker the
 * hardware emits when a CQ resize completes.
 *
 * On OK status, clears CQ_FLAGS_RESIZE_IN_PROG and wakes any waiter on
 * cq->waitq; a bad status is logged as an error.
 *
 * NOTE(review): damaged extraction -- the error-branch return and the
 * final return are missing from this span; verify against the pristine
 * file.
 */
2102 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq
*cq
,
2103 struct cq_cutoff
*hwcqe
)
2105 /* Check the Status */
2106 if (hwcqe
->status
!= CQ_CUTOFF_STATUS_OK
) {
2107 dev_err(&cq
->hwq
.pdev
->dev
,
2108 "QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
2112 clear_bit(CQ_FLAGS_RESIZE_IN_PROG
, &cq
->flags
);
2113 wake_up_interruptible(&cq
->waitq
);
/*
 * bnxt_qplib_poll_cq() - poll up to num_cqes completions from the CQ.
 *
 * Under cq->hwq.lock, repeatedly reads the hardware CQE at the software
 * consumer index, checks its valid bit, and dispatches on the CQE type
 * to the per-type processors (REQ, RES_RC, RES_UD, RES_RAWETH_QP1,
 * TERMINAL, CUT_OFF); unknown types are logged. If any CQEs were
 * consumed, the updated consumer index is committed and the CQ re-armed
 * with DBR_DBR_TYPE_CQ. Returns the number of CQEs delivered
 * (num_cqes - remaining budget).
 *
 * NOTE(review): damaged extraction -- the polling loop construct, budget
 * initialization, raw_cons advancement, break statements and several
 * call-argument lines are missing from this span; verify against the
 * pristine file before editing.
 */
2118 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq
*cq
, struct bnxt_qplib_cqe
*cqe
,
2119 int num_cqes
, struct bnxt_qplib_qp
**lib_qp
)
2121 struct cq_base
*hw_cqe
, **hw_cqe_ptr
;
2122 unsigned long flags
;
2123 u32 sw_cons
, raw_cons
;
2126 spin_lock_irqsave(&cq
->hwq
.lock
, flags
);
2127 raw_cons
= cq
->hwq
.cons
;
2131 sw_cons
= HWQ_CMP(raw_cons
, &cq
->hwq
);
2132 hw_cqe_ptr
= (struct cq_base
**)cq
->hwq
.pbl_ptr
;
2133 hw_cqe
= &hw_cqe_ptr
[CQE_PG(sw_cons
)][CQE_IDX(sw_cons
)];
2135 /* Check for Valid bit */
2136 if (!CQE_CMP_VALID(hw_cqe
, raw_cons
, cq
->hwq
.max_elements
))
2139 /* From the device's respective CQE format to qplib_wc*/
2140 switch (hw_cqe
->cqe_type_toggle
& CQ_BASE_CQE_TYPE_MASK
) {
2141 case CQ_BASE_CQE_TYPE_REQ
:
2142 rc
= bnxt_qplib_cq_process_req(cq
,
2143 (struct cq_req
*)hw_cqe
,
2147 case CQ_BASE_CQE_TYPE_RES_RC
:
2148 rc
= bnxt_qplib_cq_process_res_rc(cq
,
2149 (struct cq_res_rc
*)
2153 case CQ_BASE_CQE_TYPE_RES_UD
:
2154 rc
= bnxt_qplib_cq_process_res_ud
2155 (cq
, (struct cq_res_ud
*)hw_cqe
, &cqe
,
2158 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1
:
2159 rc
= bnxt_qplib_cq_process_res_raweth_qp1
2160 (cq
, (struct cq_res_raweth_qp1
*)
2161 hw_cqe
, &cqe
, &budget
);
2163 case CQ_BASE_CQE_TYPE_TERMINAL
:
2164 rc
= bnxt_qplib_cq_process_terminal
2165 (cq
, (struct cq_terminal
*)hw_cqe
,
2168 case CQ_BASE_CQE_TYPE_CUT_OFF
:
2169 bnxt_qplib_cq_process_cutoff
2170 (cq
, (struct cq_cutoff
*)hw_cqe
);
2171 /* Done processing this CQ */
2174 dev_err(&cq
->hwq
.pdev
->dev
,
2175 "QPLIB: process_cq unknown type 0x%lx",
2176 hw_cqe
->cqe_type_toggle
&
2177 CQ_BASE_CQE_TYPE_MASK
);
2184 /* Error while processing the CQE, just skip to the
2187 dev_err(&cq
->hwq
.pdev
->dev
,
2188 "QPLIB: process_cqe error rc = 0x%x", rc
);
/* Commit the new consumer index and re-arm only if progress was made. */
2192 if (cq
->hwq
.cons
!= raw_cons
) {
2193 cq
->hwq
.cons
= raw_cons
;
2194 bnxt_qplib_arm_cq(cq
, DBR_DBR_TYPE_CQ
);
2197 spin_unlock_irqrestore(&cq
->hwq
.lock
, flags
);
2198 return num_cqes
- budget
;
2201 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq
*cq
, u32 arm_type
)
2203 unsigned long flags
;
2205 spin_lock_irqsave(&cq
->hwq
.lock
, flags
);
2207 bnxt_qplib_arm_cq(cq
, arm_type
);
2209 spin_unlock_irqrestore(&cq
->hwq
.lock
, flags
);