2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Description: RDMA Controller HW interface
38 #include <linux/interrupt.h>
39 #include <linux/spinlock.h>
40 #include <linux/pci.h>
41 #include <linux/prefetch.h>
42 #include <linux/delay.h>
45 #include "qplib_res.h"
46 #include "qplib_rcfw.h"
/* Forward declaration: CREQ service routine, used as the tasklet handler
 * and polled directly by __block_for_resp().
 */
static void bnxt_qplib_service_creq(unsigned long data);
52 /* Hardware communication channel */
53 static int __wait_for_resp(struct bnxt_qplib_rcfw
*rcfw
, u16 cookie
)
58 cbit
= cookie
% RCFW_MAX_OUTSTANDING_CMD
;
59 rc
= wait_event_timeout(rcfw
->waitq
,
60 !test_bit(cbit
, rcfw
->cmdq_bitmap
),
61 msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS
));
62 return rc
? 0 : -ETIMEDOUT
;
65 static int __block_for_resp(struct bnxt_qplib_rcfw
*rcfw
, u16 cookie
)
67 u32 count
= RCFW_BLOCKED_CMD_WAIT_COUNT
;
70 cbit
= cookie
% RCFW_MAX_OUTSTANDING_CMD
;
71 if (!test_bit(cbit
, rcfw
->cmdq_bitmap
))
74 mdelay(1); /* 1m sec */
75 bnxt_qplib_service_creq((unsigned long)rcfw
);
76 } while (test_bit(cbit
, rcfw
->cmdq_bitmap
) && --count
);
78 return count
? 0 : -ETIMEDOUT
;
81 static int __send_message(struct bnxt_qplib_rcfw
*rcfw
, struct cmdq_base
*req
,
82 struct creq_base
*resp
, void *sb
, u8 is_block
)
84 struct bnxt_qplib_cmdqe
*cmdqe
, **cmdq_ptr
;
85 struct bnxt_qplib_hwq
*cmdq
= &rcfw
->cmdq
;
86 struct bnxt_qplib_crsq
*crsqe
;
87 u32 sw_prod
, cmdq_prod
;
95 if (!test_bit(FIRMWARE_INITIALIZED_FLAG
, &rcfw
->flags
) &&
96 (opcode
!= CMDQ_BASE_OPCODE_QUERY_FUNC
&&
97 opcode
!= CMDQ_BASE_OPCODE_INITIALIZE_FW
)) {
98 dev_err(&rcfw
->pdev
->dev
,
99 "QPLIB: RCFW not initialized, reject opcode 0x%x",
104 if (test_bit(FIRMWARE_INITIALIZED_FLAG
, &rcfw
->flags
) &&
105 opcode
== CMDQ_BASE_OPCODE_INITIALIZE_FW
) {
106 dev_err(&rcfw
->pdev
->dev
, "QPLIB: RCFW already initialized!");
110 /* Cmdq are in 16-byte units, each request can consume 1 or more
113 spin_lock_irqsave(&cmdq
->lock
, flags
);
114 if (req
->cmd_size
>= HWQ_FREE_SLOTS(cmdq
)) {
115 dev_err(&rcfw
->pdev
->dev
, "QPLIB: RCFW: CMDQ is full!");
116 spin_unlock_irqrestore(&cmdq
->lock
, flags
);
121 cookie
= rcfw
->seq_num
& RCFW_MAX_COOKIE_VALUE
;
122 cbit
= cookie
% RCFW_MAX_OUTSTANDING_CMD
;
124 cookie
|= RCFW_CMD_IS_BLOCKING
;
126 set_bit(cbit
, rcfw
->cmdq_bitmap
);
127 req
->cookie
= cpu_to_le16(cookie
);
128 crsqe
= &rcfw
->crsqe_tbl
[cbit
];
130 spin_unlock_irqrestore(&cmdq
->lock
, flags
);
133 memset(resp
, 0, sizeof(*resp
));
134 crsqe
->resp
= (struct creq_qp_event
*)resp
;
135 crsqe
->resp
->cookie
= req
->cookie
;
136 crsqe
->req_size
= req
->cmd_size
;
137 if (req
->resp_size
&& sb
) {
138 struct bnxt_qplib_rcfw_sbuf
*sbuf
= sb
;
140 req
->resp_addr
= cpu_to_le64(sbuf
->dma_addr
);
141 req
->resp_size
= (sbuf
->size
+ BNXT_QPLIB_CMDQE_UNITS
- 1) /
142 BNXT_QPLIB_CMDQE_UNITS
;
145 cmdq_ptr
= (struct bnxt_qplib_cmdqe
**)cmdq
->pbl_ptr
;
147 size
= req
->cmd_size
* BNXT_QPLIB_CMDQE_UNITS
;
152 /* Locate the next cmdq slot */
153 sw_prod
= HWQ_CMP(cmdq
->prod
, cmdq
);
154 cmdqe
= &cmdq_ptr
[get_cmdq_pg(sw_prod
)][get_cmdq_idx(sw_prod
)];
156 dev_err(&rcfw
->pdev
->dev
,
157 "QPLIB: RCFW request failed with no cmdqe!");
160 /* Copy a segment of the req cmd to the cmdq */
161 memset(cmdqe
, 0, sizeof(*cmdqe
));
162 memcpy(cmdqe
, preq
, min_t(u32
, size
, sizeof(*cmdqe
)));
163 preq
+= min_t(u32
, size
, sizeof(*cmdqe
));
164 size
-= min_t(u32
, size
, sizeof(*cmdqe
));
171 cmdq_prod
= cmdq
->prod
;
172 if (rcfw
->flags
& FIRMWARE_FIRST_FLAG
) {
173 /* The very first doorbell write
174 * is required to set this flag
175 * which prompts the FW to reset
176 * its internal pointers
178 cmdq_prod
|= FIRMWARE_FIRST_FLAG
;
179 rcfw
->flags
&= ~FIRMWARE_FIRST_FLAG
;
184 writel(cmdq_prod
, rcfw
->cmdq_bar_reg_iomem
+
185 rcfw
->cmdq_bar_reg_prod_off
);
186 writel(RCFW_CMDQ_TRIG_VAL
, rcfw
->cmdq_bar_reg_iomem
+
187 rcfw
->cmdq_bar_reg_trig_off
);
189 spin_unlock_irqrestore(&cmdq
->lock
, flags
);
190 /* Return the CREQ response pointer */
194 int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw
*rcfw
,
195 struct cmdq_base
*req
,
196 struct creq_base
*resp
,
197 void *sb
, u8 is_block
)
199 struct creq_qp_event
*evnt
= (struct creq_qp_event
*)resp
;
201 u8 opcode
, retry_cnt
= 0xFF;
205 opcode
= req
->opcode
;
206 rc
= __send_message(rcfw
, req
, resp
, sb
, is_block
);
207 cookie
= le16_to_cpu(req
->cookie
) & RCFW_MAX_COOKIE_VALUE
;
211 if (!retry_cnt
|| (rc
!= -EAGAIN
&& rc
!= -EBUSY
)) {
213 dev_err(&rcfw
->pdev
->dev
, "QPLIB: cmdq[%#x]=%#x send failed",
217 is_block
? mdelay(1) : usleep_range(500, 1000);
219 } while (retry_cnt
--);
222 rc
= __block_for_resp(rcfw
, cookie
);
224 rc
= __wait_for_resp(rcfw
, cookie
);
227 dev_err(&rcfw
->pdev
->dev
, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
228 cookie
, opcode
, RCFW_CMD_WAIT_TIME_MS
);
233 /* failed with status */
234 dev_err(&rcfw
->pdev
->dev
, "QPLIB: cmdq[%#x]=%#x status %#x",
235 cookie
, opcode
, evnt
->status
);
242 static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw
*rcfw
,
243 struct creq_func_event
*func_event
)
245 switch (func_event
->event
) {
246 case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR
:
248 case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR
:
250 case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR
:
252 case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR
:
254 case CREQ_FUNC_EVENT_EVENT_CQ_ERROR
:
256 case CREQ_FUNC_EVENT_EVENT_TQM_ERROR
:
258 case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR
:
260 case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR
:
261 /* SRQ ctx error, call srq_handler??
262 * But there's no SRQ handle!
265 case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR
:
267 case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR
:
269 case CREQ_FUNC_EVENT_EVENT_TIM_ERROR
:
271 case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST
:
273 case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED
:
281 static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw
*rcfw
,
282 struct creq_qp_event
*qp_event
)
284 struct bnxt_qplib_hwq
*cmdq
= &rcfw
->cmdq
;
285 struct creq_qp_error_notification
*err_event
;
286 struct bnxt_qplib_crsq
*crsqe
;
288 struct bnxt_qplib_qp
*qp
;
289 u16 cbit
, blocked
= 0;
294 switch (qp_event
->event
) {
295 case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION
:
296 err_event
= (struct creq_qp_error_notification
*)qp_event
;
297 qp_id
= le32_to_cpu(err_event
->xid
);
298 qp
= rcfw
->qp_tbl
[qp_id
].qp_handle
;
299 dev_dbg(&rcfw
->pdev
->dev
,
300 "QPLIB: Received QP error notification");
301 dev_dbg(&rcfw
->pdev
->dev
,
302 "QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
303 qp_id
, err_event
->req_err_state_reason
,
304 err_event
->res_err_state_reason
);
305 bnxt_qplib_acquire_cq_locks(qp
, &flags
);
306 bnxt_qplib_mark_qp_error(qp
);
307 bnxt_qplib_release_cq_locks(qp
, &flags
);
310 /* Command Response */
311 spin_lock_irqsave(&cmdq
->lock
, flags
);
312 cookie
= le16_to_cpu(qp_event
->cookie
);
313 mcookie
= qp_event
->cookie
;
314 blocked
= cookie
& RCFW_CMD_IS_BLOCKING
;
315 cookie
&= RCFW_MAX_COOKIE_VALUE
;
316 cbit
= cookie
% RCFW_MAX_OUTSTANDING_CMD
;
317 crsqe
= &rcfw
->crsqe_tbl
[cbit
];
319 crsqe
->resp
->cookie
== mcookie
) {
320 memcpy(crsqe
->resp
, qp_event
, sizeof(*qp_event
));
323 dev_err(&rcfw
->pdev
->dev
,
324 "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
325 crsqe
->resp
? "mismatch" : "collision",
326 crsqe
->resp
? crsqe
->resp
->cookie
: 0, mcookie
);
328 if (!test_and_clear_bit(cbit
, rcfw
->cmdq_bitmap
))
329 dev_warn(&rcfw
->pdev
->dev
,
330 "QPLIB: CMD bit %d was not requested", cbit
);
331 cmdq
->cons
+= crsqe
->req_size
;
335 wake_up(&rcfw
->waitq
);
336 spin_unlock_irqrestore(&cmdq
->lock
, flags
);
341 /* SP - CREQ Completion handlers */
342 static void bnxt_qplib_service_creq(unsigned long data
)
344 struct bnxt_qplib_rcfw
*rcfw
= (struct bnxt_qplib_rcfw
*)data
;
345 struct bnxt_qplib_hwq
*creq
= &rcfw
->creq
;
346 struct creq_base
*creqe
, **creq_ptr
;
347 u32 sw_cons
, raw_cons
;
349 u32 type
, budget
= CREQ_ENTRY_POLL_BUDGET
;
351 /* Service the CREQ until budget is over */
352 spin_lock_irqsave(&creq
->lock
, flags
);
353 raw_cons
= creq
->cons
;
355 sw_cons
= HWQ_CMP(raw_cons
, creq
);
356 creq_ptr
= (struct creq_base
**)creq
->pbl_ptr
;
357 creqe
= &creq_ptr
[get_creq_pg(sw_cons
)][get_creq_idx(sw_cons
)];
358 if (!CREQ_CMP_VALID(creqe
, raw_cons
, creq
->max_elements
))
361 type
= creqe
->type
& CREQ_BASE_TYPE_MASK
;
363 case CREQ_BASE_TYPE_QP_EVENT
:
364 bnxt_qplib_process_qp_event
365 (rcfw
, (struct creq_qp_event
*)creqe
);
366 rcfw
->creq_qp_event_processed
++;
368 case CREQ_BASE_TYPE_FUNC_EVENT
:
369 if (!bnxt_qplib_process_func_event
370 (rcfw
, (struct creq_func_event
*)creqe
))
371 rcfw
->creq_func_event_processed
++;
374 (&rcfw
->pdev
->dev
, "QPLIB:aeqe:%#x Not handled",
378 dev_warn(&rcfw
->pdev
->dev
, "QPLIB: creqe with ");
379 dev_warn(&rcfw
->pdev
->dev
,
380 "QPLIB: op_event = 0x%x not handled", type
);
387 if (creq
->cons
!= raw_cons
) {
388 creq
->cons
= raw_cons
;
389 CREQ_DB_REARM(rcfw
->creq_bar_reg_iomem
, raw_cons
,
392 spin_unlock_irqrestore(&creq
->lock
, flags
);
395 static irqreturn_t
bnxt_qplib_creq_irq(int irq
, void *dev_instance
)
397 struct bnxt_qplib_rcfw
*rcfw
= dev_instance
;
398 struct bnxt_qplib_hwq
*creq
= &rcfw
->creq
;
399 struct creq_base
**creq_ptr
;
402 /* Prefetch the CREQ element */
403 sw_cons
= HWQ_CMP(creq
->cons
, creq
);
404 creq_ptr
= (struct creq_base
**)rcfw
->creq
.pbl_ptr
;
405 prefetch(&creq_ptr
[get_creq_pg(sw_cons
)][get_creq_idx(sw_cons
)]);
407 tasklet_schedule(&rcfw
->worker
);
413 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw
*rcfw
)
415 struct cmdq_deinitialize_fw req
;
416 struct creq_deinitialize_fw_resp resp
;
420 RCFW_CMD_PREP(req
, DEINITIALIZE_FW
, cmd_flags
);
421 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
, (void *)&resp
,
426 clear_bit(FIRMWARE_INITIALIZED_FLAG
, &rcfw
->flags
);
430 static int __get_pbl_pg_idx(struct bnxt_qplib_pbl
*pbl
)
432 return (pbl
->pg_size
== ROCE_PG_SIZE_4K
?
433 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K
:
434 pbl
->pg_size
== ROCE_PG_SIZE_8K
?
435 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K
:
436 pbl
->pg_size
== ROCE_PG_SIZE_64K
?
437 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K
:
438 pbl
->pg_size
== ROCE_PG_SIZE_2M
?
439 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M
:
440 pbl
->pg_size
== ROCE_PG_SIZE_8M
?
441 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M
:
442 pbl
->pg_size
== ROCE_PG_SIZE_1G
?
443 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G
:
444 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K
);
447 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw
*rcfw
,
448 struct bnxt_qplib_ctx
*ctx
, int is_virtfn
)
450 struct cmdq_initialize_fw req
;
451 struct creq_initialize_fw_resp resp
;
452 u16 cmd_flags
= 0, level
;
455 RCFW_CMD_PREP(req
, INITIALIZE_FW
, cmd_flags
);
458 * VFs need not setup the HW context area, PF
459 * shall setup this area for VF. Skipping the
465 level
= ctx
->qpc_tbl
.level
;
466 req
.qpc_pg_size_qpc_lvl
= (level
<< CMDQ_INITIALIZE_FW_QPC_LVL_SFT
) |
467 __get_pbl_pg_idx(&ctx
->qpc_tbl
.pbl
[level
]);
468 level
= ctx
->mrw_tbl
.level
;
469 req
.mrw_pg_size_mrw_lvl
= (level
<< CMDQ_INITIALIZE_FW_MRW_LVL_SFT
) |
470 __get_pbl_pg_idx(&ctx
->mrw_tbl
.pbl
[level
]);
471 level
= ctx
->srqc_tbl
.level
;
472 req
.srq_pg_size_srq_lvl
= (level
<< CMDQ_INITIALIZE_FW_SRQ_LVL_SFT
) |
473 __get_pbl_pg_idx(&ctx
->srqc_tbl
.pbl
[level
]);
474 level
= ctx
->cq_tbl
.level
;
475 req
.cq_pg_size_cq_lvl
= (level
<< CMDQ_INITIALIZE_FW_CQ_LVL_SFT
) |
476 __get_pbl_pg_idx(&ctx
->cq_tbl
.pbl
[level
]);
477 level
= ctx
->srqc_tbl
.level
;
478 req
.srq_pg_size_srq_lvl
= (level
<< CMDQ_INITIALIZE_FW_SRQ_LVL_SFT
) |
479 __get_pbl_pg_idx(&ctx
->srqc_tbl
.pbl
[level
]);
480 level
= ctx
->cq_tbl
.level
;
481 req
.cq_pg_size_cq_lvl
= (level
<< CMDQ_INITIALIZE_FW_CQ_LVL_SFT
) |
482 __get_pbl_pg_idx(&ctx
->cq_tbl
.pbl
[level
]);
483 level
= ctx
->tim_tbl
.level
;
484 req
.tim_pg_size_tim_lvl
= (level
<< CMDQ_INITIALIZE_FW_TIM_LVL_SFT
) |
485 __get_pbl_pg_idx(&ctx
->tim_tbl
.pbl
[level
]);
486 level
= ctx
->tqm_pde_level
;
487 req
.tqm_pg_size_tqm_lvl
= (level
<< CMDQ_INITIALIZE_FW_TQM_LVL_SFT
) |
488 __get_pbl_pg_idx(&ctx
->tqm_pde
.pbl
[level
]);
491 cpu_to_le64(ctx
->qpc_tbl
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
493 cpu_to_le64(ctx
->mrw_tbl
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
495 cpu_to_le64(ctx
->srqc_tbl
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
497 cpu_to_le64(ctx
->cq_tbl
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
499 cpu_to_le64(ctx
->tim_tbl
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
501 cpu_to_le64(ctx
->tqm_pde
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
503 req
.number_of_qp
= cpu_to_le32(ctx
->qpc_tbl
.max_elements
);
504 req
.number_of_mrw
= cpu_to_le32(ctx
->mrw_tbl
.max_elements
);
505 req
.number_of_srq
= cpu_to_le32(ctx
->srqc_tbl
.max_elements
);
506 req
.number_of_cq
= cpu_to_le32(ctx
->cq_tbl
.max_elements
);
508 req
.max_qp_per_vf
= cpu_to_le32(ctx
->vf_res
.max_qp_per_vf
);
509 req
.max_mrw_per_vf
= cpu_to_le32(ctx
->vf_res
.max_mrw_per_vf
);
510 req
.max_srq_per_vf
= cpu_to_le32(ctx
->vf_res
.max_srq_per_vf
);
511 req
.max_cq_per_vf
= cpu_to_le32(ctx
->vf_res
.max_cq_per_vf
);
512 req
.max_gid_per_vf
= cpu_to_le32(ctx
->vf_res
.max_gid_per_vf
);
515 req
.stat_ctx_id
= cpu_to_le32(ctx
->stats
.fw_id
);
516 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
, (void *)&resp
,
520 set_bit(FIRMWARE_INITIALIZED_FLAG
, &rcfw
->flags
);
524 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw
*rcfw
)
527 kfree(rcfw
->crsqe_tbl
);
528 bnxt_qplib_free_hwq(rcfw
->pdev
, &rcfw
->cmdq
);
529 bnxt_qplib_free_hwq(rcfw
->pdev
, &rcfw
->creq
);
533 int bnxt_qplib_alloc_rcfw_channel(struct pci_dev
*pdev
,
534 struct bnxt_qplib_rcfw
*rcfw
,
538 rcfw
->creq
.max_elements
= BNXT_QPLIB_CREQE_MAX_CNT
;
539 if (bnxt_qplib_alloc_init_hwq(rcfw
->pdev
, &rcfw
->creq
, NULL
, 0,
540 &rcfw
->creq
.max_elements
,
541 BNXT_QPLIB_CREQE_UNITS
, 0, PAGE_SIZE
,
543 dev_err(&rcfw
->pdev
->dev
,
544 "QPLIB: HW channel CREQ allocation failed");
547 rcfw
->cmdq
.max_elements
= BNXT_QPLIB_CMDQE_MAX_CNT
;
548 if (bnxt_qplib_alloc_init_hwq(rcfw
->pdev
, &rcfw
->cmdq
, NULL
, 0,
549 &rcfw
->cmdq
.max_elements
,
550 BNXT_QPLIB_CMDQE_UNITS
, 0, PAGE_SIZE
,
552 dev_err(&rcfw
->pdev
->dev
,
553 "QPLIB: HW channel CMDQ allocation failed");
557 rcfw
->crsqe_tbl
= kcalloc(rcfw
->cmdq
.max_elements
,
558 sizeof(*rcfw
->crsqe_tbl
), GFP_KERNEL
);
559 if (!rcfw
->crsqe_tbl
)
562 rcfw
->qp_tbl_size
= qp_tbl_sz
;
563 rcfw
->qp_tbl
= kcalloc(qp_tbl_sz
, sizeof(struct bnxt_qplib_qp_node
),
571 bnxt_qplib_free_rcfw_channel(rcfw
);
575 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw
*rcfw
)
579 /* Make sure the HW channel is stopped! */
580 synchronize_irq(rcfw
->vector
);
581 tasklet_disable(&rcfw
->worker
);
582 tasklet_kill(&rcfw
->worker
);
584 if (rcfw
->requested
) {
585 free_irq(rcfw
->vector
, rcfw
);
586 rcfw
->requested
= false;
588 if (rcfw
->cmdq_bar_reg_iomem
)
589 iounmap(rcfw
->cmdq_bar_reg_iomem
);
590 rcfw
->cmdq_bar_reg_iomem
= NULL
;
592 if (rcfw
->creq_bar_reg_iomem
)
593 iounmap(rcfw
->creq_bar_reg_iomem
);
594 rcfw
->creq_bar_reg_iomem
= NULL
;
596 indx
= find_first_bit(rcfw
->cmdq_bitmap
, rcfw
->bmap_size
);
597 if (indx
!= rcfw
->bmap_size
)
598 dev_err(&rcfw
->pdev
->dev
,
599 "QPLIB: disabling RCFW with pending cmd-bit %lx", indx
);
600 kfree(rcfw
->cmdq_bitmap
);
603 rcfw
->aeq_handler
= NULL
;
607 int bnxt_qplib_enable_rcfw_channel(struct pci_dev
*pdev
,
608 struct bnxt_qplib_rcfw
*rcfw
,
610 int cp_bar_reg_off
, int virt_fn
,
611 int (*aeq_handler
)(struct bnxt_qplib_rcfw
*,
612 struct creq_func_event
*))
614 resource_size_t res_base
;
615 struct cmdq_init init
;
621 rcfw
->flags
= FIRMWARE_FIRST_FLAG
;
622 bmap_size
= BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD
*
623 sizeof(unsigned long));
624 rcfw
->cmdq_bitmap
= kzalloc(bmap_size
, GFP_KERNEL
);
625 if (!rcfw
->cmdq_bitmap
)
627 rcfw
->bmap_size
= bmap_size
;
630 rcfw
->cmdq_bar_reg
= RCFW_COMM_PCI_BAR_REGION
;
631 res_base
= pci_resource_start(pdev
, rcfw
->cmdq_bar_reg
);
635 rcfw
->cmdq_bar_reg_iomem
= ioremap_nocache(res_base
+
636 RCFW_COMM_BASE_OFFSET
,
638 if (!rcfw
->cmdq_bar_reg_iomem
) {
639 dev_err(&rcfw
->pdev
->dev
,
640 "QPLIB: CMDQ BAR region %d mapping failed",
645 rcfw
->cmdq_bar_reg_prod_off
= virt_fn
? RCFW_VF_COMM_PROD_OFFSET
:
646 RCFW_PF_COMM_PROD_OFFSET
;
648 rcfw
->cmdq_bar_reg_trig_off
= RCFW_COMM_TRIG_OFFSET
;
651 rcfw
->creq_bar_reg
= RCFW_COMM_CONS_PCI_BAR_REGION
;
652 res_base
= pci_resource_start(pdev
, rcfw
->creq_bar_reg
);
654 dev_err(&rcfw
->pdev
->dev
,
655 "QPLIB: CREQ BAR region %d resc start is 0!",
657 rcfw
->creq_bar_reg_iomem
= ioremap_nocache(res_base
+ cp_bar_reg_off
,
659 if (!rcfw
->creq_bar_reg_iomem
) {
660 dev_err(&rcfw
->pdev
->dev
,
661 "QPLIB: CREQ BAR region %d mapping failed",
665 rcfw
->creq_qp_event_processed
= 0;
666 rcfw
->creq_func_event_processed
= 0;
668 rcfw
->vector
= msix_vector
;
670 rcfw
->aeq_handler
= aeq_handler
;
672 tasklet_init(&rcfw
->worker
, bnxt_qplib_service_creq
,
673 (unsigned long)rcfw
);
675 rcfw
->requested
= false;
676 rc
= request_irq(rcfw
->vector
, bnxt_qplib_creq_irq
, 0,
677 "bnxt_qplib_creq", rcfw
);
679 dev_err(&rcfw
->pdev
->dev
,
680 "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc
);
681 bnxt_qplib_disable_rcfw_channel(rcfw
);
684 rcfw
->requested
= true;
686 init_waitqueue_head(&rcfw
->waitq
);
688 CREQ_DB_REARM(rcfw
->creq_bar_reg_iomem
, 0, rcfw
->creq
.max_elements
);
690 init
.cmdq_pbl
= cpu_to_le64(rcfw
->cmdq
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
691 init
.cmdq_size_cmdq_lvl
= cpu_to_le16(
692 ((BNXT_QPLIB_CMDQE_MAX_CNT
<< CMDQ_INIT_CMDQ_SIZE_SFT
) &
693 CMDQ_INIT_CMDQ_SIZE_MASK
) |
694 ((rcfw
->cmdq
.level
<< CMDQ_INIT_CMDQ_LVL_SFT
) &
695 CMDQ_INIT_CMDQ_LVL_MASK
));
696 init
.creq_ring_id
= cpu_to_le16(rcfw
->creq_ring_id
);
698 /* Write to the Bono mailbox register */
699 __iowrite32_copy(rcfw
->cmdq_bar_reg_iomem
, &init
, sizeof(init
) / 4);
703 struct bnxt_qplib_rcfw_sbuf
*bnxt_qplib_rcfw_alloc_sbuf(
704 struct bnxt_qplib_rcfw
*rcfw
,
707 struct bnxt_qplib_rcfw_sbuf
*sbuf
;
709 sbuf
= kzalloc(sizeof(*sbuf
), GFP_ATOMIC
);
714 sbuf
->sb
= dma_zalloc_coherent(&rcfw
->pdev
->dev
, sbuf
->size
,
715 &sbuf
->dma_addr
, GFP_ATOMIC
);
725 void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw
*rcfw
,
726 struct bnxt_qplib_rcfw_sbuf
*sbuf
)
729 dma_free_coherent(&rcfw
->pdev
->dev
, sbuf
->size
,
730 sbuf
->sb
, sbuf
->dma_addr
);