/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_service_creq(unsigned long data);

/* Hardware communication channel */
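/*
 * __wait_for_resp() - sleep until the firmware completion for @cookie
 * arrives or RCFW_CMD_WAIT_TIME_MS elapses.  The CREQ handler clears the
 * cookie's bit in cmdq_bitmap and wakes this waitqueue.
 */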
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u16 cbit;
	int rc;

	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	rc = wait_event_timeout(rcfw->waitq,
				!test_bit(cbit, rcfw->cmdq_bitmap),
				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
	return rc ? 0 : -ETIMEDOUT;
};

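/*
 * __block_for_resp() - poll for the firmware completion for @cookie in
 * contexts that cannot sleep on the waitqueue: busy-wait in 1 msec steps,
 * driving the CREQ service routine directly, for up to
 * RCFW_BLOCKED_CMD_WAIT_COUNT iterations.
 */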
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
	u16 cbit;

	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (!test_bit(cbit, rcfw->cmdq_bitmap))
		goto done;
	do {
		mdelay(1); /* 1 msec */
		bnxt_qplib_service_creq((unsigned long)rcfw);
	} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
	return count ? 0 : -ETIMEDOUT;
};

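/*
 * __send_message() - post one RCFW command to the CMDQ under cmdq->lock:
 * allocate a cookie, reserve a response slot in crsqe_tbl, copy the request
 * into one or more 16-byte CMDQ entries and ring the CMDQ doorbell.
 * Returns -EAGAIN when the CMDQ is full and -EBUSY when the cookie slot is
 * still in use, so the caller can retry.
 */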
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsq *crsqe;
	u32 sw_prod, cmdq_prod;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	int pg, idx;
	u8 *preq;

	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW not initialized, reject opcode 0x%x",
			opcode);
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
		return -ETIMEDOUT;

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe
	 */
	spin_lock_irqsave(&cmdq->lock, flags);
	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EAGAIN;
	}

	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, rcfw->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	crsqe = &rcfw->crsqe_tbl[cbit];
	if (crsqe->resp) {
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EBUSY;
	}
	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				 BNXT_QPLIB_CMDQE_UNITS;
	}

	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
	do {
		pg = 0;
		idx = 0;

		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
		if (!cmdqe) {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: RCFW request failed with no cmdqe!");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
		rcfw->seq_num++;
	} while (size > 0);

	rcfw->seq_num++;

	cmdq_prod = cmdq->prod;
	if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= FIRMWARE_FIRST_FLAG;
		rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
	}

	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_trig_off);
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	/* The CREQ response is delivered asynchronously via the CREQ handler */
	return 0;
}

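/*
 * bnxt_qplib_rcfw_send_message() - post an RCFW command and wait for its
 * CREQ completion.  Retries __send_message() while the CMDQ is full or the
 * cookie slot is busy, then blocks (for is_block callers) or sleeps until
 * the response arrives.  A timeout marks the firmware as unresponsive via
 * FIRMWARE_TIMED_OUT; a completion with a non-zero status returns -EFAULT.
 */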
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req,
				 struct creq_base *resp,
				 void *sb, u8 is_block)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
	u16 cookie;
	u8 opcode, retry_cnt = 0xFF;
	int rc = 0;

	do {
		opcode = req->opcode;
		rc = __send_message(rcfw, req, resp, sb, is_block);
		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
		if (!rc)
			break;

		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
			/* send failed */
			dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
				cookie, opcode);
			return rc;
		}
		is_block ? mdelay(1) : usleep_range(500, 1000);

	} while (retry_cnt--);

	if (is_block)
		rc = __block_for_resp(rcfw, cookie);
	else
		rc = __wait_for_resp(rcfw, cookie);
	if (rc) {
		/* timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
		set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
		return rc;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}

/* Completions */
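/*
 * bnxt_qplib_process_func_event() - handle asynchronous function-level CREQ
 * events.  All currently known error/VF events are acknowledged without
 * further action; unknown events return -EINVAL.
 */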
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
					 struct creq_func_event *func_event)
{
	switch (func_event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		/* SRQ ctx error, call srq_handler??
		 * But there's no SRQ handle!
		 */
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
		break;
	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

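/*
 * bnxt_qplib_process_qp_event() - handle QP-related CREQ entries.  QP error
 * notifications mark the affected QP as errored under its CQ locks; every
 * other entry is a command response, which is matched by cookie to its
 * pending slot in crsqe_tbl and handed back to the waiter.
 */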
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct creq_qp_error_notification *err_event;
	struct bnxt_qplib_crsq *crsqe;
	unsigned long flags;
	struct bnxt_qplib_qp *qp;
	u16 cbit, blocked = 0;
	u16 cookie;
	__le16 mcookie;
	u32 qp_id;

	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		err_event = (struct creq_qp_error_notification *)qp_event;
		qp_id = le32_to_cpu(err_event->xid);
		qp = rcfw->qp_tbl[qp_id].qp_handle;
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: Received QP error notification");
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
			qp_id, err_event->req_err_state_reason,
			err_event->res_err_state_reason);
		bnxt_qplib_acquire_cq_locks(qp, &flags);
		bnxt_qplib_mark_qp_error(qp);
		bnxt_qplib_release_cq_locks(qp, &flags);
		break;
	default:
		/* Command Response */
		spin_lock_irqsave(&cmdq->lock, flags);
		cookie = le16_to_cpu(qp_event->cookie);
		mcookie = qp_event->cookie;
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
		crsqe = &rcfw->crsqe_tbl[cbit];
		if (crsqe->resp &&
		    crsqe->resp->cookie == mcookie) {
			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
			crsqe->resp = NULL;
		} else {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
				crsqe->resp ? "mismatch" : "collision",
				crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
		}
		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: CMD bit %d was not requested", cbit);
		cmdq->cons += crsqe->req_size;
		crsqe->req_size = 0;

		if (!blocked)
			wake_up(&rcfw->waitq);
		spin_unlock_irqrestore(&cmdq->lock, flags);
	}
	return 0;
}

/* SP - CREQ Completion handlers */
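/*
 * bnxt_qplib_service_creq() - tasklet that drains up to
 * CREQ_ENTRY_POLL_BUDGET CREQ entries, dispatching QP events and function
 * events, then advances the consumer index and re-arms the CREQ doorbell
 * when any entries were consumed.
 */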
static void bnxt_qplib_service_creq(unsigned long data)
{
	struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;

	/* Service the CREQ until budget is over */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
		if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
			break;

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe);
			rcfw->creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				rcfw->creq_func_event_processed++;
			else
				dev_warn
				(&rcfw->pdev->dev, "QPLIB:aeqe:%#x Not handled",
				 type);
			break;
		default:
			dev_warn(&rcfw->pdev->dev, "QPLIB: creqe with ");
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: op_event = 0x%x not handled", type);
			break;
		}
		raw_cons++;
		budget--;
	}

	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
			      creq->max_elements);
	}
	spin_unlock_irqrestore(&creq->lock, flags);
}

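/*
 * bnxt_qplib_creq_irq() - MSI-X handler for the CREQ: prefetch the next
 * CREQ entry and defer the actual processing to the service tasklet.
 */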
static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_rcfw *rcfw = dev_instance;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base **creq_ptr;
	u32 sw_cons;

	/* Prefetch the CREQ element */
	sw_cons = HWQ_CMP(creq->cons, creq);
	creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
	prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);

	tasklet_schedule(&rcfw->worker);

	return IRQ_HANDLED;
}

/* RCFW */
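/*
 * bnxt_qplib_deinit_rcfw() - send DEINITIALIZE_FW and, on success, clear
 * FIRMWARE_INITIALIZED_FLAG so further commands are rejected.
 */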
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct cmdq_deinitialize_fw req;
	struct creq_deinitialize_fw_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

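/*
 * __get_pbl_pg_idx() - translate a PBL page size into the corresponding
 * CMDQ_INITIALIZE_FW page-size encoding, defaulting to 4K.
 */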
static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
{
	return (pbl->pg_size == ROCE_PG_SIZE_4K ?
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
		pbl->pg_size == ROCE_PG_SIZE_8K ?
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
		pbl->pg_size == ROCE_PG_SIZE_64K ?
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
		pbl->pg_size == ROCE_PG_SIZE_2M ?
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
		pbl->pg_size == ROCE_PG_SIZE_8M ?
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
		pbl->pg_size == ROCE_PG_SIZE_1G ?
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
		CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
}

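/*
 * bnxt_qplib_init_rcfw() - send INITIALIZE_FW.  The PF describes the HW
 * context tables (QPC, MRW, SRQ, CQ, TIM, TQM) and the per-VF resource
 * limits; VFs skip the context programming since the PF sets it up on
 * their behalf.
 */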
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
	struct cmdq_initialize_fw req;
	struct creq_initialize_fw_resp resp;
	u16 cmd_flags = 0, level;
	int rc;

	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);

	/*
	 * VFs need not setup the HW context area, PF
	 * shall setup this area for VF. Skipping the
	 * HW programming
	 */
	if (is_virtfn)
		goto skip_ctx_setup;

	level = ctx->qpc_tbl.level;
	req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
	level = ctx->mrw_tbl.level;
	req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
	level = ctx->srqc_tbl.level;
	req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
	level = ctx->cq_tbl.level;
	req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
	level = ctx->tim_tbl.level;
	req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
	level = ctx->tqm_pde_level;
	req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);

	req.qpc_page_dir =
		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.mrw_page_dir =
		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.srq_page_dir =
		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.cq_page_dir =
		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tim_page_dir =
		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tqm_page_dir =
		cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);

	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

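/*
 * bnxt_qplib_free_rcfw_channel() - release the QP handle table, the
 * command/response tracking table and the CREQ/CMDQ hardware queues.
 */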
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	kfree(rcfw->qp_tbl);
	kfree(rcfw->crsqe_tbl);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
	rcfw->pdev = NULL;
}

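/*
 * bnxt_qplib_alloc_rcfw_channel() - allocate the CREQ and CMDQ hardware
 * queues plus the crsqe tracking table and the QP handle table; on any
 * failure everything allocated so far is freed and -ENOMEM is returned.
 */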
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
				  struct bnxt_qplib_rcfw *rcfw,
				  int qp_tbl_sz)
{
	rcfw->pdev = pdev;
	rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
				      &rcfw->creq.max_elements,
				      BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_L2_CMPL)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CREQ allocation failed");
		goto fail;
	}
	rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
				      &rcfw->cmdq.max_elements,
				      BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CMDQ allocation failed");
		goto fail;
	}

	rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	rcfw->qp_tbl_size = qp_tbl_sz;
	rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
			       GFP_KERNEL);
	if (!rcfw->qp_tbl)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}

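/*
 * bnxt_qplib_disable_rcfw_channel() - quiesce the channel: stop the tasklet,
 * free the CREQ IRQ, unmap the CMDQ/CREQ BAR registers and release the
 * outstanding-command bitmap, warning if any command is still pending.
 */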
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	unsigned long indx;

	/* Make sure the HW channel is stopped! */
	synchronize_irq(rcfw->vector);
	tasklet_disable(&rcfw->worker);
	tasklet_kill(&rcfw->worker);

	if (rcfw->requested) {
		free_irq(rcfw->vector, rcfw);
		rcfw->requested = false;
	}
	if (rcfw->cmdq_bar_reg_iomem)
		iounmap(rcfw->cmdq_bar_reg_iomem);
	rcfw->cmdq_bar_reg_iomem = NULL;

	if (rcfw->creq_bar_reg_iomem)
		iounmap(rcfw->creq_bar_reg_iomem);
	rcfw->creq_bar_reg_iomem = NULL;

	indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
	if (indx != rcfw->bmap_size)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
	kfree(rcfw->cmdq_bitmap);
	rcfw->bmap_size = 0;

	rcfw->aeq_handler = NULL;
	rcfw->vector = 0;
}

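/*
 * bnxt_qplib_enable_rcfw_channel() - map the CMDQ and CREQ BAR regions,
 * allocate the outstanding-command bitmap, register the CREQ IRQ and
 * service tasklet, arm the CREQ doorbell and write the cmdq_init descriptor
 * to the firmware (Bono) mailbox.
 */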
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
						      struct creq_func_event *))
{
	resource_size_t res_base;
	struct cmdq_init init;
	u16 bmap_size;
	int rc;

	/* General */
	rcfw->seq_num = 0;
	rcfw->flags = FIRMWARE_FIRST_FLAG;
	bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
				  sizeof(unsigned long));
	rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
	if (!rcfw->cmdq_bitmap)
		return -ENOMEM;
	rcfw->bmap_size = bmap_size;

	/* CMDQ */
	rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
	if (!res_base)
		return -ENOMEM;

	rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
						   RCFW_COMM_BASE_OFFSET,
						   RCFW_COMM_SIZE);
	if (!rcfw->cmdq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CMDQ BAR region %d mapping failed",
			rcfw->cmdq_bar_reg);
		return -ENOMEM;
	}

	rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
					RCFW_PF_COMM_PROD_OFFSET;

	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

	/* CREQ */
	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
	if (!res_base)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d resc start is 0!",
			rcfw->creq_bar_reg);
	rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
						   4);
	if (!rcfw->creq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d mapping failed",
			rcfw->creq_bar_reg);
		return -ENOMEM;
	}
	rcfw->creq_qp_event_processed = 0;
	rcfw->creq_func_event_processed = 0;

	rcfw->vector = msix_vector;
	if (aeq_handler)
		rcfw->aeq_handler = aeq_handler;

	tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
		     (unsigned long)rcfw);

	rcfw->requested = false;
	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
			 "bnxt_qplib_creq", rcfw);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}
	rcfw->requested = true;

	init_waitqueue_head(&rcfw->waitq);

	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);

	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl = cpu_to_le16(
		((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
		 CMDQ_INIT_CMDQ_SIZE_MASK) |
		((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
		 CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);

	/* Write to the Bono mailbox register */
	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
	return 0;
}

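/*
 * bnxt_qplib_rcfw_alloc_sbuf() - allocate a DMA-coherent side buffer;
 * __send_message() points the command's resp_addr/resp_size at it for
 * commands that return extended response data.
 */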
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
		struct bnxt_qplib_rcfw *rcfw,
		u32 size)
{
	struct bnxt_qplib_rcfw_sbuf *sbuf;

	sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
	if (!sbuf)
		return NULL;

	sbuf->size = size;
	sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
				       &sbuf->dma_addr, GFP_ATOMIC);
	if (!sbuf->sb)
		goto bail;

	return sbuf;
bail:
	kfree(sbuf);
	return NULL;
}

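/*
 * bnxt_qplib_rcfw_free_sbuf() - free a side buffer allocated by
 * bnxt_qplib_rcfw_alloc_sbuf().
 */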
void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf)
{
	if (sbuf->sb)
		dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
				  sbuf->sb, sbuf->dma_addr);
	kfree(sbuf);
}