/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
4 #include<rte_bus_pci.h>
6 #include "hinic_compat.h"
7 #include "hinic_pmd_hwdev.h"
8 #include "hinic_pmd_hwif.h"
9 #include "hinic_pmd_wq.h"
10 #include "hinic_pmd_mgmt.h"
11 #include "hinic_pmd_cmdq.h"
12 #include "hinic_pmd_cfg.h"
13 #include "hinic_pmd_niccfg.h"
14 #include "hinic_pmd_nicio.h"
16 #define WQ_PREFETCH_MAX 6
17 #define WQ_PREFETCH_MIN 1
18 #define WQ_PREFETCH_THRESHOLD 256
20 #define DEFAULT_RX_BUF_SIZE ((u16)0xB)
/* Fast recycle mode of released buffers.
 * NOTE(review): enum header/closer reconstructed from fragment — verify
 * against the original source.
 */
enum {
	RECYCLE_MODE_NIC	= 0x0,	/* recycling handled by the NIC */
	RECYCLE_MODE_DPDK	= 0x1,	/* recycling handled by the DPDK PMD */
};
/* Queue buffer related define */
/* Receive buffer sizes (in bytes) supported by the hardware. */
enum hinic_rx_buf_size {
	HINIC_RX_BUF_SIZE_32B = 0x20,
	HINIC_RX_BUF_SIZE_64B = 0x40,
	HINIC_RX_BUF_SIZE_96B = 0x60,
	HINIC_RX_BUF_SIZE_128B = 0x80,
	HINIC_RX_BUF_SIZE_192B = 0xC0,
	HINIC_RX_BUF_SIZE_256B = 0x100,
	HINIC_RX_BUF_SIZE_384B = 0x180,
	HINIC_RX_BUF_SIZE_512B = 0x200,
	HINIC_RX_BUF_SIZE_768B = 0x300,
	HINIC_RX_BUF_SIZE_1K = 0x400,
	HINIC_RX_BUF_SIZE_1_5K = 0x600,
	HINIC_RX_BUF_SIZE_2K = 0x800,
	HINIC_RX_BUF_SIZE_3K = 0xC00,
	HINIC_RX_BUF_SIZE_4K = 0x1000,
	HINIC_RX_BUF_SIZE_8K = 0x2000,
	HINIC_RX_BUF_SIZE_16K = 0x4000,
};
47 const u32 hinic_hw_rx_buf_size
[] = {
48 HINIC_RX_BUF_SIZE_32B
,
49 HINIC_RX_BUF_SIZE_64B
,
50 HINIC_RX_BUF_SIZE_96B
,
51 HINIC_RX_BUF_SIZE_128B
,
52 HINIC_RX_BUF_SIZE_192B
,
53 HINIC_RX_BUF_SIZE_256B
,
54 HINIC_RX_BUF_SIZE_384B
,
55 HINIC_RX_BUF_SIZE_512B
,
56 HINIC_RX_BUF_SIZE_768B
,
58 HINIC_RX_BUF_SIZE_1_5K
,
63 HINIC_RX_BUF_SIZE_16K
,
66 struct hinic_qp_ctxt_header
{
72 struct hinic_sq_ctxt
{
82 u32 pref_wq_pfn_hi_ci
;
92 struct hinic_rq_ctxt
{
103 u32 pref_wq_pfn_hi_ci
;
113 struct hinic_sq_ctxt_block
{
114 struct hinic_qp_ctxt_header cmdq_hdr
;
115 struct hinic_sq_ctxt sq_ctxt
[HINIC_Q_CTXT_MAX
];
118 struct hinic_rq_ctxt_block
{
119 struct hinic_qp_ctxt_header cmdq_hdr
;
120 struct hinic_rq_ctxt rq_ctxt
[HINIC_Q_CTXT_MAX
];
123 struct hinic_clean_queue_ctxt
{
124 struct hinic_qp_ctxt_header cmdq_hdr
;
130 hinic_qp_prepare_cmdq_header(struct hinic_qp_ctxt_header
*qp_ctxt_hdr
,
131 enum hinic_qp_ctxt_type ctxt_type
,
132 u16 num_queues
, u16 max_queues
, u16 q_id
)
134 qp_ctxt_hdr
->queue_type
= ctxt_type
;
135 qp_ctxt_hdr
->num_queues
= num_queues
;
137 if (ctxt_type
== HINIC_QP_CTXT_TYPE_SQ
)
138 qp_ctxt_hdr
->addr_offset
=
139 SQ_CTXT_OFFSET(max_queues
, max_queues
, q_id
);
141 qp_ctxt_hdr
->addr_offset
=
142 RQ_CTXT_OFFSET(max_queues
, max_queues
, q_id
);
144 qp_ctxt_hdr
->addr_offset
= SIZE_16BYTES(qp_ctxt_hdr
->addr_offset
);
146 hinic_cpu_to_be32(qp_ctxt_hdr
, sizeof(*qp_ctxt_hdr
));
149 static void hinic_sq_prepare_ctxt(struct hinic_sq
*sq
, u16 global_qpn
,
150 struct hinic_sq_ctxt
*sq_ctxt
)
152 struct hinic_wq
*wq
= sq
->wq
;
154 u64 wq_page_pfn
, wq_block_pfn
;
155 u32 wq_page_pfn_hi
, wq_page_pfn_lo
;
156 u32 wq_block_pfn_hi
, wq_block_pfn_lo
;
157 u16 pi_start
, ci_start
;
159 ci_start
= (u16
)(wq
->cons_idx
);
160 pi_start
= (u16
)(wq
->prod_idx
);
162 /* read the first page from the HW table */
163 wq_page_addr
= wq
->queue_buf_paddr
;
165 wq_page_pfn
= WQ_PAGE_PFN(wq_page_addr
);
166 wq_page_pfn_hi
= upper_32_bits(wq_page_pfn
);
167 wq_page_pfn_lo
= lower_32_bits(wq_page_pfn
);
169 wq_block_pfn
= WQ_BLOCK_PFN(wq_page_addr
);
170 wq_block_pfn_hi
= upper_32_bits(wq_block_pfn
);
171 wq_block_pfn_lo
= lower_32_bits(wq_block_pfn
);
173 /* must config as ceq disabled */
174 sq_ctxt
->ceq_attr
= SQ_CTXT_CEQ_ATTR_SET(global_qpn
, GLOBAL_SQ_ID
) |
175 SQ_CTXT_CEQ_ATTR_SET(0, ARM
) |
176 SQ_CTXT_CEQ_ATTR_SET(0, CEQ_ID
) |
177 SQ_CTXT_CEQ_ATTR_SET(0, EN
);
179 sq_ctxt
->ci_owner
= SQ_CTXT_CI_SET(ci_start
, IDX
) |
180 SQ_CTXT_CI_SET(1, OWNER
);
183 SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi
, HI_PFN
) |
184 SQ_CTXT_WQ_PAGE_SET(pi_start
, PI
);
186 sq_ctxt
->wq_pfn_lo
= wq_page_pfn_lo
;
188 sq_ctxt
->pref_cache
=
189 SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN
, CACHE_MIN
) |
190 SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX
, CACHE_MAX
) |
191 SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD
, CACHE_THRESHOLD
);
193 sq_ctxt
->pref_owner
= 1;
195 sq_ctxt
->pref_wq_pfn_hi_ci
=
196 SQ_CTXT_PREF_SET(ci_start
, CI
) |
197 SQ_CTXT_PREF_SET(wq_page_pfn_hi
, WQ_PFN_HI
);
199 sq_ctxt
->pref_wq_pfn_lo
= wq_page_pfn_lo
;
201 sq_ctxt
->wq_block_pfn_hi
=
202 SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi
, PFN_HI
);
204 sq_ctxt
->wq_block_pfn_lo
= wq_block_pfn_lo
;
206 hinic_cpu_to_be32(sq_ctxt
, sizeof(*sq_ctxt
));
209 static void hinic_rq_prepare_ctxt(struct hinic_rq
*rq
,
210 struct hinic_rq_ctxt
*rq_ctxt
)
212 struct hinic_wq
*wq
= rq
->wq
;
214 u64 wq_page_pfn
, wq_block_pfn
;
215 u32 wq_page_pfn_hi
, wq_page_pfn_lo
;
216 u32 wq_block_pfn_hi
, wq_block_pfn_lo
;
217 u16 pi_start
, ci_start
;
219 ci_start
= (u16
)(wq
->cons_idx
);
220 pi_start
= (u16
)(wq
->prod_idx
);
222 /* read the first page from the HW table */
223 wq_page_addr
= wq
->queue_buf_paddr
;
225 wq_page_pfn
= WQ_PAGE_PFN(wq_page_addr
);
226 wq_page_pfn_hi
= upper_32_bits(wq_page_pfn
);
227 wq_page_pfn_lo
= lower_32_bits(wq_page_pfn
);
229 wq_block_pfn
= WQ_BLOCK_PFN(wq_page_addr
);
230 wq_block_pfn_hi
= upper_32_bits(wq_block_pfn
);
231 wq_block_pfn_lo
= lower_32_bits(wq_block_pfn
);
233 /* must config as ceq enable but do not generate ceq */
234 rq_ctxt
->ceq_attr
= RQ_CTXT_CEQ_ATTR_SET(1, EN
) |
235 RQ_CTXT_CEQ_ATTR_SET(1, OWNER
);
237 rq_ctxt
->pi_intr_attr
= RQ_CTXT_PI_SET(pi_start
, IDX
) |
238 RQ_CTXT_PI_SET(rq
->msix_entry_idx
, INTR
) |
239 RQ_CTXT_PI_SET(0, CEQ_ARM
);
241 rq_ctxt
->wq_pfn_hi_ci
= RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi
, HI_PFN
) |
242 RQ_CTXT_WQ_PAGE_SET(ci_start
, CI
);
244 rq_ctxt
->wq_pfn_lo
= wq_page_pfn_lo
;
246 rq_ctxt
->pref_cache
=
247 RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN
, CACHE_MIN
) |
248 RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX
, CACHE_MAX
) |
249 RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD
, CACHE_THRESHOLD
);
251 rq_ctxt
->pref_owner
= 1;
253 rq_ctxt
->pref_wq_pfn_hi_ci
=
254 RQ_CTXT_PREF_SET(wq_page_pfn_hi
, WQ_PFN_HI
) |
255 RQ_CTXT_PREF_SET(ci_start
, CI
);
257 rq_ctxt
->pref_wq_pfn_lo
= wq_page_pfn_lo
;
259 rq_ctxt
->pi_paddr_hi
= upper_32_bits(rq
->pi_dma_addr
);
260 rq_ctxt
->pi_paddr_lo
= lower_32_bits(rq
->pi_dma_addr
);
262 rq_ctxt
->wq_block_pfn_hi
=
263 RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi
, PFN_HI
);
265 rq_ctxt
->wq_block_pfn_lo
= wq_block_pfn_lo
;
267 hinic_cpu_to_be32(rq_ctxt
, sizeof(*rq_ctxt
));
270 static int init_sq_ctxts(struct hinic_nic_io
*nic_io
)
272 struct hinic_hwdev
*hwdev
= nic_io
->hwdev
;
273 struct hinic_sq_ctxt_block
*sq_ctxt_block
;
274 struct hinic_sq_ctxt
*sq_ctxt
;
275 struct hinic_cmd_buf
*cmd_buf
;
278 u16 q_id
, curr_id
, global_qpn
, max_ctxts
, i
;
281 cmd_buf
= hinic_alloc_cmd_buf(hwdev
);
283 PMD_DRV_LOG(ERR
, "Failed to allocate cmd buf");
288 /* sq and rq number may not equal */
289 while (q_id
< nic_io
->num_sqs
) {
290 sq_ctxt_block
= cmd_buf
->buf
;
291 sq_ctxt
= sq_ctxt_block
->sq_ctxt
;
293 max_ctxts
= (nic_io
->num_sqs
- q_id
) > HINIC_Q_CTXT_MAX
?
294 HINIC_Q_CTXT_MAX
: (nic_io
->num_sqs
- q_id
);
296 hinic_qp_prepare_cmdq_header(&sq_ctxt_block
->cmdq_hdr
,
297 HINIC_QP_CTXT_TYPE_SQ
, max_ctxts
,
298 nic_io
->max_qps
, q_id
);
300 for (i
= 0; i
< max_ctxts
; i
++) {
302 qp
= &nic_io
->qps
[curr_id
];
303 global_qpn
= nic_io
->global_qpn
+ curr_id
;
305 hinic_sq_prepare_ctxt(&qp
->sq
, global_qpn
, &sq_ctxt
[i
]);
308 cmd_buf
->size
= SQ_CTXT_SIZE(max_ctxts
);
310 err
= hinic_cmdq_direct_resp(hwdev
, HINIC_ACK_TYPE_CMDQ
,
312 HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT
,
313 cmd_buf
, &out_param
, 0);
314 if (err
|| out_param
!= 0) {
315 PMD_DRV_LOG(ERR
, "Failed to set SQ ctxts, err: %d",
324 hinic_free_cmd_buf(hwdev
, cmd_buf
);
329 static int init_rq_ctxts(struct hinic_nic_io
*nic_io
)
331 struct hinic_hwdev
*hwdev
= nic_io
->hwdev
;
332 struct hinic_rq_ctxt_block
*rq_ctxt_block
;
333 struct hinic_rq_ctxt
*rq_ctxt
;
334 struct hinic_cmd_buf
*cmd_buf
;
337 u16 q_id
, curr_id
, max_ctxts
, i
;
340 cmd_buf
= hinic_alloc_cmd_buf(hwdev
);
342 PMD_DRV_LOG(ERR
, "Failed to allocate cmd buf");
347 /* sq and rq number may not equal */
348 while (q_id
< nic_io
->num_rqs
) {
349 rq_ctxt_block
= cmd_buf
->buf
;
350 rq_ctxt
= rq_ctxt_block
->rq_ctxt
;
352 max_ctxts
= (nic_io
->num_rqs
- q_id
) > HINIC_Q_CTXT_MAX
?
353 HINIC_Q_CTXT_MAX
: (nic_io
->num_rqs
- q_id
);
355 hinic_qp_prepare_cmdq_header(&rq_ctxt_block
->cmdq_hdr
,
356 HINIC_QP_CTXT_TYPE_RQ
, max_ctxts
,
357 nic_io
->max_qps
, q_id
);
359 for (i
= 0; i
< max_ctxts
; i
++) {
361 qp
= &nic_io
->qps
[curr_id
];
363 hinic_rq_prepare_ctxt(&qp
->rq
, &rq_ctxt
[i
]);
366 cmd_buf
->size
= RQ_CTXT_SIZE(max_ctxts
);
368 err
= hinic_cmdq_direct_resp(hwdev
, HINIC_ACK_TYPE_CMDQ
,
370 HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT
,
371 cmd_buf
, &out_param
, 0);
372 if ((err
) || out_param
!= 0) {
373 PMD_DRV_LOG(ERR
, "Failed to set RQ ctxts");
381 hinic_free_cmd_buf(hwdev
, cmd_buf
);
/* Initialize both SQ and RQ contexts; non-zero if either step fails. */
static int init_qp_ctxts(struct hinic_nic_io *nic_io)
{
	return (init_sq_ctxts(nic_io) || init_rq_ctxts(nic_io));
}
391 static int clean_queue_offload_ctxt(struct hinic_nic_io
*nic_io
,
392 enum hinic_qp_ctxt_type ctxt_type
)
394 struct hinic_hwdev
*hwdev
= nic_io
->hwdev
;
395 struct hinic_clean_queue_ctxt
*ctxt_block
;
396 struct hinic_cmd_buf
*cmd_buf
;
400 cmd_buf
= hinic_alloc_cmd_buf(hwdev
);
402 PMD_DRV_LOG(ERR
, "Failed to allocate cmd buf");
406 ctxt_block
= cmd_buf
->buf
;
407 ctxt_block
->cmdq_hdr
.num_queues
= nic_io
->max_qps
;
408 ctxt_block
->cmdq_hdr
.queue_type
= ctxt_type
;
409 ctxt_block
->cmdq_hdr
.addr_offset
= 0;
411 /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
412 ctxt_block
->ctxt_size
= 0x3;
414 hinic_cpu_to_be32(ctxt_block
, sizeof(*ctxt_block
));
416 cmd_buf
->size
= sizeof(*ctxt_block
);
418 err
= hinic_cmdq_direct_resp(hwdev
, HINIC_ACK_TYPE_CMDQ
,
420 HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT
,
421 cmd_buf
, &out_param
, 0);
423 if ((err
) || (out_param
)) {
424 PMD_DRV_LOG(ERR
, "Failed to clean queue offload ctxts");
428 hinic_free_cmd_buf(hwdev
, cmd_buf
);
433 static int clean_qp_offload_ctxt(struct hinic_nic_io
*nic_io
)
435 /* clean LRO/TSO context space */
436 return (clean_queue_offload_ctxt(nic_io
, HINIC_QP_CTXT_TYPE_SQ
) ||
437 clean_queue_offload_ctxt(nic_io
, HINIC_QP_CTXT_TYPE_RQ
));
441 * get_hw_rx_buf_size - translate rx_buf_size into hw_rx_buf_size
442 * @rx_buf_sz: receive buffer size
446 static u16
get_hw_rx_buf_size(u32 rx_buf_sz
)
448 u16 num_hw_types
= sizeof(hinic_hw_rx_buf_size
)
449 / sizeof(hinic_hw_rx_buf_size
[0]);
452 for (i
= 0; i
< num_hw_types
; i
++) {
453 if (hinic_hw_rx_buf_size
[i
] == rx_buf_sz
)
457 PMD_DRV_LOG(ERR
, "Hw can't support rx buf size of %u", rx_buf_sz
);
459 return DEFAULT_RX_BUF_SIZE
; /* default 2K */
463 * hinic_set_root_ctxt - init root context in NIC
464 * @hwdev: the hardware interface of a nic device
465 * @rq_depth: the depth of receive queue
466 * @sq_depth: the depth of transmit queue
467 * @rx_buf_sz: receive buffer size from app
468 * Return: 0 on success, negative error value otherwise.
471 hinic_set_root_ctxt(void *hwdev
, u16 rq_depth
, u16 sq_depth
, int rx_buf_sz
)
473 struct hinic_root_ctxt root_ctxt
;
475 memset(&root_ctxt
, 0, sizeof(root_ctxt
));
476 root_ctxt
.mgmt_msg_head
.resp_aeq_num
= HINIC_AEQ1
;
477 root_ctxt
.func_idx
= hinic_global_func_id(hwdev
);
478 root_ctxt
.ppf_idx
= hinic_ppf_idx(hwdev
);
479 root_ctxt
.set_cmdq_depth
= 0;
480 root_ctxt
.cmdq_depth
= 0;
481 root_ctxt
.lro_en
= 1;
482 root_ctxt
.rq_depth
= (u16
)ilog2(rq_depth
);
483 root_ctxt
.rx_buf_sz
= get_hw_rx_buf_size(rx_buf_sz
);
484 root_ctxt
.sq_depth
= (u16
)ilog2(sq_depth
);
486 return hinic_msg_to_mgmt_sync(hwdev
, HINIC_MOD_COMM
,
487 HINIC_MGMT_CMD_VAT_SET
,
488 &root_ctxt
, sizeof(root_ctxt
),
493 * hinic_clean_root_ctxt - clean root context table in NIC
494 * @hwdev: the hardware interface of a nic device
497 * negative error value otherwise.
499 static int hinic_clean_root_ctxt(void *hwdev
)
501 struct hinic_root_ctxt root_ctxt
;
503 memset(&root_ctxt
, 0, sizeof(root_ctxt
));
504 root_ctxt
.mgmt_msg_head
.resp_aeq_num
= HINIC_AEQ1
;
505 root_ctxt
.func_idx
= hinic_global_func_id(hwdev
);
506 root_ctxt
.ppf_idx
= hinic_ppf_idx(hwdev
);
507 root_ctxt
.set_cmdq_depth
= 0;
508 root_ctxt
.cmdq_depth
= 0;
509 root_ctxt
.lro_en
= 0;
510 root_ctxt
.rq_depth
= 0;
511 root_ctxt
.rx_buf_sz
= 0;
512 root_ctxt
.sq_depth
= 0;
514 return hinic_msg_to_mgmt_sync(hwdev
, HINIC_MOD_COMM
,
515 HINIC_MGMT_CMD_VAT_SET
,
516 &root_ctxt
, sizeof(root_ctxt
),
520 /* init qps ctxt and set sq ci attr and arm all sq and set vat page_size */
521 int hinic_init_qp_ctxts(struct hinic_hwdev
*hwdev
)
523 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
524 struct hinic_sq_attr sq_attr
;
528 /* set vat page size to max queue depth page_size */
529 err
= hinic_set_pagesize(hwdev
, HINIC_PAGE_SIZE_DPDK
);
530 if (err
!= HINIC_OK
) {
531 PMD_DRV_LOG(ERR
, "Set vat page size: %d failed, rc: %d",
532 HINIC_PAGE_SIZE_DPDK
, err
);
536 if (hwdev
->cmdqs
->status
& HINIC_CMDQ_SET_FAIL
) {
537 err
= hinic_reinit_cmdq_ctxts(hwdev
);
539 PMD_DRV_LOG(ERR
, "Reinit cmdq context failed when dev start, err: %d",
545 err
= init_qp_ctxts(nic_io
);
547 PMD_DRV_LOG(ERR
, "Init QP ctxts failed, rc: %d", err
);
551 /* clean LRO/TSO context space */
552 err
= clean_qp_offload_ctxt(nic_io
);
554 PMD_DRV_LOG(ERR
, "Clean qp offload ctxts failed, rc: %d", err
);
558 rx_buf_sz
= nic_io
->rq_buf_size
;
560 /* update rx buf size to function table */
561 err
= hinic_set_rx_vhd_mode(hwdev
, 0, rx_buf_sz
);
563 PMD_DRV_LOG(ERR
, "Set rx vhd mode failed, rc: %d", err
);
567 err
= hinic_set_root_ctxt(hwdev
, nic_io
->rq_depth
,
568 nic_io
->sq_depth
, rx_buf_sz
);
570 PMD_DRV_LOG(ERR
, "Set root context failed, rc: %d", err
);
574 for (q_id
= 0; q_id
< nic_io
->num_sqs
; q_id
++) {
575 sq_attr
.ci_dma_base
=
576 HINIC_CI_PADDR(nic_io
->ci_dma_base
, q_id
) >> 2;
577 /* performance: sq ci update threshold as 8 */
578 sq_attr
.pending_limit
= 1;
579 sq_attr
.coalescing_time
= 1;
581 sq_attr
.l2nic_sqn
= q_id
;
582 sq_attr
.dma_attr_off
= 0;
583 err
= hinic_set_ci_table(hwdev
, q_id
, &sq_attr
);
585 PMD_DRV_LOG(ERR
, "Set ci table failed, rc: %d", err
);
586 goto set_cons_idx_table_err
;
592 set_cons_idx_table_err
:
593 (void)hinic_clean_root_ctxt(hwdev
);
597 void hinic_free_qp_ctxts(struct hinic_hwdev
*hwdev
)
601 err
= hinic_clean_root_ctxt(hwdev
);
603 PMD_DRV_LOG(ERR
, "Failed to clean root ctxt");
606 static int hinic_init_nic_hwdev(struct hinic_hwdev
*hwdev
)
608 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
609 u16 global_qpn
, rx_buf_sz
;
612 err
= hinic_get_base_qpn(hwdev
, &global_qpn
);
614 PMD_DRV_LOG(ERR
, "Failed to get base qpn");
615 goto err_init_nic_hwdev
;
618 nic_io
->global_qpn
= global_qpn
;
619 rx_buf_sz
= HINIC_IS_VF(hwdev
) ? RX_BUF_LEN_1_5K
: RX_BUF_LEN_16K
;
620 err
= hinic_init_function_table(hwdev
, rx_buf_sz
);
622 PMD_DRV_LOG(ERR
, "Failed to init function table");
623 goto err_init_nic_hwdev
;
626 err
= hinic_vf_func_init(hwdev
);
628 PMD_DRV_LOG(ERR
, "Failed to init nic mbox");
629 goto err_init_nic_hwdev
;
632 err
= hinic_set_fast_recycle_mode(hwdev
, RECYCLE_MODE_DPDK
);
634 PMD_DRV_LOG(ERR
, "Failed to set fast recycle mode");
635 goto err_init_nic_hwdev
;
644 static void hinic_free_nic_hwdev(struct hinic_hwdev
*hwdev
)
646 hinic_vf_func_free(hwdev
);
647 hwdev
->nic_io
= NULL
;
/* Flush all pending rx/tx traffic on the function. */
int hinic_rx_tx_flush(struct hinic_hwdev *hwdev)
{
	return hinic_func_rx_tx_flush(hwdev);
}
655 int hinic_get_sq_free_wqebbs(struct hinic_hwdev
*hwdev
, u16 q_id
)
657 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
658 struct hinic_wq
*wq
= &nic_io
->sq_wq
[q_id
];
660 return (wq
->delta
) - 1;
663 int hinic_get_rq_free_wqebbs(struct hinic_hwdev
*hwdev
, u16 q_id
)
665 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
666 struct hinic_wq
*wq
= &nic_io
->rq_wq
[q_id
];
668 return (wq
->delta
) - 1;
671 u16
hinic_get_sq_local_ci(struct hinic_hwdev
*hwdev
, u16 q_id
)
673 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
674 struct hinic_wq
*wq
= &nic_io
->sq_wq
[q_id
];
676 return (wq
->cons_idx
) & wq
->mask
;
679 void hinic_return_sq_wqe(struct hinic_hwdev
*hwdev
, u16 q_id
,
680 int num_wqebbs
, u16 owner
)
682 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
683 struct hinic_sq
*sq
= &nic_io
->qps
[q_id
].sq
;
685 if (owner
!= sq
->owner
)
688 sq
->wq
->delta
+= num_wqebbs
;
689 sq
->wq
->prod_idx
-= num_wqebbs
;
692 void hinic_update_sq_local_ci(struct hinic_hwdev
*hwdev
,
693 u16 q_id
, int wqebb_cnt
)
695 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
696 struct hinic_sq
*sq
= &nic_io
->qps
[q_id
].sq
;
698 hinic_put_wqe(sq
->wq
, wqebb_cnt
);
701 void *hinic_get_rq_wqe(struct hinic_hwdev
*hwdev
, u16 q_id
, u16
*pi
)
703 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
704 struct hinic_rq
*rq
= &nic_io
->qps
[q_id
].rq
;
706 return hinic_get_wqe(rq
->wq
, 1, pi
);
709 void hinic_return_rq_wqe(struct hinic_hwdev
*hwdev
, u16 q_id
, int num_wqebbs
)
711 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
712 struct hinic_rq
*rq
= &nic_io
->qps
[q_id
].rq
;
714 rq
->wq
->delta
+= num_wqebbs
;
715 rq
->wq
->prod_idx
-= num_wqebbs
;
718 u16
hinic_get_rq_local_ci(struct hinic_hwdev
*hwdev
, u16 q_id
)
720 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
721 struct hinic_wq
*wq
= &nic_io
->rq_wq
[q_id
];
723 return (wq
->cons_idx
) & wq
->mask
;
726 void hinic_update_rq_local_ci(struct hinic_hwdev
*hwdev
, u16 q_id
, int wqe_cnt
)
728 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
729 struct hinic_rq
*rq
= &nic_io
->qps
[q_id
].rq
;
731 hinic_put_wqe(rq
->wq
, wqe_cnt
);
734 static int hinic_alloc_nicio(struct hinic_hwdev
*hwdev
)
736 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
737 struct rte_pci_device
*pdev
= hwdev
->pcidev_hdl
;
741 max_qps
= hinic_func_max_qnum(hwdev
);
742 if ((max_qps
& (max_qps
- 1))) {
743 PMD_DRV_LOG(ERR
, "Wrong number of max_qps: %d",
748 nic_io
->max_qps
= max_qps
;
749 nic_io
->num_qps
= max_qps
;
752 nic_io
->qps
= kzalloc_aligned(num_qp
* sizeof(*nic_io
->qps
),
755 PMD_DRV_LOG(ERR
, "Failed to allocate qps");
760 nic_io
->ci_vaddr_base
= dma_zalloc_coherent(hwdev
,
761 CI_TABLE_SIZE(num_qp
, HINIC_PAGE_SIZE
),
762 &nic_io
->ci_dma_base
,
763 pdev
->device
.numa_node
);
764 if (!nic_io
->ci_vaddr_base
) {
765 PMD_DRV_LOG(ERR
, "Failed to allocate ci area");
770 nic_io
->sq_wq
= kzalloc_aligned(num_qp
* sizeof(*nic_io
->sq_wq
),
772 if (!nic_io
->sq_wq
) {
773 PMD_DRV_LOG(ERR
, "Failed to allocate sq wq array");
778 nic_io
->rq_wq
= kzalloc_aligned(num_qp
* sizeof(*nic_io
->rq_wq
),
780 if (!nic_io
->rq_wq
) {
781 PMD_DRV_LOG(ERR
, "Failed to allocate rq wq array");
789 kfree(nic_io
->sq_wq
);
792 dma_free_coherent(hwdev
, CI_TABLE_SIZE(num_qp
, HINIC_PAGE_SIZE
),
793 nic_io
->ci_vaddr_base
, nic_io
->ci_dma_base
);
802 static void hinic_free_nicio(struct hinic_hwdev
*hwdev
)
804 struct hinic_nic_io
*nic_io
= hwdev
->nic_io
;
807 kfree(nic_io
->rq_wq
);
810 kfree(nic_io
->sq_wq
);
812 /* nic_io->ci_vaddr_base */
813 dma_free_coherent(hwdev
,
814 CI_TABLE_SIZE(nic_io
->max_qps
, HINIC_PAGE_SIZE
),
815 nic_io
->ci_vaddr_base
, nic_io
->ci_dma_base
);
821 /* alloc nic hwdev and init function table */
822 int hinic_init_nicio(struct hinic_hwdev
*hwdev
)
826 hwdev
->nic_io
= rte_zmalloc("hinic_nicio", sizeof(*hwdev
->nic_io
),
827 RTE_CACHE_LINE_SIZE
);
828 if (!hwdev
->nic_io
) {
829 PMD_DRV_LOG(ERR
, "Allocate nic_io failed, dev_name: %s",
830 hwdev
->pcidev_hdl
->name
);
833 hwdev
->nic_io
->hwdev
= hwdev
;
835 /* alloc root working queue set */
836 rc
= hinic_alloc_nicio(hwdev
);
838 PMD_DRV_LOG(ERR
, "Allocate nic_io failed, dev_name: %s",
839 hwdev
->pcidev_hdl
->name
);
840 goto allc_nicio_fail
;
843 rc
= hinic_init_nic_hwdev(hwdev
);
845 PMD_DRV_LOG(ERR
, "Initialize hwdev failed, dev_name: %s",
846 hwdev
->pcidev_hdl
->name
);
847 goto init_nic_hwdev_fail
;
853 hinic_free_nicio(hwdev
);
856 rte_free(hwdev
->nic_io
);
860 void hinic_deinit_nicio(struct hinic_hwdev
*hwdev
)
862 hinic_free_nicio(hwdev
);
864 hinic_free_nic_hwdev(hwdev
);
866 rte_free(hwdev
->nic_io
);
867 hwdev
->nic_io
= NULL
;
871 * hinic_convert_rx_buf_size - convert rx buffer size to hw size
872 * @rx_buf_sz: receive buffer size of mbuf
873 * @match_sz: receive buffer size of hardware
876 * negative error value otherwise.
878 int hinic_convert_rx_buf_size(u32 rx_buf_sz
, u32
*match_sz
)
880 u32 i
, num_hw_types
, best_match_sz
;
882 if (unlikely(!match_sz
|| rx_buf_sz
< HINIC_RX_BUF_SIZE_32B
))
885 if (rx_buf_sz
>= HINIC_RX_BUF_SIZE_16K
) {
886 best_match_sz
= HINIC_RX_BUF_SIZE_16K
;
890 num_hw_types
= sizeof(hinic_hw_rx_buf_size
) /
891 sizeof(hinic_hw_rx_buf_size
[0]);
892 best_match_sz
= hinic_hw_rx_buf_size
[0];
893 for (i
= 0; i
< num_hw_types
; i
++) {
894 if (rx_buf_sz
== hinic_hw_rx_buf_size
[i
]) {
895 best_match_sz
= hinic_hw_rx_buf_size
[i
];
897 } else if (rx_buf_sz
< hinic_hw_rx_buf_size
[i
]) {
900 best_match_sz
= hinic_hw_rx_buf_size
[i
];
904 *match_sz
= best_match_sz
;