/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
#include <rte_bus_pci.h>

#include "hinic_compat.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_hwif.h"
#include "hinic_pmd_wq.h"
#include "hinic_pmd_mgmt.h"
#include "hinic_pmd_cmdq.h"
#include "hinic_pmd_cfg.h"
#include "hinic_pmd_niccfg.h"
#include "hinic_pmd_nicio.h"

#define WQ_PREFETCH_MAX		6
#define WQ_PREFETCH_MIN		1
#define WQ_PREFETCH_THRESHOLD	256

/* Index into hinic_hw_rx_buf_size[], i.e. the 2K entry */
#define DEFAULT_RX_BUF_SIZE	((u16)0xB)

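/* Buffer recycle mode passed to hinic_set_fast_recycle_mode() below */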
enum {
	RECYCLE_MODE_NIC = 0x0,
	RECYCLE_MODE_DPDK = 0x1,
};

/* Queue buffer size definitions */
enum hinic_rx_buf_size {
	HINIC_RX_BUF_SIZE_32B = 0x20,
	HINIC_RX_BUF_SIZE_64B = 0x40,
	HINIC_RX_BUF_SIZE_96B = 0x60,
	HINIC_RX_BUF_SIZE_128B = 0x80,
	HINIC_RX_BUF_SIZE_192B = 0xC0,
	HINIC_RX_BUF_SIZE_256B = 0x100,
	HINIC_RX_BUF_SIZE_384B = 0x180,
	HINIC_RX_BUF_SIZE_512B = 0x200,
	HINIC_RX_BUF_SIZE_768B = 0x300,
	HINIC_RX_BUF_SIZE_1K = 0x400,
	HINIC_RX_BUF_SIZE_1_5K = 0x600,
	HINIC_RX_BUF_SIZE_2K = 0x800,
	HINIC_RX_BUF_SIZE_3K = 0xC00,
	HINIC_RX_BUF_SIZE_4K = 0x1000,
	HINIC_RX_BUF_SIZE_8K = 0x2000,
	HINIC_RX_BUF_SIZE_16K = 0x4000,
};

const u32 hinic_hw_rx_buf_size[] = {
	HINIC_RX_BUF_SIZE_32B,
	HINIC_RX_BUF_SIZE_64B,
	HINIC_RX_BUF_SIZE_96B,
	HINIC_RX_BUF_SIZE_128B,
	HINIC_RX_BUF_SIZE_192B,
	HINIC_RX_BUF_SIZE_256B,
	HINIC_RX_BUF_SIZE_384B,
	HINIC_RX_BUF_SIZE_512B,
	HINIC_RX_BUF_SIZE_768B,
	HINIC_RX_BUF_SIZE_1K,
	HINIC_RX_BUF_SIZE_1_5K,
	HINIC_RX_BUF_SIZE_2K,
	HINIC_RX_BUF_SIZE_3K,
	HINIC_RX_BUF_SIZE_4K,
	HINIC_RX_BUF_SIZE_8K,
	HINIC_RX_BUF_SIZE_16K,
};

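/*
 * Every queue-context cmdq command begins with this header; addr_offset
 * locates the first context to write, in 16-byte units.
 */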
struct hinic_qp_ctxt_header {
	u16 num_queues;
	u16 queue_type;
	u32 addr_offset;
};

struct hinic_sq_ctxt {
	u32 ceq_attr;

	u32 ci_owner;

	u32 wq_pfn_hi;
	u32 wq_pfn_lo;

	u32 pref_cache;
	u32 pref_owner;
	u32 pref_wq_pfn_hi_ci;
	u32 pref_wq_pfn_lo;

	u32 rsvd8;
	u32 rsvd9;

	u32 wq_block_pfn_hi;
	u32 wq_block_pfn_lo;
};

struct hinic_rq_ctxt {
	u32 ceq_attr;

	u32 pi_intr_attr;

	u32 wq_pfn_hi_ci;
	u32 wq_pfn_lo;

	u32 pref_cache;
	u32 pref_owner;

	u32 pref_wq_pfn_hi_ci;
	u32 pref_wq_pfn_lo;

	u32 pi_paddr_hi;
	u32 pi_paddr_lo;

	u32 wq_block_pfn_hi;
	u32 wq_block_pfn_lo;
};

struct hinic_sq_ctxt_block {
	struct hinic_qp_ctxt_header cmdq_hdr;
	struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX];
};

struct hinic_rq_ctxt_block {
	struct hinic_qp_ctxt_header cmdq_hdr;
	struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX];
};

struct hinic_clean_queue_ctxt {
	struct hinic_qp_ctxt_header cmdq_hdr;
	u32 ctxt_size;
};

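/*
 * Fill and byte-swap the cmdq header for a queue-context command; the
 * microcode consumes these words in big-endian order.
 */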
static void
hinic_qp_prepare_cmdq_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
			     enum hinic_qp_ctxt_type ctxt_type,
			     u16 num_queues, u16 max_queues, u16 q_id)
{
	qp_ctxt_hdr->queue_type = ctxt_type;
	qp_ctxt_hdr->num_queues = num_queues;

	if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)
		qp_ctxt_hdr->addr_offset =
				SQ_CTXT_OFFSET(max_queues, max_queues, q_id);
	else
		qp_ctxt_hdr->addr_offset =
				RQ_CTXT_OFFSET(max_queues, max_queues, q_id);

	qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);

	hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
}

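/*
 * Build the hardware SQ context from the software work queue state:
 * starting CI/PI, WQ page and block PFNs, and prefetch settings.
 */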
static void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn,
				  struct hinic_sq_ctxt *sq_ctxt)
{
	struct hinic_wq *wq = sq->wq;
	u64 wq_page_addr;
	u64 wq_page_pfn, wq_block_pfn;
	u32 wq_page_pfn_hi, wq_page_pfn_lo;
	u32 wq_block_pfn_hi, wq_block_pfn_lo;
	u16 pi_start, ci_start;

	ci_start = (u16)(wq->cons_idx);
	pi_start = (u16)(wq->prod_idx);

	/* read the first page from the HW table */
	wq_page_addr = wq->queue_buf_paddr;

	wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
	wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
	wq_page_pfn_lo = lower_32_bits(wq_page_pfn);

	wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
	wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
	wq_block_pfn_lo = lower_32_bits(wq_block_pfn);

	/* the CEQ must be configured as disabled */
	sq_ctxt->ceq_attr = SQ_CTXT_CEQ_ATTR_SET(global_qpn, GLOBAL_SQ_ID) |
			    SQ_CTXT_CEQ_ATTR_SET(0, ARM) |
			    SQ_CTXT_CEQ_ATTR_SET(0, CEQ_ID) |
			    SQ_CTXT_CEQ_ATTR_SET(0, EN);

	sq_ctxt->ci_owner = SQ_CTXT_CI_SET(ci_start, IDX) |
			    SQ_CTXT_CI_SET(1, OWNER);

	sq_ctxt->wq_pfn_hi = SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
			     SQ_CTXT_WQ_PAGE_SET(pi_start, PI);

	sq_ctxt->wq_pfn_lo = wq_page_pfn_lo;

	sq_ctxt->pref_cache =
		SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
		SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
		SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);

	sq_ctxt->pref_owner = 1;

	sq_ctxt->pref_wq_pfn_hi_ci =
		SQ_CTXT_PREF_SET(ci_start, CI) |
		SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI);

	sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;

	sq_ctxt->wq_block_pfn_hi =
		SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);

	sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;

	hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
}

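/*
 * Build the hardware RQ context; unlike the SQ, the RQ also carries the
 * MSI-X entry and the DMA address where hardware reports the PI.
 */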
static void hinic_rq_prepare_ctxt(struct hinic_rq *rq,
				  struct hinic_rq_ctxt *rq_ctxt)
{
	struct hinic_wq *wq = rq->wq;
	u64 wq_page_addr;
	u64 wq_page_pfn, wq_block_pfn;
	u32 wq_page_pfn_hi, wq_page_pfn_lo;
	u32 wq_block_pfn_hi, wq_block_pfn_lo;
	u16 pi_start, ci_start;

	ci_start = (u16)(wq->cons_idx);
	pi_start = (u16)(wq->prod_idx);

	/* read the first page from the HW table */
	wq_page_addr = wq->queue_buf_paddr;

	wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
	wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
	wq_page_pfn_lo = lower_32_bits(wq_page_pfn);

	wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
	wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
	wq_block_pfn_lo = lower_32_bits(wq_block_pfn);

	/* the CEQ must be enabled, but it must not generate CEQ events */
	rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(1, EN) |
			    RQ_CTXT_CEQ_ATTR_SET(1, OWNER);

	rq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) |
				RQ_CTXT_PI_SET(rq->msix_entry_idx, INTR) |
				RQ_CTXT_PI_SET(0, CEQ_ARM);

	rq_ctxt->wq_pfn_hi_ci = RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
				RQ_CTXT_WQ_PAGE_SET(ci_start, CI);

	rq_ctxt->wq_pfn_lo = wq_page_pfn_lo;

	rq_ctxt->pref_cache =
		RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
		RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
		RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);

	rq_ctxt->pref_owner = 1;

	rq_ctxt->pref_wq_pfn_hi_ci =
		RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
		RQ_CTXT_PREF_SET(ci_start, CI);

	rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;

	rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
	rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);

	rq_ctxt->wq_block_pfn_hi =
		RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);

	rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;

	hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
}

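/*
 * Load all SQ contexts into the hardware via the cmdq, batching at most
 * HINIC_Q_CTXT_MAX contexts per command buffer.
 */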
static int init_sq_ctxts(struct hinic_nic_io *nic_io)
{
	struct hinic_hwdev *hwdev = nic_io->hwdev;
	struct hinic_sq_ctxt_block *sq_ctxt_block;
	struct hinic_sq_ctxt *sq_ctxt;
	struct hinic_cmd_buf *cmd_buf;
	struct hinic_qp *qp;
	/* preset to a nonzero value so a missing response reads as failure */
	u64 out_param = EIO;
	u16 q_id, curr_id, global_qpn, max_ctxts, i;
	int err = 0;

	cmd_buf = hinic_alloc_cmd_buf(hwdev);
	if (!cmd_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
		return -ENOMEM;
	}

	q_id = 0;
	/* the number of SQs and RQs may differ */
	while (q_id < nic_io->num_sqs) {
		sq_ctxt_block = cmd_buf->buf;
		sq_ctxt = sq_ctxt_block->sq_ctxt;

		max_ctxts = (nic_io->num_sqs - q_id) > HINIC_Q_CTXT_MAX ?
			    HINIC_Q_CTXT_MAX : (nic_io->num_sqs - q_id);

		hinic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
					     HINIC_QP_CTXT_TYPE_SQ, max_ctxts,
					     nic_io->max_qps, q_id);

		for (i = 0; i < max_ctxts; i++) {
			curr_id = q_id + i;
			qp = &nic_io->qps[curr_id];
			global_qpn = nic_io->global_qpn + curr_id;

			hinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]);
		}

		cmd_buf->size = SQ_CTXT_SIZE(max_ctxts);

		err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
					     HINIC_MOD_L2NIC,
					     HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,
					     cmd_buf, &out_param, 0);
		if (err || out_param != 0) {
			PMD_DRV_LOG(ERR, "Failed to set SQ ctxts, err: %d",
				    err);
			err = -EFAULT;
			break;
		}

		q_id += max_ctxts;
	}

	hinic_free_cmd_buf(hwdev, cmd_buf);

	return err;
}

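/* Load all RQ contexts into the hardware; mirrors init_sq_ctxts() */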
static int init_rq_ctxts(struct hinic_nic_io *nic_io)
{
	struct hinic_hwdev *hwdev = nic_io->hwdev;
	struct hinic_rq_ctxt_block *rq_ctxt_block;
	struct hinic_rq_ctxt *rq_ctxt;
	struct hinic_cmd_buf *cmd_buf;
	struct hinic_qp *qp;
	u64 out_param = 0;
	u16 q_id, curr_id, max_ctxts, i;
	int err = 0;

	cmd_buf = hinic_alloc_cmd_buf(hwdev);
	if (!cmd_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
		return -ENOMEM;
	}

	q_id = 0;
	/* the number of SQs and RQs may differ */
	while (q_id < nic_io->num_rqs) {
		rq_ctxt_block = cmd_buf->buf;
		rq_ctxt = rq_ctxt_block->rq_ctxt;

		max_ctxts = (nic_io->num_rqs - q_id) > HINIC_Q_CTXT_MAX ?
			    HINIC_Q_CTXT_MAX : (nic_io->num_rqs - q_id);

		hinic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
					     HINIC_QP_CTXT_TYPE_RQ, max_ctxts,
					     nic_io->max_qps, q_id);

		for (i = 0; i < max_ctxts; i++) {
			curr_id = q_id + i;
			qp = &nic_io->qps[curr_id];

			hinic_rq_prepare_ctxt(&qp->rq, &rq_ctxt[i]);
		}

		cmd_buf->size = RQ_CTXT_SIZE(max_ctxts);

		err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
					     HINIC_MOD_L2NIC,
					     HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,
					     cmd_buf, &out_param, 0);
		if (err || out_param != 0) {
			PMD_DRV_LOG(ERR, "Failed to set RQ ctxts");
			err = -EFAULT;
			break;
		}

		q_id += max_ctxts;
	}

	hinic_free_cmd_buf(hwdev, cmd_buf);

	return err;
}

static int init_qp_ctxts(struct hinic_nic_io *nic_io)
{
	return (init_sq_ctxts(nic_io) || init_rq_ctxts(nic_io));
}

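/*
 * Ask the microcode to zero the TSO/LRO context area of every queue of
 * the given type; ctxt_size selects how many bytes to clean per queue.
 */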
static int clean_queue_offload_ctxt(struct hinic_nic_io *nic_io,
				    enum hinic_qp_ctxt_type ctxt_type)
{
	struct hinic_hwdev *hwdev = nic_io->hwdev;
	struct hinic_clean_queue_ctxt *ctxt_block;
	struct hinic_cmd_buf *cmd_buf;
	u64 out_param = 0;
	int err;

	cmd_buf = hinic_alloc_cmd_buf(hwdev);
	if (!cmd_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
		return -ENOMEM;
	}

	ctxt_block = cmd_buf->buf;
	ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps;
	ctxt_block->cmdq_hdr.queue_type = ctxt_type;
	ctxt_block->cmdq_hdr.addr_offset = 0;

	/* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
	ctxt_block->ctxt_size = 0x3;

	hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));

	cmd_buf->size = sizeof(*ctxt_block);

	err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
				     HINIC_MOD_L2NIC,
				     HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
				     cmd_buf, &out_param, 0);

	if (err || out_param) {
		PMD_DRV_LOG(ERR, "Failed to clean queue offload ctxts");
		err = -EFAULT;
	}

	hinic_free_cmd_buf(hwdev, cmd_buf);

	return err;
}

static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io)
{
	/* clean LRO/TSO context space */
	return (clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_SQ) ||
		clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_RQ));
}

/**
 * get_hw_rx_buf_size - translate rx_buf_sz into the hw buffer size index
 * @rx_buf_sz: receive buffer size
 * @return
 *   index into hinic_hw_rx_buf_size
 */
static u16 get_hw_rx_buf_size(u32 rx_buf_sz)
{
	u16 num_hw_types = sizeof(hinic_hw_rx_buf_size) /
			   sizeof(hinic_hw_rx_buf_size[0]);
	u16 i;

	for (i = 0; i < num_hw_types; i++) {
		if (hinic_hw_rx_buf_size[i] == rx_buf_sz)
			return i;
	}

	PMD_DRV_LOG(ERR, "Hw can't support rx buf size of %u", rx_buf_sz);

	return DEFAULT_RX_BUF_SIZE;	/* default 2K */
}

/**
 * hinic_set_root_ctxt - init root context in NIC
 * @hwdev: the hardware interface of a nic device
 * @rq_depth: the depth of receive queue
 * @sq_depth: the depth of transmit queue
 * @rx_buf_sz: receive buffer size from app
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
static int
hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz)
{
	struct hinic_root_ctxt root_ctxt;

	memset(&root_ctxt, 0, sizeof(root_ctxt));
	root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	root_ctxt.func_idx = hinic_global_func_id(hwdev);
	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
	root_ctxt.set_cmdq_depth = 0;
	root_ctxt.cmdq_depth = 0;
	root_ctxt.lro_en = 1;
	root_ctxt.rq_depth = (u16)ilog2(rq_depth);
	root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz);
	root_ctxt.sq_depth = (u16)ilog2(sq_depth);

	return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				      HINIC_MGMT_CMD_VAT_SET,
				      &root_ctxt, sizeof(root_ctxt),
				      NULL, NULL, 0);
}

/**
 * hinic_clean_root_ctxt - clean root context table in NIC
 * @hwdev: the hardware interface of a nic device
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
static int hinic_clean_root_ctxt(void *hwdev)
{
	struct hinic_root_ctxt root_ctxt;

	memset(&root_ctxt, 0, sizeof(root_ctxt));
	root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	root_ctxt.func_idx = hinic_global_func_id(hwdev);
	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
	root_ctxt.set_cmdq_depth = 0;
	root_ctxt.cmdq_depth = 0;
	root_ctxt.lro_en = 0;
	root_ctxt.rq_depth = 0;
	root_ctxt.rx_buf_sz = 0;
	root_ctxt.sq_depth = 0;

	return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				      HINIC_MGMT_CMD_VAT_SET,
				      &root_ctxt, sizeof(root_ctxt),
				      NULL, NULL, 0);
}

/* init the qp ctxts, set the sq ci attr table and set the vat page size */
int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_sq_attr sq_attr;
	u16 q_id;
	int err, rx_buf_sz;

	/* set vat page size to max queue depth page_size */
	err = hinic_set_pagesize(hwdev, HINIC_PAGE_SIZE_DPDK);
	if (err != HINIC_OK) {
		PMD_DRV_LOG(ERR, "Set vat page size: %d failed, rc: %d",
			    HINIC_PAGE_SIZE_DPDK, err);
		return err;
	}

	if (hwdev->cmdqs->status & HINIC_CMDQ_SET_FAIL) {
		err = hinic_reinit_cmdq_ctxts(hwdev);
		if (err) {
			PMD_DRV_LOG(ERR, "Reinit cmdq context failed when dev start, err: %d",
				    err);
			return err;
		}
	}

	err = init_qp_ctxts(nic_io);
	if (err) {
		PMD_DRV_LOG(ERR, "Init QP ctxts failed, rc: %d", err);
		return err;
	}

	/* clean LRO/TSO context space */
	err = clean_qp_offload_ctxt(nic_io);
	if (err) {
		PMD_DRV_LOG(ERR, "Clean qp offload ctxts failed, rc: %d", err);
		return err;
	}

	rx_buf_sz = nic_io->rq_buf_size;

	/* update rx buf size to function table */
	err = hinic_set_rx_vhd_mode(hwdev, 0, rx_buf_sz);
	if (err) {
		PMD_DRV_LOG(ERR, "Set rx vhd mode failed, rc: %d", err);
		return err;
	}

	err = hinic_set_root_ctxt(hwdev, nic_io->rq_depth,
				  nic_io->sq_depth, rx_buf_sz);
	if (err) {
		PMD_DRV_LOG(ERR, "Set root context failed, rc: %d", err);
		return err;
	}

	for (q_id = 0; q_id < nic_io->num_sqs; q_id++) {
		sq_attr.ci_dma_base =
			HINIC_CI_PADDR(nic_io->ci_dma_base, q_id) >> 2;
		/* performance: sq ci update threshold as 8 */
		sq_attr.pending_limit = 1;
		sq_attr.coalescing_time = 1;
		sq_attr.intr_en = 0;
		sq_attr.l2nic_sqn = q_id;
		sq_attr.dma_attr_off = 0;
		err = hinic_set_ci_table(hwdev, q_id, &sq_attr);
		if (err) {
			PMD_DRV_LOG(ERR, "Set ci table failed, rc: %d", err);
			goto set_cons_idx_table_err;
		}
	}

	return 0;

set_cons_idx_table_err:
	(void)hinic_clean_root_ctxt(hwdev);
	return err;
}

void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev)
{
	int err;

	err = hinic_clean_root_ctxt(hwdev);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to clean root ctxt");
}

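/*
 * One-time NIC setup: fetch the base global qpn, program the function
 * table with a default rx buffer size, bring up the VF mailbox channel
 * and select DPDK-managed buffer recycling.
 */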
static int hinic_init_nic_hwdev(struct hinic_hwdev *hwdev)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	u16 global_qpn, rx_buf_sz;
	int err;

	err = hinic_get_base_qpn(hwdev, &global_qpn);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get base qpn");
		goto err_init_nic_hwdev;
	}

	nic_io->global_qpn = global_qpn;
	rx_buf_sz = HINIC_IS_VF(hwdev) ? RX_BUF_LEN_1_5K : RX_BUF_LEN_16K;
	err = hinic_init_function_table(hwdev, rx_buf_sz);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init function table");
		goto err_init_nic_hwdev;
	}

	err = hinic_vf_func_init(hwdev);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init nic mbox");
		goto err_init_nic_hwdev;
	}

	err = hinic_set_fast_recycle_mode(hwdev, RECYCLE_MODE_DPDK);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to set fast recycle mode");
		goto err_init_nic_hwdev;
	}

	return 0;

err_init_nic_hwdev:
	return err;
}

static void hinic_free_nic_hwdev(struct hinic_hwdev *hwdev)
{
	hinic_vf_func_free(hwdev);
	hwdev->nic_io = NULL;
}

int hinic_rx_tx_flush(struct hinic_hwdev *hwdev)
{
	return hinic_func_rx_tx_flush(hwdev);
}

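/*
 * WQ accessors. Note that one WQEBB is always held back (delta - 1),
 * a common ring-buffer trick to tell a full queue from an empty one.
 */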
int hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_wq *wq = &nic_io->sq_wq[q_id];

	return (wq->delta) - 1;
}

int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_wq *wq = &nic_io->rq_wq[q_id];

	return (wq->delta) - 1;
}

u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_wq *wq = &nic_io->sq_wq[q_id];

	return (wq->cons_idx) & wq->mask;
}

void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id,
			 int num_wqebbs, u16 owner)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_sq *sq = &nic_io->qps[q_id].sq;

	if (owner != sq->owner)
		sq->owner = owner;

	sq->wq->delta += num_wqebbs;
	sq->wq->prod_idx -= num_wqebbs;
}

void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev,
			      u16 q_id, int wqebb_cnt)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_sq *sq = &nic_io->qps[q_id].sq;

	hinic_put_wqe(sq->wq, wqebb_cnt);
}

void *hinic_get_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, u16 *pi)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_rq *rq = &nic_io->qps[q_id].rq;

	return hinic_get_wqe(rq->wq, 1, pi);
}

void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_rq *rq = &nic_io->qps[q_id].rq;

	rq->wq->delta += num_wqebbs;
	rq->wq->prod_idx -= num_wqebbs;
}

u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_wq *wq = &nic_io->rq_wq[q_id];

	return (wq->cons_idx) & wq->mask;
}

void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_rq *rq = &nic_io->qps[q_id].rq;

	hinic_put_wqe(rq->wq, wqe_cnt);
}

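/*
 * Allocate the per-device queue resources: the qp array, the consumer
 * index (ci) table that hardware writes back to, and the sq/rq work
 * queue arrays.
 */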
static int hinic_alloc_nicio(struct hinic_hwdev *hwdev)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct rte_pci_device *pdev = hwdev->pcidev_hdl;
	u16 max_qps, num_qp;
	int err;

	max_qps = hinic_func_max_qnum(hwdev);
	/* max_qps must be a power of two */
	if ((max_qps & (max_qps - 1))) {
		PMD_DRV_LOG(ERR, "Wrong number of max_qps: %d", max_qps);
		return -EINVAL;
	}

	nic_io->max_qps = max_qps;
	nic_io->num_qps = max_qps;
	num_qp = max_qps;

	nic_io->qps = kzalloc_aligned(num_qp * sizeof(*nic_io->qps),
				      GFP_KERNEL);
	if (!nic_io->qps) {
		PMD_DRV_LOG(ERR, "Failed to allocate qps");
		err = -ENOMEM;
		goto alloc_qps_err;
	}

	nic_io->ci_vaddr_base = dma_zalloc_coherent(hwdev,
				CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE),
				&nic_io->ci_dma_base,
				pdev->device.numa_node);
	if (!nic_io->ci_vaddr_base) {
		PMD_DRV_LOG(ERR, "Failed to allocate ci area");
		err = -ENOMEM;
		goto ci_base_err;
	}

	nic_io->sq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->sq_wq),
					GFP_KERNEL);
	if (!nic_io->sq_wq) {
		PMD_DRV_LOG(ERR, "Failed to allocate sq wq array");
		err = -ENOMEM;
		goto sq_wq_err;
	}

	nic_io->rq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->rq_wq),
					GFP_KERNEL);
	if (!nic_io->rq_wq) {
		PMD_DRV_LOG(ERR, "Failed to allocate rq wq array");
		err = -ENOMEM;
		goto rq_wq_err;
	}

	return HINIC_OK;

rq_wq_err:
	kfree(nic_io->sq_wq);

sq_wq_err:
	dma_free_coherent(hwdev, CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE),
			  nic_io->ci_vaddr_base, nic_io->ci_dma_base);

ci_base_err:
	kfree(nic_io->qps);

alloc_qps_err:
	return err;
}

static void hinic_free_nicio(struct hinic_hwdev *hwdev)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;

	/* nic_io->rq_wq */
	kfree(nic_io->rq_wq);

	/* nic_io->sq_wq */
	kfree(nic_io->sq_wq);

	/* nic_io->ci_vaddr_base */
	dma_free_coherent(hwdev,
			  CI_TABLE_SIZE(nic_io->max_qps, HINIC_PAGE_SIZE),
			  nic_io->ci_vaddr_base, nic_io->ci_dma_base);

	/* nic_io->qps */
	kfree(nic_io->qps);
}

/* alloc nic hwdev and init function table */
int hinic_init_nicio(struct hinic_hwdev *hwdev)
{
	int rc;

	hwdev->nic_io = rte_zmalloc("hinic_nicio", sizeof(*hwdev->nic_io),
				    RTE_CACHE_LINE_SIZE);
	if (!hwdev->nic_io) {
		PMD_DRV_LOG(ERR, "Allocate nic_io failed, dev_name: %s",
			    hwdev->pcidev_hdl->name);
		return -ENOMEM;
	}
	hwdev->nic_io->hwdev = hwdev;

	/* alloc root working queue set */
	rc = hinic_alloc_nicio(hwdev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Allocate root working queue set failed, dev_name: %s",
			    hwdev->pcidev_hdl->name);
		goto alloc_nicio_fail;
	}

	rc = hinic_init_nic_hwdev(hwdev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Initialize hwdev failed, dev_name: %s",
			    hwdev->pcidev_hdl->name);
		goto init_nic_hwdev_fail;
	}

	return 0;

init_nic_hwdev_fail:
	hinic_free_nicio(hwdev);

alloc_nicio_fail:
	rte_free(hwdev->nic_io);
	return rc;
}

void hinic_deinit_nicio(struct hinic_hwdev *hwdev)
{
	hinic_free_nicio(hwdev);

	hinic_free_nic_hwdev(hwdev);

	rte_free(hwdev->nic_io);
	hwdev->nic_io = NULL;
}

/**
 * hinic_convert_rx_buf_size - convert rx buffer size to hw size
 * @rx_buf_sz: receive buffer size of mbuf
 * @match_sz: receive buffer size of hardware
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
int hinic_convert_rx_buf_size(u32 rx_buf_sz, u32 *match_sz)
{
	u32 i, num_hw_types, best_match_sz;

	if (unlikely(!match_sz || rx_buf_sz < HINIC_RX_BUF_SIZE_32B))
		return -EINVAL;

	if (rx_buf_sz >= HINIC_RX_BUF_SIZE_16K) {
		best_match_sz = HINIC_RX_BUF_SIZE_16K;
		goto size_matched;
	}

	/* pick the largest hw size that does not exceed rx_buf_sz */
	num_hw_types = sizeof(hinic_hw_rx_buf_size) /
		       sizeof(hinic_hw_rx_buf_size[0]);
	best_match_sz = hinic_hw_rx_buf_size[0];
	for (i = 0; i < num_hw_types; i++) {
		if (rx_buf_sz == hinic_hw_rx_buf_size[i]) {
			best_match_sz = hinic_hw_rx_buf_size[i];
			break;
		} else if (rx_buf_sz < hinic_hw_rx_buf_size[i]) {
			break;
		}
		best_match_sz = hinic_hw_rx_buf_size[i];
	}

size_matched:
	*match_sz = best_match_sz;

	return 0;
}