/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdbool.h>
#include <string.h>
#include <errno.h>

#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_mbuf.h>

#include "octeontx_mbox.h"
#include "octeontx_fpavf.h"
/* FPA Mbox Message */
#define FPA_CONFIGSET		0x1
#define FPA_CONFIGGET		0x2
#define FPA_START_COUNT		0x3
#define FPA_STOP_COUNT		0x4
#define FPA_ATTACHAURA		0x5
#define FPA_DETACHAURA		0x6
#define FPA_SETAURALVL		0x7
#define FPA_GETAURALVL		0x8

#define FPA_COPROC		0x1
/* FPA mbox message payload */
struct octeontx_mbox_fpa_cfg {
	int		aid;
	uint64_t	pool_cfg;
	uint64_t	pool_stack_base;
	uint64_t	pool_stack_end;
	uint64_t	aura_cfg;
};

struct __attribute__((__packed__)) gen_req {
	uint32_t	value;
};

struct __attribute__((__packed__)) idn_req {
	uint8_t	domain_id;
};

struct __attribute__((__packed__)) gen_resp {
	uint16_t	domain_id;
	uint16_t	vfid;
};

struct __attribute__((__packed__)) dcfg_resp {
	uint8_t	sso_count;
	uint8_t	ssow_count;
	uint8_t	fpa_count;
	uint8_t	pko_count;
	uint8_t	tim_count;
	uint8_t	net_port_count;
	uint8_t	virt_port_count;
};
#define FPA_MAX_POOL	32
#define FPA_PF_PAGE_SZ	4096

#define FPA_LN_SIZE	128
#define FPA_ROUND_UP(x, size) \
	((((unsigned long)(x)) + (size) - 1) & (~((size) - 1)))
#define FPA_OBJSZ_2_CACHE_LINE(sz)	(((sz) + RTE_CACHE_LINE_MASK) >> 7)
#define FPA_CACHE_LINE_2_OBJSZ(sz)	((sz) << 7)
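/*
 * Illustrative example (values assumed): FPA block sizes are expressed in
 * 128-byte cache lines on this target, so RTE_CACHE_LINE_MASK is 127.
 * For a 2176-byte object, FPA_OBJSZ_2_CACHE_LINE(2176) =
 * (2176 + 127) >> 7 = 17 cache lines, and FPA_CACHE_LINE_2_OBJSZ(17) =
 * 17 << 7 = 2176 bytes again.
 */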
#define POOL_ENA		(0x1 << 0)
#define POOL_DIS		(0x0 << 0)
#define POOL_SET_NAT_ALIGN	(0x1 << 1)
#define POOL_DIS_NAT_ALIGN	(0x0 << 1)
#define POOL_STYPE(x)		(((x) & 0x1) << 2)
#define POOL_LTYPE(x)		(((x) & 0x3) << 3)
#define POOL_BUF_OFFSET(x)	(((x) & 0x7fffULL) << 16)
#define POOL_BUF_SIZE(x)	(((x) & 0x7ffULL) << 32)
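/*
 * Illustrative encoding (field values assumed, not from the datasheet):
 * a 16-cache-line (2 KB) natural-aligned, enabled pool packs as
 * POOL_BUF_SIZE(16) | POOL_BUF_OFFSET(0) | POOL_LTYPE(0x2) |
 * POOL_SET_NAT_ALIGN | POOL_ENA
 * = (16ULL << 32) | (0x2 << 3) | (0x1 << 1) | 0x1 = 0x1000000013.
 */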
struct fpavf_res {
	void		*pool_stack_base;
	void		*bar0;
	uint64_t	stack_ln_ptr;
	uint16_t	domain_id;
	uint16_t	vf_id;	/* gpool_id */
	uint16_t	sz128;	/* Block size in cache lines */
	bool		is_inuse;
};
struct octeontx_fpadev {
	rte_spinlock_t		lock;
	uint8_t			total_gpool_cnt;
	struct fpavf_res	pool[FPA_VF_MAX];
};

static struct octeontx_fpadev fpadev;
int octeontx_logtype_fpavf;
int octeontx_logtype_fpavf_mbox;

RTE_INIT(otx_pool_init_log)
{
	octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
	if (octeontx_logtype_fpavf >= 0)
		rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);
}
/* lock is taken by caller */
static int
octeontx_fpa_gpool_alloc(unsigned int object_size)
{
	struct fpavf_res *res = NULL;
	uint16_t gpool;
	unsigned int sz128;

	sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);

	for (gpool = 0; gpool < FPA_VF_MAX; gpool++) {

		/* Skip VF that is not mapped or already in use */
		if ((fpadev.pool[gpool].bar0 == NULL) ||
		    (fpadev.pool[gpool].is_inuse == true))
			continue;

		res = &fpadev.pool[gpool];

		RTE_ASSERT(res->domain_id != (uint16_t)~0);
		RTE_ASSERT(res->vf_id != (uint16_t)~0);
		RTE_ASSERT(res->stack_ln_ptr != 0);

		if (res->sz128 == 0) {
			res->sz128 = sz128;

			fpavf_log_dbg("gpool %d blk_sz %d\n", gpool, sz128);
			return gpool;
		}
	}

	return -ENOSPC;
}
/* lock is taken by caller */
static __rte_always_inline uintptr_t
octeontx_fpa_gpool2handle(uint16_t gpool)
{
	struct fpavf_res *res = NULL;

	RTE_ASSERT(gpool < FPA_VF_MAX);

	res = &fpadev.pool[gpool];
	return (uintptr_t)res->bar0 | gpool;
}
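/*
 * Illustrative note (addresses assumed): a pool handle is the VF BAR0
 * virtual address with the gpool index packed into its low bits; BAR
 * mappings are page aligned, so those bits are otherwise zero. For
 * example, bar0 = 0x840000000000 and gpool = 3 give handle
 * 0x840000000003. octeontx_fpa_bufpool_gpool() recovers the index and
 * masking with ~FPA_GPOOL_MASK recovers bar0.
 */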
static __rte_always_inline bool
octeontx_fpa_handle_valid(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;
	int i;
	bool ret = false;

	if (unlikely(!handle))
		return ret;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* get the bar address */
	handle &= ~(uint64_t)FPA_GPOOL_MASK;
	for (i = 0; i < FPA_VF_MAX; i++) {
		if ((uintptr_t)fpadev.pool[i].bar0 != handle)
			continue;

		/* validate gpool */
		if (gpool != i)
			return false;

		res = &fpadev.pool[i];

		if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
		    res->stack_ln_ptr == 0)
			ret = false;
		else
			ret = true;
		break;
	}

	return ret;
}
static int
octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
			  signed short buf_offset, unsigned int max_buf_count)
{
	void *memptr = NULL;
	rte_iova_t phys_addr;
	unsigned int memsz;
	struct fpavf_res *fpa = NULL;
	uint64_t reg;
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = -1;

	fpa = &fpadev.pool[gpool];
	memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
			FPA_LN_SIZE;

	/* Round-up to page size */
	memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ - 1);
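	/*
	 * Illustrative sizing (values assumed, not read from hardware):
	 * with max_buf_count = 2000 and stack_ln_ptr = 16 pointers per
	 * stack line, 2000 / 16 = 125 lines, rounded up to a multiple of
	 * FPA_LN_SIZE (128) gives 128, i.e. 128 * 128 = 16384 bytes,
	 * already a multiple of the 4 KB PF page size.
	 */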
	memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
	if (memptr == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	/* Configure stack */
	fpa->pool_stack_base = memptr;
	phys_addr = rte_malloc_virt2iova(memptr);
	buf_size /= FPA_LN_SIZE;

	/* POOL setup */
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	buf_offset /= FPA_LN_SIZE;
	reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
		POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
		POOL_ENA;

	cfg.aid = FPA_AURA_IDX(gpool);
	cfg.pool_cfg = reg;
	cfg.pool_stack_base = phys_addr;
	cfg.pool_stack_end = phys_addr + memsz;
	cfg.aura_cfg = (1 << 9);
	ret = octeontx_mbox_send(&hdr, &cfg,
				 sizeof(struct octeontx_mbox_fpa_cfg),
				 &resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64 " aura_cfg %" PRIx64 "\n",
		      fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
		      cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);

	/* Now the pool is in use */
	fpa->is_inuse = true;

err:
	if (ret < 0)
		rte_free(memptr);

	return ret;
}
static int
octeontx_fpapf_pool_destroy(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	struct fpavf_res *fpa = NULL;
	int ret = -1;

	fpa = &fpadev.pool[gpool_index];

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	/* reset and free the pool */
	cfg.aid = 0;
	cfg.pool_cfg = 0;
	cfg.pool_stack_base = 0;
	cfg.pool_stack_end = 0;
	cfg.aura_cfg = 0;

	ret = octeontx_mbox_send(&hdr, &cfg,
				 sizeof(struct octeontx_mbox_fpa_cfg),
				 &resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	ret = 0;
err:
	/* in any case, free the pool stack memory */
	rte_free(fpa->pool_stack_base);
	fpa->pool_stack_base = NULL;
	return ret;
}
static int
octeontx_fpapf_aura_attach(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_ATTACHAURA;
	hdr.vfid = gpool_index;
	hdr.res_code = 0;
	memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
	cfg.aid = FPA_AURA_IDX(gpool_index);

	ret = octeontx_mbox_send(&hdr, &cfg,
				 sizeof(struct octeontx_mbox_fpa_cfg),
				 &resp, sizeof(resp));
	if (ret < 0) {
		fpavf_log_err("Could not attach FPA aura %d to pool %d. Err=%d. FuncErr=%d\n",
			      FPA_AURA_IDX(gpool_index), gpool_index, ret,
			      hdr.res_code);
		ret = -EACCES;
		goto err;
	}
err:
	return ret;
}
static int
octeontx_fpapf_aura_detach(unsigned int gpool_index)
{
	struct octeontx_mbox_fpa_cfg cfg = {0};
	struct octeontx_mbox_hdr hdr = {0};
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	cfg.aid = FPA_AURA_IDX(gpool_index);
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_DETACHAURA;
	hdr.vfid = gpool_index;
	ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
			      FPA_AURA_IDX(gpool_index), ret,
			      hdr.res_code);
		ret = -EINVAL;
	}

err:
	return ret;
}
int
octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
			      void *memva, uint16_t gpool)
{
	uint64_t va_end;

	if (unlikely(!handle))
		return -ENODEV;

	va_end = (uintptr_t)memva + memsz;
	va_end &= ~RTE_CACHE_LINE_MASK;

	/* VHPOOL setup */
	fpavf_write64((uintptr_t)memva,
		      (void *)((uintptr_t)handle +
		      FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(va_end,
		      (void *)((uintptr_t)handle +
		      FPA_VF_VHPOOL_END_ADDR(gpool)));
	return 0;
}
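/*
 * Illustrative range programming (addresses assumed): for a pool backed
 * by memva = 0x140000000 with memsz = 0x200000, the hardware is told to
 * honor pointers in [0x140000000, 0x140200000), the end address first
 * truncated to a cache-line boundary by the mask above.
 */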
static int
octeontx_fpapf_start_count(uint16_t gpool_index)
{
	int ret = 0;
	struct octeontx_mbox_hdr hdr = {0};

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_START_COUNT;
	hdr.vfid = gpool_index;
	ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Could not start buffer counting for FPA pool %d. Err=%d. FuncErr=%d\n",
			      gpool_index, ret, hdr.res_code);
		ret = -EINVAL;
		goto err;
	}

err:
	return ret;
}
static __rte_always_inline int
octeontx_fpavf_free(unsigned int gpool)
{
	int ret = 0;

	if (gpool >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	/* Pool is free */
	fpadev.pool[gpool].is_inuse = false;

err:
	return ret;
}
static __rte_always_inline int
octeontx_gpool_free(uint16_t gpool)
{
	if (fpadev.pool[gpool].sz128 != 0) {
		fpadev.pool[gpool].sz128 = 0;
		return 0;
	}
	return -EINVAL;
}
/*
 * Return buffer size for a given pool
 */
int
octeontx_fpa_bufpool_block_size(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	res = &fpadev.pool[gpool];
	return FPA_CACHE_LINE_2_OBJSZ(res->sz128);
}
int
octeontx_fpa_bufpool_free_count(uintptr_t handle)
{
	uint64_t cnt, limit, avail;
	uint8_t gpool;
	uint16_t gaura;
	uintptr_t pool_bar;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	/* get the aura */
	gaura = octeontx_fpa_bufpool_gaura(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT(gaura)));
	limit = fpavf_read64((void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gaura)));

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
			FPA_VF_VHPOOL_AVAILABLE(gpool)));

	return RTE_MIN(avail, (limit - cnt));
}
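/*
 * Illustrative example (register values assumed): with limit = 1024,
 * cnt = 100 buffers currently drawn from the aura and avail = 900
 * buffers physically present in the pool, the free count is
 * RTE_MIN(900, 1024 - 100) = 900. Both the aura's remaining quota and
 * the pool's actual backing stock cap further allocations.
 */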
uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
			    unsigned int buf_offset, int node_id)
{
	unsigned int gpool;
	unsigned int gaura;
	uintptr_t gpool_handle;
	uintptr_t pool_bar;
	int res;

	RTE_SET_USED(node_id);
	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);

	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
	if (object_size > FPA_MAX_OBJ_SIZE) {
		errno = EINVAL;
		goto error_end;
	}

	rte_spinlock_lock(&fpadev.lock);
	res = octeontx_fpa_gpool_alloc(object_size);

	/* Bail if failed */
	if (unlikely(res < 0)) {
		errno = -res;
		goto error_unlock;
	}
	gpool = res;

	/* get pool handle */
	gpool_handle = octeontx_fpa_gpool2handle(gpool);
	if (!octeontx_fpa_handle_valid(gpool_handle)) {
		errno = ENOSPC;
		goto error_gpool_free;
	}

	/* Get pool bar address from handle */
	pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;

	res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
					object_count);
	if (res < 0) {
		errno = -res;
		goto error_gpool_free;
	}

	/* populate AURA fields */
	res = octeontx_fpapf_aura_attach(gpool);
	if (res < 0) {
		errno = -res;
		goto error_pool_destroy;
	}

	gaura = FPA_AURA_IDX(gpool);

	/* Release lock */
	rte_spinlock_unlock(&fpadev.lock);

	/* populate AURA registers */
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT(gaura)));
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	octeontx_fpapf_start_count(gpool);

	return gpool_handle;

error_pool_destroy:
	octeontx_fpavf_free(gpool);
	octeontx_fpapf_pool_destroy(gpool);
error_gpool_free:
	octeontx_gpool_free(gpool);
error_unlock:
	rte_spinlock_unlock(&fpadev.lock);
error_end:
	return (uintptr_t)NULL;
}
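/*
 * Typical call sequence (sketch only; memsz/memva and error handling
 * are the caller's, e.g. the octeontx mempool ops that consume this
 * API):
 *
 *	uintptr_t h;
 *
 *	h = octeontx_fpa_bufpool_create(obj_size, obj_cnt, buf_off, node);
 *	if (h == (uintptr_t)NULL)
 *		return -errno;
 *	// map the memory backing the objects before first use
 *	octeontx_fpavf_pool_set_range(h, memsz, memva,
 *				      octeontx_fpa_bufpool_gpool(h));
 *	...
 *	octeontx_fpa_bufpool_destroy(h, node);
 */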
/*
 * Destroy a buffer pool.
 */
int
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
{
	void **node, **curr, *head = NULL;
	uint64_t sz;
	uint64_t cnt, avail;
	uint8_t gpool;
	uint16_t gaura;
	uintptr_t pool_bar;
	int ret;

	RTE_SET_USED(node_id);

	/* Wait for all outstanding writes to be committed */
	rte_smp_wmb();

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the pool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	/* get the aura */
	gaura = octeontx_fpa_bufpool_gaura(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	/* Check for no outstanding buffers */
	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT(gaura)));
	if (cnt) {
		fpavf_log_dbg("buffers exist in pool cnt %" PRId64 "\n", cnt);
		return -EBUSY;
	}

	rte_spinlock_lock(&fpadev.lock);

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
			FPA_VF_VHPOOL_AVAILABLE(gpool)));

	/* Prepare to empty the entire POOL */
	fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	/* Invalidate the POOL */
	octeontx_gpool_free(gpool);

	/* Process all buffers in the pool */
	while (avail--) {

		/* Yank a buffer from the pool */
		node = (void *)(uintptr_t)
			fpavf_read64((void *)
				(pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));

		if (node == NULL) {
			fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
				      gaura, avail);
			break;
		}

		/* Insert it into an ordered linked list */
		for (curr = &head; curr[0] != NULL; curr = curr[0]) {
			if ((uintptr_t)node <= (uintptr_t)curr[0])
				break;
		}
		node[0] = curr[0];
		curr[0] = node;
	}
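	/*
	 * The drained buffers now form an address-ordered chain. In a
	 * healthy pool that chain is a perfect arithmetic series: each
	 * node sits exactly sz bytes below its successor (for example,
	 * with sz = 0x1000: 0x10000 -> 0x11000 -> 0x12000). A gap or a
	 * duplicate indicates a leaked or double-freed buffer, which
	 * the verification loop below logs.
	 */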
	/* Verify the linked list to be a perfect series */
	sz = octeontx_fpa_bufpool_block_size(handle) << 7;
	for (curr = head; curr != NULL && curr[0] != NULL;
	     curr = curr[0]) {
		if (curr == curr[0] ||
		    ((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
			fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
				      gpool, curr, curr[0]);
		}
	}

	/* Disable pool operation */
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHPOOL_END_ADDR(gpool)));

	(void)octeontx_fpapf_pool_destroy(gpool);

	/* Deactivate the AURA */
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	ret = octeontx_fpapf_aura_detach(gpool);
	if (ret) {
		fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
			      gpool, ret);
	}

	/* Free VF */
	(void)octeontx_fpavf_free(gpool);

	rte_spinlock_unlock(&fpadev.lock);
	return 0;
}
static void
octeontx_fpavf_setup(void)
{
	uint8_t i;
	static bool init_once;

	if (!init_once) {
		rte_spinlock_init(&fpadev.lock);
		fpadev.total_gpool_cnt = 0;

		for (i = 0; i < FPA_VF_MAX; i++) {
			fpadev.pool[i].domain_id = ~0;
			fpadev.pool[i].stack_ln_ptr = 0;
			fpadev.pool[i].sz128 = 0;
			fpadev.pool[i].bar0 = NULL;
			fpadev.pool[i].pool_stack_base = NULL;
			fpadev.pool[i].is_inuse = false;
		}
		init_once = true;
	}
}
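/*
 * Illustrative decode (register value assumed): before handing a VF to
 * a guest, the PF stashes the VF's identity in VHAURA_CNT_THRESHOLD(0).
 * For val = 0x05000200, domain_id = (val >> 8) & 0xffff = 2 and
 * vf_id = (val >> 24) & 0xffff = 5, as extracted below.
 */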
static int
octeontx_fpavf_identify(void *bar0)
{
	uint64_t val;
	uint16_t domain_id;
	uint16_t vf_id;
	uint64_t stack_ln_ptr;

	val = fpavf_read64((void *)((uintptr_t)bar0 +
			FPA_VF_VHAURA_CNT_THRESHOLD(0)));

	domain_id = (val >> 8) & 0xffff;
	vf_id = (val >> 24) & 0xffff;

	stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
			FPA_VF_VHPOOL_THRESHOLD(0)));
	if (vf_id >= FPA_VF_MAX) {
		fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id);
		return -1;
	}

	if (fpadev.pool[vf_id].is_inuse) {
		fpavf_log_err("vf_id %d is_inuse\n", vf_id);
		return -1;
	}

	fpadev.pool[vf_id].domain_id = domain_id;
	fpadev.pool[vf_id].vf_id = vf_id;
	fpadev.pool[vf_id].bar0 = bar0;
	fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;

	return vf_id;
}
/* FPAVF pcie device aka mempool probe */
static int
fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	uint8_t *idreg;
	int res;
	struct fpavf_res *fpa = NULL;

	RTE_SET_USED(pci_drv);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->mem_resource[0].addr == NULL) {
		fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr);
		return -ENODEV;
	}
	idreg = pci_dev->mem_resource[0].addr;

	octeontx_fpavf_setup();

	res = octeontx_fpavf_identify(idreg);
	if (res < 0)
		return -1;

	fpa = &fpadev.pool[res];
	fpadev.total_gpool_cnt++;
	rte_wmb();

	fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
		      fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
		      fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);

	return 0;
}
static const struct rte_pci_id pci_fpavf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVICE_ID_OCTEONTX_FPA_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_fpavf = {
	.id_table = pci_fpavf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = fpavf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);