/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright 2017,2019 NXP
 *
 */

/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#include <dpaa_mempool.h>
#include <dpaax_iova_table.h>
/* List of all the memseg information locally maintained in the dpaa driver.
 * This is to optimize the PA_to_VA searches until a better mechanism (algo)
 * is available.
 */
struct dpaa_memseg_list rte_dpaa_memsegs
	= TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);

struct dpaa_bp_info *rte_dpaa_bpid_info;
int dpaa_logtype_mempool;
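
/* 'alloc' handler: create a new BMan pool with a dynamically allocated BPID,
 * drain any stale buffers already sitting in the hardware pool, and record
 * the per-BPID bookkeeping (element size, mbuf metadata size, PA/VA offset)
 * used by the enqueue/dequeue fast paths.
 */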
static int
dpaa_mbuf_create_pool(struct rte_mempool *mp)
{
	struct bman_pool *bp;
	struct bm_buffer bufs[8];
	struct dpaa_bp_info *bp_info;
	uint8_t bpid;
	int num_bufs = 0, ret = 0;
	struct bman_pool_params params = {
		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID
	};

	MEMPOOL_INIT_FUNC_TRACE();

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR(
				"rte_dpaa_portal_init failed with ret: %d",
				ret);
			return ret;
		}
	}
	bp = bman_new_pool(&params);
	if (!bp) {
		DPAA_MEMPOOL_ERR("bman_new_pool() failed");
		return -ENODEV;
	}
	bpid = bman_get_params(bp)->bpid;

	/* Drain the pool of anything already in it. */
	do {
		/* Acquire is all-or-nothing, so we drain in 8s,
		 * then in 1s for the remainder.
		 */
		if (ret != 1)
			ret = bman_acquire(bp, bufs, 8, 0);
		if (ret < 8)
			ret = bman_acquire(bp, bufs, 1, 0);
		if (ret > 0)
			num_bufs += ret;
	} while (ret > 0);
	if (num_bufs)
		DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
				  num_bufs, bpid);

	if (rte_dpaa_bpid_info == NULL) {
		rte_dpaa_bpid_info = (struct dpaa_bp_info *)rte_zmalloc(NULL,
				sizeof(struct dpaa_bp_info) * DPAA_MAX_BPOOLS,
				RTE_CACHE_LINE_SIZE);
		if (rte_dpaa_bpid_info == NULL) {
			bman_free_pool(bp);
			return -ENOMEM;
		}
	}

	rte_dpaa_bpid_info[bpid].mp = mp;
	rte_dpaa_bpid_info[bpid].bpid = bpid;
	rte_dpaa_bpid_info[bpid].size = mp->elt_size;
	rte_dpaa_bpid_info[bpid].bp = bp;
	rte_dpaa_bpid_info[bpid].meta_data_size =
		sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
	rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
	rte_dpaa_bpid_info[bpid].ptov_off = 0;
	rte_dpaa_bpid_info[bpid].flags = 0;

	bp_info = rte_malloc(NULL,
			     sizeof(struct dpaa_bp_info),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_info) {
		DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
		bman_free_pool(bp);
		return -ENOMEM;
	}

	rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
		   sizeof(struct dpaa_bp_info));
	mp->pool_data = (void *)bp_info;

	DPAA_MEMPOOL_INFO("BMAN pool created for bpid =%d", bpid);
	return 0;
}
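
/* 'free' handler: release the BMan pool back to hardware and free the
 * per-mempool bookkeeping attached to mp->pool_data.
 */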
static void
dpaa_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

	MEMPOOL_INIT_FUNC_TRACE();

	if (bp_info) {
		bman_free_pool(bp_info->bp);
		DPAA_MEMPOOL_INFO("BMAN pool freed for bpid =%d",
				  bp_info->bpid);
		rte_free(mp->pool_data);
		mp->pool_data = NULL;
	}
}
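
/* Release a single buffer (given by physical address) to the BMan pool,
 * spinning and retrying while the BMan portal reports busy.
 */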
static void
dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
{
	struct bm_buffer buf;
	int ret;

	DPAA_MEMPOOL_DPDEBUG("Free 0x%" PRIx64 " to bpid: %d",
			     addr, bp_info->bpid);

	bm_buffer_set64(&buf, addr);
retry:
	ret = bman_release(bp_info->bp, &buf, 1, 0);
	if (ret) {
		DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
		cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
		goto retry;
	}
}
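
/* 'enqueue' handler: convert each object's virtual address to its IOVA and
 * hand the buffer area beyond the mbuf metadata back to BMan. The first free
 * also latches the VA-PA offset when the pool is backed by a single
 * contiguous memory segment.
 */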
static int
dpaa_mbuf_free_bulk(struct rte_mempool *pool,
		    void *const *obj_table,
		    unsigned int n)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
	int ret;
	unsigned int i = 0;

	DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
			     n, bp_info->bpid);

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return 0;
		}
	}

	while (i < n) {
		uint64_t phy = rte_mempool_virt2iova(obj_table[i]);

		if (unlikely(!bp_info->ptov_off)) {
			/* buffers are from a single mem segment */
			if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
				bp_info->ptov_off = (size_t)obj_table[i] - phy;
				rte_dpaa_bpid_info[bp_info->bpid].ptov_off
						= bp_info->ptov_off;
			}
		}

		dpaa_buf_free(bp_info,
			      (uint64_t)phy + bp_info->meta_data_size);
		i = i + 1;
	}

	DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid =%d",
			     n, bp_info->bpid);

	return 0;
}
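
/* 'dequeue' handler: acquire buffers from BMan in batches of up to
 * DPAA_MBUF_MAX_ACQ_REL, convert each physical address back into a virtual
 * mbuf pointer and fill obj_table. On shortage, everything acquired so far
 * is released again and -ENOBUFS is returned.
 */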
static int
dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
		     void **obj_table,
		     unsigned int count)
{
	struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
	struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
	struct dpaa_bp_info *bp_info;
	void *bufaddr;
	int i, ret;
	unsigned int n = 0;

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);

	DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
			     count, bp_info->bpid);

	if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
		DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
				 count);
		return -1;
	}

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return -1;
		}
	}

	while (n < count) {
		/* Acquire is all-or-nothing, so we acquire in batches of
		 * DPAA_MBUF_MAX_ACQ_REL, then the remainder.
		 */
		if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
			ret = bman_acquire(bp_info->bp, bufs,
					   DPAA_MBUF_MAX_ACQ_REL, 0);
		} else {
			ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
		}
		/* In case fewer than the requested number of buffers are
		 * available in the pool, qbman_swp_acquire returns 0.
		 */
		if (ret <= 0) {
			DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
					     ret);
			/* The API expects the exact number of requested
			 * buffers. Releasing all buffers allocated so far.
			 */
			dpaa_mbuf_free_bulk(pool, obj_table, n);
			return -ENOBUFS;
		}
		/* assigning mbuf from the acquired objects */
		for (i = 0; (i < ret) && bufs[i].addr; i++) {
			/* TODO-errata - observed that bufs may be null,
			 * i.e. the first buffer is valid but the remaining
			 * buffers may be null.
			 */
			bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
			m[n] = (struct rte_mbuf *)((char *)bufaddr
						- bp_info->meta_data_size);
			DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
					     (void *)bufaddr, (void *)m[n]);
			n++;
		}
	}

	DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid=%d",
			     n, bp_info->bpid);
	return 0;
}
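
/* 'get_count' handler: query BMan for the number of free buffers currently
 * held in this pool's hardware BPID.
 */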
static unsigned int
dpaa_mbuf_get_count(const struct rte_mempool *mp)
{
	struct dpaa_bp_info *bp_info;

	MEMPOOL_INIT_FUNC_TRACE();

	if (!mp || !mp->pool_data) {
		DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
		return 0;
	}

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

	return bman_query_free_buffers(bp_info->bp);
}
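
/* 'populate' handler: record the memory chunk in the PA-VA translation table
 * and the local memseg list before delegating the actual object placement to
 * the generic rte_mempool_op_populate_helper().
 */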
static int
dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
	      void *vaddr, rte_iova_t paddr, size_t len,
	      rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	struct dpaa_bp_info *bp_info;
	unsigned int total_elt_sz;
	struct dpaa_memseg *ms;

	if (!mp || !mp->pool_data) {
		DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
		return 0;
	}

	/* Update the PA-VA Table */
	dpaax_iova_table_update(paddr, vaddr, len);

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

	DPAA_MEMPOOL_DPDEBUG("Req size %" PRIx64 " vs Available %u\n",
			     (uint64_t)len, total_elt_sz * mp->size);

	/* Detect whether the pool area has sufficient space for all elements
	 * in this memzone.
	 */
	if (len >= total_elt_sz * mp->size)
		bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;

	/* For each memory chunk pinned to the Mempool, a linked list of the
	 * contained memsegs is created for searching when PA to VA
	 * conversion is required.
	 */
	ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
	if (!ms) {
		DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
		DPAA_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
		/* If the element is not added, it only leads to a failed
		 * lookup for that element and the logic falls back to the
		 * traditional DPDK memseg traversal code. So this is not a
		 * blocking error - but an error message is printed.
		 */
		return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr,
						      paddr, len, obj_cb,
						      obj_cb_arg);
	}

	ms->vaddr = vaddr;
	ms->iova = paddr;
	ms->len = len;
	/* Head insertions are generally faster than tail insertions as the
	 * buffers pinned are picked from the rear end.
	 */
	TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);

	return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
					      len, obj_cb, obj_cb_arg);
}
static const struct rte_mempool_ops dpaa_mpool_ops = {
	.name = DPAA_MEMPOOL_OPS_NAME,
	.alloc = dpaa_mbuf_create_pool,
	.free = dpaa_mbuf_free_pool,
	.enqueue = dpaa_mbuf_free_bulk,
	.dequeue = dpaa_mbuf_alloc_bulk,
	.get_count = dpaa_mbuf_get_count,
	.populate = dpaa_populate,
};

MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
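
/* Illustrative usage sketch (not part of this driver): an application could
 * select these ops explicitly by name when building an mbuf pool by hand.
 * The pool name, element count and cache depth below are arbitrary example
 * values, not values mandated by this driver.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("example_pool", 4096,
 *			sizeof(struct rte_mbuf) + RTE_MBUF_DEFAULT_BUF_SIZE,
 *			256, sizeof(struct rte_pktmbuf_pool_private),
 *			rte_socket_id(), 0);
 *	if (mp != NULL &&
 *	    rte_mempool_set_ops_byname(mp, DPAA_MEMPOOL_OPS_NAME, NULL) == 0)
 *		rte_pktmbuf_pool_init(mp, NULL);
 *
 * The pool would then still need rte_mempool_populate_default() and
 * per-object rte_pktmbuf_init(). In the common case rte_pktmbuf_pool_create()
 * picks the platform mempool ops automatically, so applications rarely need
 * to name these ops directly.
 */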
RTE_INIT(dpaa_mp_init_log)
{
	dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
	if (dpaa_logtype_mempool >= 0)
		rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);
}